From cb64bb687427cb757e8ebe77731455cba371b298 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 13 Jan 2011 14:58:53 -0800 Subject: [PATCH 001/651] Porting Matti's branch to git. No changes. --- aux/broctl | 2 +- policy/bro.init | 12 +- policy/icmp.bro | 144 +++++++++++- src/Analyzer.cc | 8 +- src/AnalyzerTags.h | 2 +- src/DPM.cc | 12 +- src/ICMP.cc | 555 +++++++++++++++++++++++++++++++++++++++------ src/ICMP.h | 77 ++----- src/Sessions.cc | 50 +++- src/Val.cc | 1 - src/Val.h | 8 +- src/event.bif | 18 +- src/net_util.cc | 59 ++++- src/net_util.h | 1 + 14 files changed, 795 insertions(+), 154 deletions(-) diff --git a/aux/broctl b/aux/broctl index a05be1242b..0d8b64252f 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit a05be1242b4e06dca1bb1a38ed871e7e2d78181b +Subproject commit 0d8b64252f00f147f31f5e8c02a6a710699b67d9 diff --git a/policy/bro.init b/policy/bro.init index 1ba8f59b4d..e812db0075 100644 --- a/policy/bro.init +++ b/policy/bro.init @@ -38,11 +38,12 @@ type icmp_context: record { id: conn_id; len: count; proto: count; - frag_offset: count; - bad_hdr_len: bool; - bad_checksum: bool; - MF: bool; - DF: bool; + frag_offset: count &optional; #no frag offset for IPv6 + bad_hdr_len: bool &optional; + bad_checksum: bool &optional; #no checksum in IPv6 header + MF: bool &optional; #no MF for IPv6 + DF: bool &optional; #no DF for IPv6 + ICMP6Flag: bool; }; type addr_set: set[addr]; @@ -534,6 +535,7 @@ const IPPROTO_IGMP = 2; # group mgmt protocol const IPPROTO_IPIP = 4; # IP encapsulation in IP const IPPROTO_TCP = 6; # TCP const IPPROTO_UDP = 17; # user datagram protocol +const IPPROTO_ICMPV6 = 58; # ICMP for IPv6 const IPPROTO_RAW = 255; # raw IP packet type ip_hdr: record { diff --git a/policy/icmp.bro b/policy/icmp.bro index c6c3c87d44..d75b2a3731 100644 --- a/policy/icmp.bro +++ b/policy/icmp.bro @@ -1,4 +1,5 @@ # $Id: icmp.bro 6883 2009-08-19 21:08:09Z vern $ +# While using this script, please notice that the last F/T value is the IPv6 Flag @load hot @load weird @@ -55,8 +56,16 @@ type flow_info: record { payload: string; }; +#Insert whitelisted routers here, Router advertisements from other +#routers will be logged as possible rogue router attacks +const routers_whitelist: table[string] of bool = { + #["fe80::260:97ff:fe07:69ea"] = T, #an example + } &redef &default = F; + const names: table[count] of string = { [0] = "echo_reply", + [1] = "unreach", #icmpv6 + [2] = "too_big", #icmpv6 [3] = "unreach", [4] = "quench", [5] = "redirect", @@ -71,6 +80,33 @@ const names: table[count] of string = { [16] = "info_reply", [17] = "mask_req", [18] = "mask_reply", + [128] = "echo_req", #icmpv6 + [129] = "echo_reply", #icmpv6 + [130] = "group_memb_query", #icmpv6 + [131] = "group_memb_report", #icmpv6 + [132] = "group_memb_reduct", #icmpv6 + [133] = "router_sol", #icmpv6 + [134] = "router_ad", #icmpv6 + [135] = "neighbor_sol", #icmpv6 + [136] = "neighbor_ad", #icmpv6 + [137] = "redirect", #icmpv6 + [138] = "router_renum", #icmpv6 + [139] = "node_info_query", #icmpv6 + [140] = "node_info_resp", #icmpv6 + [141] = "inv_neigh_disc_sol", #icmpv6 + [142] = "inv_neigh_disc_ad", #icmpv6 + [143] = "mul_lis_report", #icmpv6 + [144] = "home_agent_addr_req", #icmpv6 + [145] = "home_agent_addr_reply",#icmpv6 + [146] = "mobible_prefx_sol", #icmpv6 + [147] = "mobible_prefx_ad", #icmpv6 + [148] = "cert_path_sol", #icmpv6 + [149] = "cert_path_ad", #icmpv6 + [150] = "experimental", #icmpv6 + [151] = "mcast_router_ad", #icmpv6 + [152] = "mcast_router_sol", #icmpv6 + [153] = 
"mcast_router_term", #icmpv6 + [154] = "fmip", #icmpv6 } &default = function(n: count): string { return fmt("icmp-%d", n); }; @@ -80,7 +116,8 @@ const IP_proto_name: table[count] of string = { [2] = "IGMP", [6] = "TCP", [17] = "UDP", - [41] = "IPV6", + [41] = "IP6", + [58] = "ICMP6", } &default = function(n: count): string { return fmt("%s", n); } &redef; @@ -123,12 +160,13 @@ global flows: table[flow_id] of flow_info &read_expire = 45 sec &expire_func = flush_flow; -event icmp_sent(c: connection, icmp: icmp_conn) +event icmp_sent(c: connection, icmp: icmp_conn, ICMP6: bool) { - print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s", + + print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s %s", network_time(), 0.0, icmp$orig_h, icmp$resp_h, names[icmp$itype], icmp$itype, icmp$icode, "icmp", - icmp$len, "0", "SH"); + icmp$len, "0", "SH", ICMP6); } event flow_summary(flow: flow_id, last_time: time) @@ -173,13 +211,64 @@ function update_flow(icmp: icmp_conn, id: count, is_orig: bool, payload: string) schedule +30sec { flow_summary(fid, fi$last_time) }; } -event icmp_echo_request(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string) + +event icmp_error_message(c: connection, icmp: icmp_conn, code: count, context: icmp_context) #for other but the unreach types, which is preserved + { + + if ( active_connection(context$id) ) + { + # This section allows Bro to act on ICMP error message packets + # that happen in the context of an active connection. It is + # not currently used. + local c2 = connection_record(context$id); + local os = c2$orig$state; + local rs = c2$resp$state; + local is_attempt = + is_tcp_port(c2$id$orig_p) ? + (os == TCP_SYN_SENT && rs == TCP_INACTIVE) : + (os == UDP_ACTIVE && rs == UDP_INACTIVE); + + # Insert action here. + } + + if ( log_details ) + { + # ICMP error message packets are logged here. + # Due to the connection data contained *within* + # them, each log line will contain two connections' worth + # of data. The initial ICMP connection info is the same + # as logged for connections. + print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s", + network_time(), 0.0, icmp$orig_h, icmp$resp_h, + names[icmp$itype], icmp$itype, icmp$icode, "icmp", + icmp$len, "0", "EncapPkt:", + # This is the encapsulated packet: + context$id$orig_h, context$id$orig_p, + context$id$resp_h, context$id$resp_p, + context$len, IP_proto_name[context$proto], + context$len, context$bad_hdr_len, + context$bad_checksum, context$ICMP6Flag); + } + + } + + + +event icmp6_placeholder(c: connection, icmp: icmp_conn, ICMP6: bool) #just for testing + { + print "icmp6_placeholder triggered"; + } + + +event icmp_echo_request(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string, ICMP6: bool) { update_flow(icmp, id, T, payload); + local orig = icmp$orig_h; local resp = icmp$resp_h; + # Simple ping scan detector. if ( detect_scans && (orig !in Scan::distinct_peers || @@ -231,7 +320,7 @@ event icmp_echo_request(c: connection, icmp: icmp_conn, id: count, seq: count, p } event icmp_echo_reply(c: connection, icmp: icmp_conn, id: count, - seq: count, payload: string) + seq: count, payload: string, ICMP6: bool) { # Check payload with the associated flow. @@ -240,6 +329,8 @@ event icmp_echo_reply(c: connection, icmp: icmp_conn, id: count, fid$resp_h = icmp$orig_h; # it's an echo reply. 
fid$id = id; + + if ( fid !in flows ) { # NOTICE([$note=ICMPUnpairedEchoReply, @@ -266,9 +357,12 @@ event icmp_echo_reply(c: connection, icmp: icmp_conn, id: count, update_flow(icmp, id, F, payload); } + + event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, context: icmp_context) { + if ( active_connection(context$id) ) { # This section allows Bro to act on ICMP-unreachable packets @@ -292,7 +386,7 @@ event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, # them, each log line will contain two connections' worth # of data. The initial ICMP connection info is the same # as logged for connections. - print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s", + print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s", network_time(), 0.0, icmp$orig_h, icmp$resp_h, names[icmp$itype], icmp$itype, icmp$icode, "icmp", icmp$len, "0", "EncapPkt:", @@ -301,6 +395,40 @@ event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, context$id$resp_h, context$id$resp_p, context$len, IP_proto_name[context$proto], context$len, context$bad_hdr_len, - context$bad_checksum); + context$bad_checksum, context$ICMP6Flag); } } + + + event icmp_router_advertisement(c: connection, icmp: icmp_conn, ICMP6: bool) + { + if ( routers_whitelist[ fmt("%s",icmp$orig_h) ] ) + { + print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s %s", + network_time(), 0.0, icmp$orig_h, icmp$resp_h, + names[icmp$itype], icmp$itype, icmp$icode, "icmp", + icmp$len, "0", "SH", ICMP6); + } + else + { + print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s", + network_time(), 0.0, icmp$orig_h, icmp$resp_h, + names[icmp$itype], "Possible Rogue Router Detected", icmp$itype, icmp$icode, + icmp$len, ICMP6); + } + + } + + + + + + + + + + + + + + diff --git a/src/Analyzer.cc b/src/Analyzer.cc index c323f99e23..a3f6bbfc2c 100644 --- a/src/Analyzer.cc +++ b/src/Analyzer.cc @@ -48,7 +48,7 @@ const Analyzer::Config Analyzer::analyzer_configs[] = { { AnalyzerTag::ICMP, "ICMP", ICMP_Analyzer::InstantiateAnalyzer, ICMP_Analyzer::Available, 0, false }, - { AnalyzerTag::ICMP_TimeExceeded, "ICMP_TIMEEXCEEDED", + /*{ AnalyzerTag::ICMP_TimeExceeded, "ICMP_TIMEEXCEEDED", ICMP_TimeExceeded_Analyzer::InstantiateAnalyzer, ICMP_TimeExceeded_Analyzer::Available, 0, false }, { AnalyzerTag::ICMP_Unreachable, "ICMP_UNREACHABLE", @@ -56,7 +56,11 @@ const Analyzer::Config Analyzer::analyzer_configs[] = { ICMP_Unreachable_Analyzer::Available, 0, false }, { AnalyzerTag::ICMP_Echo, "ICMP_ECHO", ICMP_Echo_Analyzer::InstantiateAnalyzer, - ICMP_Echo_Analyzer::Available, 0, false }, + ICMP_Echo_Analyzer::Available, 0, false },*/ + + + + { AnalyzerTag::TCP, "TCP", TCP_Analyzer::InstantiateAnalyzer, TCP_Analyzer::Available, 0, false }, diff --git a/src/AnalyzerTags.h b/src/AnalyzerTags.h index 9bf3efbd3c..eb18a03a73 100644 --- a/src/AnalyzerTags.h +++ b/src/AnalyzerTags.h @@ -22,7 +22,7 @@ namespace AnalyzerTag { PIA_TCP, PIA_UDP, // Transport-layer analyzers. - ICMP, ICMP_TimeExceeded, ICMP_Unreachable, ICMP_Echo, TCP, UDP, + ICMP,/* ICMP_TimeExceeded, ICMP_Unreachable, ICMP_Echo*/ TCP, UDP, // Application-layer analyzers (hand-written). 
BitTorrent, BitTorrentTracker, diff --git a/src/DPM.cc b/src/DPM.cc index 35111a38fa..7be9376b9f 100644 --- a/src/DPM.cc +++ b/src/DPM.cc @@ -215,7 +215,10 @@ bool DPM::BuildInitialAnalyzerTree(TransportProto proto, Connection* conn, case TRANSPORT_ICMP: { const struct icmp* icmpp = (const struct icmp *) data; - switch ( icmpp->icmp_type ) { + + + //Old code, moving to having only one ICMP analyzer + /*switch ( icmpp->icmp_type ) { case ICMP_ECHO: case ICMP_ECHOREPLY: @@ -241,10 +244,11 @@ bool DPM::BuildInitialAnalyzerTree(TransportProto proto, Connection* conn, DBG_DPD(conn, "activated ICMP Time Exceeded analyzer"); } break; - } + }*/ + //if ( ! root ) - if ( ! root ) - root = new ICMP_Analyzer(conn); + root = new ICMP_Analyzer(conn); + DBG_DPD(conn, "activated ICMP analyzer"); analyzed = true; break; diff --git a/src/ICMP.cc b/src/ICMP.cc index d73a9a781e..b83cf76a40 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -9,12 +9,17 @@ #include "Event.h" #include "ICMP.h" +#include + + + ICMP_Analyzer::ICMP_Analyzer(Connection* c) : TransportLayerAnalyzer(AnalyzerTag::ICMP, c) { icmp_conn_val = 0; c->SetInactivityTimeout(icmp_inactivity_timeout); request_len = reply_len = -1; + } ICMP_Analyzer::ICMP_Analyzer(AnalyzerTag::Tag tag, Connection* c) @@ -45,16 +50,43 @@ void ICMP_Analyzer::DeliverPacket(int arg_len, const u_char* data, // Subtract off the common part of ICMP header. PacketContents(data + 8, min(len, caplen) - 8); + const struct icmp* icmpp = (const struct icmp*) data; len = arg_len; - if ( ! ignore_checksums && caplen >= len && - icmp_checksum(icmpp, len) != 0xffff ) + + //We need a separate calculation for ICMP6 checksums, pseudoheader is appended to the + //ICMP6 checksum calculation, which is different from ICMP4 +#ifdef BROv6 + + + if (ip->NextProto() == IPPROTO_ICMPV6 && ! ignore_checksums && + caplen >= len && icmp6_checksum(icmpp,ip->IP6_Hdr(),len )!= 0xffff ) + { + Weird("bad_ICMP6_checksum"); + return; + } + else if (ip->NextProto() != IPPROTO_ICMPV6 && ! ignore_checksums && + caplen >= len && icmp_checksum(icmpp, len) != 0xffff ) { Weird("bad_ICMP_checksum"); return; } + + +#else + + if ( ! ignore_checksums && caplen >= len && + icmp_checksum(icmpp, len) != 0xffff ) + { + Weird("bad_ICMP_checksum"); + return; + } +#endif + + + Conn()->SetLastTime(current_timestamp); if ( rule_matcher ) @@ -66,7 +98,7 @@ void ICMP_Analyzer::DeliverPacket(int arg_len, const u_char* data, type = icmpp->icmp_type; code = icmpp->icmp_code; - // Move past common portion of ICMP header. + // Move past common portion of ICMP header. //OK for ICMPv6? 
data += 8; caplen -= 8; len -= 8; @@ -77,33 +109,113 @@ void ICMP_Analyzer::DeliverPacket(int arg_len, const u_char* data, else len_stat += len; - NextICMP(current_timestamp, icmpp, len, caplen, data); + NextICMP(current_timestamp, icmpp, len, caplen, data, ip); if ( rule_matcher ) matcher_state.Match(Rule::PAYLOAD, data, len, is_orig, false, false, true); } -void ICMP_Analyzer::NextICMP(double /* t */, const struct icmp* /* icmpp */, - int /* len */, int /* caplen */, - const u_char*& /* data */) - { - ICMPEvent(icmp_sent); - } -void ICMP_Analyzer::ICMPEvent(EventHandlerPtr f) - { + +/********************Generic analyzer for all ICMP4/ICMP6******************************/ +void ICMP_Analyzer::NextICMP(double t , const struct icmp* icmpp , int len , int caplen, + const u_char*& data, const IP_Hdr* ip_hdr ) + { + int ICMP6Flag = 0; + + //printf("Executing: ICMP_Analyzer::NextICMP\n"); + //printf("New analyzer structure\n"); + + if ( ip_hdr->NextProto() == IPPROTO_ICMPV6 ) + { + //printf("ICMP6!\n"); + ICMP6Flag = 1; + + switch (type) //Add new ICMP6 functions here, you can also use codes to narrow the area of single functions. + { + //All the echo stuff here + case ICMP6_ECHO_REQUEST: + case ICMP6_ECHO_REPLY: + Echo(t, icmpp, len, caplen, data, ip_hdr); + break; + + + //Error messages all have the same structure for their context, and are handled by the same function. + case ICMP6_PARAM_PROB: + case ICMP6_TIME_EXCEEDED: + case ICMP6_PACKET_TOO_BIG: + case ICMP6_DST_UNREACH: + Context(t, icmpp, len, caplen, data, ip_hdr); + break; + + //All router related stuff should eventually be handled by the Router() + case ND_REDIRECT: + case ND_ROUTER_SOLICIT: + case ICMP6_ROUTER_RENUMBERING: + case ND_ROUTER_ADVERT: + Router(t, icmpp, len, caplen, data, ip_hdr); //currently only logs the router stuff for other than router_advert + break; + + /* listed for convenience + case ICMP6_PARAM_PROB: break; + case MLD_LISTENER_QUERY: break; + case MLD_LISTENER_REPORT: break; + case MLD_LISTENER_REDUCTION: break; + case ND_NEIGHBOR_SOLICIT: break; + case ND_NEIGHBOR_ADVERT: break; + case ND_REDIRECT: break; + case ICMP6_ROUTER_RENUMBERING: break; + case ND_NEIGHBOR_SOLICIT: break; + case ND_NEIGHBOR_ADVERT: break; + case ICMP6_TIME_EXCEEDED: break; + */ + + default: ICMPEvent(icmp_sent, ICMP6Flag); break; + } + } + else if ( ip_hdr->NextProto() == IPPROTO_ICMP ) + { + + switch (type) //Add new ICMP4 functions here + { + case ICMP_ECHO: + case ICMP_ECHOREPLY: + Echo(t, icmpp, len, caplen, data, ip_hdr); + break; + + case ICMP_UNREACH: + case ICMP_TIMXCEED: + Context(t, icmpp, len, caplen, data, ip_hdr); + break; + + default: ICMPEvent(icmp_sent, ICMP6Flag); break; + } + + + } + else + Weird("Malformed ip header"); + } + + +void ICMP_Analyzer::ICMPEvent(EventHandlerPtr f, int ICMP6Flag) + { if ( ! f ) - return; + return; + val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal()); + vl->append(BuildICMPVal(ICMP6Flag)); + //if ( f == icmp_sent ) //for now, testing purposes + vl->append(new Val(ICMP6Flag, TYPE_BOOL)); ConnectionEvent(f, vl); } -RecordVal* ICMP_Analyzer::BuildICMPVal() + +RecordVal* ICMP_Analyzer::BuildICMPVal(int ICMP6Flag) { if ( ! 
icmp_conn_val ) { @@ -111,7 +223,13 @@ RecordVal* ICMP_Analyzer::BuildICMPVal() icmp_conn_val->Assign(0, new AddrVal(Conn()->OrigAddr())); icmp_conn_val->Assign(1, new AddrVal(Conn()->RespAddr())); - icmp_conn_val->Assign(2, new Val(type, TYPE_COUNT)); + + if ( ICMP6Flag == 1 ) + icmp_conn_val->Assign(2, new Val(Type6to4(type), TYPE_COUNT)); //to avoid errors in getting the message type *name* right on the scripting level, type number will be different from true ipv6 + else + icmp_conn_val->Assign(2, new Val(type, TYPE_COUNT)); + + icmp_conn_val->Assign(3, new Val(code, TYPE_COUNT)); icmp_conn_val->Assign(4, new Val(len, TYPE_COUNT)); } @@ -121,48 +239,170 @@ RecordVal* ICMP_Analyzer::BuildICMPVal() return icmp_conn_val; } -RecordVal* ICMP_Analyzer::ExtractICMPContext(int len, const u_char*& data) +RecordVal* ICMP_Analyzer::ExtractICMP4Context(int len, const u_char*& data) { - const struct ip* ip = (const struct ip *) data; - uint32 ip_hdr_len = ip->ip_hl * 4; + /** + * For use only with ICMP4, ICMPV6 context extraction is still non-functional + */ + + const IP_Hdr ip_hdr_data((const struct ip*) data); + const IP_Hdr* ip_hdr = &ip_hdr_data; + int ICMP6Flag = 0; + + uint32 ip_hdr_len = ip_hdr->HdrLen(); uint32 ip_len, frag_offset; TransportProto proto = TRANSPORT_UNKNOWN; int DF, MF, bad_hdr_len, bad_checksum; - uint32 src_addr, dst_addr; + uint32 src_addr, dst_addr,src_addr2, dst_addr2; uint32 src_port, dst_port; - if ( ip_hdr_len < sizeof(struct ip) || ip_hdr_len > uint32(len) ) - { // We don't have an entire IP header. + if ( ip_hdr_len < sizeof(struct ip) || ip_hdr_len > uint32(len) ) + { // We don't have an entire IP header. + bad_hdr_len = 1; + ip_len = frag_offset = 0; + DF = MF = bad_checksum = 0; + src_addr = dst_addr = 0; + src_port = dst_port = 0; + } + + else + { + bad_hdr_len = 0; + ip_len = ip_hdr->TotalLen(); + bad_checksum = ones_complement_checksum((void*) ip_hdr->IP4_Hdr(), ip_hdr_len, 0) != 0xffff; + + src_addr = ip_hdr->SrcAddr4(); + dst_addr = ip_hdr->DstAddr4(); + + switch ( ip_hdr->NextProto() ) { + case 1: proto = TRANSPORT_ICMP; break; + case 6: proto = TRANSPORT_TCP; break; + case 17: proto = TRANSPORT_UDP; break; + + // Default uses TRANSPORT_UNKNOWN, per initialization above. + } + + uint32 frag_field = ip_hdr->FragField(); + DF = ip_hdr->DF(); + MF = frag_field & 0x2000; + frag_offset = frag_field & /* IP_OFFMASK not portable */ 0x1fff; + + const u_char* transport_hdr = ((u_char *) ip_hdr->IP4_Hdr() + ip_hdr_len); + + if ( uint32(len) < ip_hdr_len + 4 ) //what is this value for ipv6? + { + // 4 above is the magic number meaning that both + // port numbers are included in the ICMP. 
+ bad_hdr_len = 1; + src_port = dst_port = 0; + } + + switch ( proto ) { + case TRANSPORT_ICMP: + { + const struct icmp* icmpp = + (const struct icmp *) transport_hdr; + bool is_one_way; // dummy + src_port = ntohs(icmpp->icmp_type); + dst_port = ntohs(ICMP4_counterpart(icmpp->icmp_type, + icmpp->icmp_code, + is_one_way)); + } + break; + + case TRANSPORT_TCP: + { + const struct tcphdr* tp = + (const struct tcphdr *) transport_hdr; + src_port = ntohs(tp->th_sport); + dst_port = ntohs(tp->th_dport); + } + break; + + case TRANSPORT_UDP: + { + const struct udphdr* up = + (const struct udphdr *) transport_hdr; + src_port = ntohs(up->uh_sport); + dst_port = ntohs(up->uh_dport); + } + break; + + default: + src_port = dst_port = ntohs(0); + } + } + + RecordVal* iprec = new RecordVal(icmp_context); + RecordVal* id_val = new RecordVal(conn_id); + + id_val->Assign(0, new AddrVal(src_addr)); + id_val->Assign(1, new PortVal(src_port, proto)); + id_val->Assign(2, new AddrVal(dst_addr)); + id_val->Assign(3, new PortVal(dst_port, proto)); + iprec->Assign(0, id_val); + + iprec->Assign(1, new Val(ip_len, TYPE_COUNT)); + iprec->Assign(2, new Val(proto, TYPE_COUNT)); + iprec->Assign(3, new Val(frag_offset, TYPE_COUNT)); + iprec->Assign(4, new Val(bad_hdr_len, TYPE_BOOL)); + iprec->Assign(5, new Val(bad_checksum, TYPE_BOOL)); + iprec->Assign(6, new Val(MF, TYPE_BOOL)); + iprec->Assign(7, new Val(DF, TYPE_BOOL)); + iprec->Assign(8, new Val(ICMP6Flag, TYPE_BOOL)); + + return iprec; + } + + + + +RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) + { + /** + * For use with ICMP6 error message context extraction (possibly very frail function) + */ + + const IP_Hdr ip_hdr_data((const struct ip6_hdr*) data); + const IP_Hdr* ip_hdr = &ip_hdr_data; + int ICMP6Flag = 1; + int DF = 0, MF = 0, bad_hdr_len = 0, bad_checksum = 0; + + uint32 ip_hdr_len = ip_hdr->HdrLen(); //should always be 40 + uint32* src_addr; + uint32* dst_addr; + uint32 ip_len, frag_offset = 0; + TransportProto proto = TRANSPORT_UNKNOWN; + uint32 src_port, dst_port; + + if ( ip_hdr_len < sizeof(struct ip6_hdr) || ip_hdr_len != 40 ) + { bad_hdr_len = 1; - ip_len = frag_offset = 0; - DF = MF = bad_checksum = 0; + ip_len = 0; src_addr = dst_addr = 0; src_port = dst_port = 0; } - else { - bad_hdr_len = 0; - ip_len = ntohs(ip->ip_len); - bad_checksum = ones_complement_checksum((void*) ip, ip_hdr_len, 0) != 0xffff; + ip_len = ip_hdr->TotalLen(); - src_addr = uint32(ip->ip_src.s_addr); - dst_addr = uint32(ip->ip_dst.s_addr); + src_addr = (uint32 *) ip_hdr->SrcAddr(); + dst_addr = (uint32 *) ip_hdr->DstAddr(); - switch ( ip->ip_p ) { + + + switch ( ip_hdr->NextProto() ) { case 1: proto = TRANSPORT_ICMP; break; case 6: proto = TRANSPORT_TCP; break; case 17: proto = TRANSPORT_UDP; break; + case 58: proto = TRANSPORT_ICMP; break; //TransportProto Hack // Default uses TRANSPORT_UNKNOWN, per initialization above. 
} - uint32 frag_field = ntohs(ip->ip_off); - DF = frag_field & 0x4000; - MF = frag_field & 0x2000; - frag_offset = frag_field & /* IP_OFFMASK not portable */ 0x1fff; - const u_char* transport_hdr = ((u_char *) ip + ip_hdr_len); + + const u_char* transport_hdr = ((u_char *)ip_hdr->IP6_Hdr() + ip_hdr_len); if ( uint32(len) < ip_hdr_len + 4 ) { @@ -179,7 +419,7 @@ RecordVal* ICMP_Analyzer::ExtractICMPContext(int len, const u_char*& data) (const struct icmp *) transport_hdr; bool is_one_way; // dummy src_port = ntohs(icmpp->icmp_type); - dst_port = ntohs(ICMP_counterpart(icmpp->icmp_type, + dst_port = ntohs(ICMP6_counterpart(icmpp->icmp_type, icmpp->icmp_code, is_one_way)); } @@ -215,19 +455,42 @@ RecordVal* ICMP_Analyzer::ExtractICMPContext(int len, const u_char*& data) id_val->Assign(1, new PortVal(src_port, proto)); id_val->Assign(2, new AddrVal(dst_addr)); id_val->Assign(3, new PortVal(dst_port, proto)); - iprec->Assign(0, id_val); + iprec->Assign(0, id_val); iprec->Assign(1, new Val(ip_len, TYPE_COUNT)); - iprec->Assign(2, new Val(proto, TYPE_COUNT)); - iprec->Assign(3, new Val(frag_offset, TYPE_COUNT)); + + //TransportProto Hack + if ( ip_hdr->NextProto() == 58 || 17 ) //if the encap packet is ICMPv6 we force this... (cause there is no IGMP (by that name) for ICMPv6), rather ugly hack once more + { + iprec->Assign(2, new Val(58, TYPE_COUNT)); + } + else + { + iprec->Assign(2, new Val(proto, TYPE_COUNT)); + } + + iprec->Assign(3, new Val(frag_offset, TYPE_COUNT)); //NA for ip6 iprec->Assign(4, new Val(bad_hdr_len, TYPE_BOOL)); iprec->Assign(5, new Val(bad_checksum, TYPE_BOOL)); - iprec->Assign(6, new Val(MF, TYPE_BOOL)); - iprec->Assign(7, new Val(DF, TYPE_BOOL)); + iprec->Assign(6, new Val(MF, TYPE_BOOL)); //NA for ip6 + iprec->Assign(7, new Val(DF, TYPE_BOOL)); //NA for ip6 + iprec->Assign(8, new Val(ICMP6Flag, TYPE_BOOL)); //ICMP6Flag return iprec; } + + + + + + + + + + + + bool ICMP_Analyzer::IsReuse(double /* t */, const u_char* /* pkt */) { return 0; @@ -277,57 +540,156 @@ unsigned int ICMP_Analyzer::MemoryAllocation() const + (icmp_conn_val ? icmp_conn_val->MemoryAllocation() : 0); } -ICMP_Echo_Analyzer::ICMP_Echo_Analyzer(Connection* c) -: ICMP_Analyzer(AnalyzerTag::ICMP_Echo, c) - { - } -void ICMP_Echo_Analyzer::NextICMP(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data) - { - EventHandlerPtr f = type == ICMP_ECHO ? icmp_echo_request : icmp_echo_reply; +void ICMP_Analyzer::Echo(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) + { //For handling all Echo related ICMP messages + EventHandlerPtr f = 0; + int ICMP6Flag = 0; + + //printf("Executing: Echo, NextProto:%d\n",ip_hdr->NextProto()); + + if ( ip_hdr->NextProto() == IPPROTO_ICMPV6 ) + { + f = type == ICMP6_ECHO_REQUEST ? icmp_echo_request : icmp_echo_reply; + ICMP6Flag = 1; + } + else + f = type == ICMP_ECHO ? icmp_echo_request : icmp_echo_reply; + if ( ! 
f ) return; int iid = ntohs(icmpp->icmp_hun.ih_idseq.icd_id); int iseq = ntohs(icmpp->icmp_hun.ih_idseq.icd_seq); + //printf("Check these values: iid:[%d] iseq:[%d]\n",iid,iseq); + BroString* payload = new BroString(data, caplen, 0); val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal()); + vl->append(BuildICMPVal(ICMP6Flag)); vl->append(new Val(iid, TYPE_COUNT)); vl->append(new Val(iseq, TYPE_COUNT)); vl->append(new StringVal(payload)); + vl->append(new Val(ICMP6Flag, TYPE_BOOL)); ConnectionEvent(f, vl); } -void ICMP_Context_Analyzer::NextICMP(double t, const struct icmp* icmpp, - int len, int caplen, const u_char*& data) + + + + + + + + +void ICMP_Analyzer::Router(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + //For handling router related ICMP messages, { EventHandlerPtr f = 0; - switch ( type ) { - case ICMP_UNREACH: f = icmp_unreachable; break; - case ICMP_TIMXCEED: f = icmp_time_exceeded; break; + int ICMP6Flag = 1; + + switch ( type ) + { + case ND_ROUTER_ADVERT: f = icmp_router_advertisement; break; + + case ND_REDIRECT: + case ND_ROUTER_SOLICIT: + case ICMP6_ROUTER_RENUMBERING: + default: ICMPEvent(icmp_sent,ICMP6Flag); return; + } + + val_list* vl = new val_list; + vl->append(BuildConnVal()); + vl->append(BuildICMPVal(ICMP6Flag)); + vl->append(new Val(ICMP6Flag, TYPE_BOOL)); + + ConnectionEvent(f, vl); } - if ( f ) - { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal()); - vl->append(new Val(code, TYPE_COUNT)); - vl->append(ExtractICMPContext(caplen, data)); - ConnectionEvent(f, vl); + + + + + + + + + + + + +void ICMP_Analyzer::Context(double t, const struct icmp* icmpp, + int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) + {//For handling the ICMP error messages + + EventHandlerPtr f = 0; + int ICMP6Flag = 0; + + + if ( ip_hdr->NextProto() == IPPROTO_ICMPV6 ) //is ip6 + { + + ICMP6Flag = 1; + //printf("Executing: Context for ICMPv6\n"); + + switch ( type ) + { + case ICMP6_DST_UNREACH: f = icmp_unreachable; break; + case ICMP6_PARAM_PROB: f = icmp_error_message; break; + case ICMP6_TIME_EXCEEDED: f = icmp_error_message; break; + case ICMP6_PACKET_TOO_BIG: f = icmp_error_message; break; + } + + if ( f ) + { + val_list* vl = new val_list; + vl->append(BuildConnVal()); //check for ip6 functionality + vl->append(BuildICMPVal(ICMP6Flag)); //check for ip6 functionality + vl->append(new Val(code, TYPE_COUNT)); + vl->append(ExtractICMP6Context(caplen, data)); + + ConnectionEvent(f, vl); + } + + } + else if ( ip_hdr->NextProto() == IPPROTO_ICMP ) + { + //printf("Executing: Context for ICMP\n"); + switch ( type ) + { + case ICMP_UNREACH: f = icmp_unreachable; break; + case ICMP_TIMXCEED: f = icmp_error_message; break; + } + + if ( f ) + { + val_list* vl = new val_list; + vl->append(BuildConnVal()); + vl->append(BuildICMPVal(ICMP6Flag)); + vl->append(new Val(code, TYPE_COUNT)); + vl->append(ExtractICMP4Context(caplen, data)); + + + ConnectionEvent(f, vl); + } + + } + else + { + Weird("ICMP packet, invalid data\n"); //make this more descriptive } } -int ICMP_counterpart(int icmp_type, int icmp_code, bool& is_one_way) + +int ICMP4_counterpart(int icmp_type, int icmp_code, bool& is_one_way) { is_one_way = false; @@ -338,14 +700,77 @@ int ICMP_counterpart(int icmp_type, int icmp_code, bool& is_one_way) switch ( icmp_type ) { case ICMP_ECHO: return ICMP_ECHOREPLY; case ICMP_ECHOREPLY: return ICMP_ECHO; + case ICMP_TSTAMP: return ICMP_TSTAMPREPLY; case 
ICMP_TSTAMPREPLY: return ICMP_TSTAMP; + case ICMP_IREQ: return ICMP_IREQREPLY; case ICMP_IREQREPLY: return ICMP_IREQ; + case ICMP_ROUTERSOLICIT: return ICMP_ROUTERADVERT; + case ICMP_MASKREQ: return ICMP_MASKREPLY; case ICMP_MASKREPLY: return ICMP_MASKREQ; default: is_one_way = true; return icmp_code; } } + +int ICMP6_counterpart(int icmp_type, int icmp_code, bool& is_one_way) + { + is_one_way = false; + + /**ICMP6 version of the ICMP4_counterpart, under work**/ + //not yet used anywhere, for the context class + + switch ( icmp_type ) { + + + case ICMP6_ECHO_REQUEST: return ICMP6_ECHO_REPLY; + case ICMP6_ECHO_REPLY: return ICMP6_ECHO_REQUEST; + + case ND_ROUTER_SOLICIT: return ND_ROUTER_ADVERT; + case ND_ROUTER_ADVERT: return ND_ROUTER_SOLICIT; + + case ND_NEIGHBOR_SOLICIT: return ND_NEIGHBOR_ADVERT; + case ND_NEIGHBOR_ADVERT: return ND_NEIGHBOR_SOLICIT; + + case MLD_LISTENER_QUERY: return MLD_LISTENER_REPORT; + case MLD_LISTENER_REPORT: return MLD_LISTENER_QUERY; + + case 139: return 140; //ICMP node information query and response respectively (not defined in icmp6.h) + case 140: return 139; + + case 144: return 145; //Home Agent Address Discovery Request Message and reply + case 145: return 144; + + //check the rest of the counterparts + + default: is_one_way = true; return icmp_code; + } + } + + //For mapping ICMP types and codes of v6 to v4. Because we are using same events for both icmpv4 and icmpv6 there is some overlap + //in ICMP types. If this function is used, the name (checked from a table in the scripts) will be incorrect for the listed + //types, but the names will be correct for all ICMP types. + int Type6to4(int icmp_type) + { + switch ( icmp_type ) //For these three values, the type number will be wrong if this is used! + { //easy way to disable this is just to comment all the cases out, and leave only the default. 
+ case ICMP6_DST_UNREACH: return ICMP_UNREACH; break; + case ICMP6_TIME_EXCEEDED: return ICMP_TIMXCEED; break; + case ICMP6_PARAM_PROB: return ICMP_PARAMPROB; break; + + default: return icmp_type; break; + } + } + + int Code6to4(int icmp_code) //not used yet for anything + { + switch ( icmp_code ) + { + default: return icmp_code; break; + } + } + + diff --git a/src/ICMP.h b/src/ICMP.h index 43921f1aac..14f6971915 100644 --- a/src/ICMP.h +++ b/src/ICMP.h @@ -34,15 +34,27 @@ protected: virtual bool IsReuse(double t, const u_char* pkt); virtual unsigned int MemoryAllocation() const; - void ICMPEvent(EventHandlerPtr f); + void ICMPEvent(EventHandlerPtr f, int ICMP6Flag); + + void Echo(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* ip_hdr); + void Context(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* ip_hdr); + void Router(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* ip_hdr); + + + void Describe(ODesc* d) const; - RecordVal* BuildICMPVal(); + RecordVal* BuildICMPVal(int ICMP6Flag); virtual void NextICMP(double t, const struct icmp* icmpp, - int len, int caplen, const u_char*& data); + int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); + + RecordVal* ExtractICMP4Context(int len, const u_char*& data); + RecordVal* ExtractICMP6Context(int len, const u_char*& data); - RecordVal* ExtractICMPContext(int len, const u_char*& data); RecordVal* icmp_conn_val; int type; @@ -54,65 +66,24 @@ protected: RuleMatcherState matcher_state; }; -class ICMP_Echo_Analyzer : public ICMP_Analyzer { -public: - ICMP_Echo_Analyzer(Connection* conn); +/*class ICMP4_Analyzer : public ICMP_Analyzer { - static Analyzer* InstantiateAnalyzer(Connection* conn) - { return new ICMP_Echo_Analyzer(conn); } - static bool Available() { return icmp_echo_request || icmp_echo_reply; } -protected: - ICMP_Echo_Analyzer() { } - - virtual void NextICMP(double t, const struct icmp* icmpp, - int len, int caplen, const u_char*& data); }; -class ICMP_Context_Analyzer : public ICMP_Analyzer { -public: - ICMP_Context_Analyzer(AnalyzerTag::Tag tag, Connection* conn) - : ICMP_Analyzer(tag, conn) { } +class ICMP6_Analyzer : public ICMP_Analyzer { -protected: - ICMP_Context_Analyzer() { } - virtual void NextICMP(double t, const struct icmp* icmpp, - int len, int caplen, const u_char*& data); -}; - -class ICMP_TimeExceeded_Analyzer : public ICMP_Context_Analyzer { -public: - ICMP_TimeExceeded_Analyzer(Connection* conn) - : ICMP_Context_Analyzer(AnalyzerTag::ICMP_TimeExceeded, conn) { } - - static Analyzer* InstantiateAnalyzer(Connection* conn) - { return new ICMP_TimeExceeded_Analyzer(conn); } - - static bool Available() { return icmp_time_exceeded; } - -protected: - ICMP_TimeExceeded_Analyzer() { } -}; - -class ICMP_Unreachable_Analyzer : public ICMP_Context_Analyzer { -public: - ICMP_Unreachable_Analyzer(Connection* conn) - : ICMP_Context_Analyzer(AnalyzerTag::ICMP_Unreachable, conn) { } - - static Analyzer* InstantiateAnalyzer(Connection* conn) - { return new ICMP_Unreachable_Analyzer(conn); } - - static bool Available() { return icmp_unreachable; } - -protected: - ICMP_Unreachable_Analyzer() { } -}; +};*/ // Returns the counterpart type to the given type (e.g., the counterpart // to ICMP_ECHOREPLY is ICMP_ECHO). 
-extern int ICMP_counterpart(int icmp_type, int icmp_code, bool& is_one_way); +//extern int ICMP_counterpart(int icmp_type, int icmp_code, bool& is_one_way); +extern int ICMP4_counterpart(int icmp_type, int icmp_code, bool& is_one_way); +extern int ICMP6_counterpart(int icmp_type, int icmp_code, bool& is_one_way); +extern int Type6to4(int icmp_type); +extern int Code6to4(int icmp_code); #endif diff --git a/src/Sessions.cc b/src/Sessions.cc index fd443d4dcc..de0a1cb488 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -299,6 +299,7 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, } const struct ip* ip = (const struct ip*) (pkt + hdr_size); + if ( ip->ip_v == 4 ) { IP_Hdr ip_hdr(ip); @@ -332,6 +333,8 @@ void NetSessions::NextPacketSecondary(double /* t */, const struct pcap_pkthdr* ++num_packets_processed; + + uint32 caplen = hdr->caplen - hdr_size; if ( caplen < sizeof(struct ip) ) { @@ -459,7 +462,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, int proto = ip_hdr->NextProto(); if ( proto != IPPROTO_TCP && proto != IPPROTO_UDP && - proto != IPPROTO_ICMP ) + proto != IPPROTO_ICMP && proto != IPPROTO_ICMPV6) // Added ICMPV6, Matti { dump_this_packet = 1; return; @@ -530,7 +533,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, caplen -= ip_hdr_len; uint32 min_hdr_len = (proto == IPPROTO_TCP) ? sizeof(struct tcphdr) : - (proto == IPPROTO_UDP ? sizeof(struct udphdr) : ICMP_MINLEN); + (proto == IPPROTO_UDP ? sizeof(struct udphdr) : ICMP_MINLEN); //needs checking for ICMPV6?, Matti if ( len < min_hdr_len ) { @@ -582,7 +585,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, const struct icmp* icmpp = (const struct icmp *) data; id.src_port = icmpp->icmp_type; - id.dst_port = ICMP_counterpart(icmpp->icmp_type, + id.dst_port = ICMP4_counterpart(icmpp->icmp_type, icmpp->icmp_code, id.is_one_way); @@ -593,6 +596,23 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, break; } + case IPPROTO_ICMPV6: // new case, identical to ICMP, is this correct?? Matti + { + const struct icmp* icmpp = (const struct icmp *) data; + + id.src_port = icmpp->icmp_type; + //printf("TYPE: %d\n", id.src_port); //testing, Matti + id.dst_port = ICMP6_counterpart(icmpp->icmp_type, + icmpp->icmp_code, + id.is_one_way); + + id.src_port = htons(id.src_port); + id.dst_port = htons(id.dst_port); + + d = &icmp_conns; + break; + + } default: Weird(fmt("unknown_protocol %d", proto), hdr, pkt); return; @@ -611,6 +631,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, else { conn = (Connection*) d->Lookup(h); + + if ( ! conn ) { conn = NewConn(h, t, &id, data, proto); @@ -620,6 +642,9 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, else { // We already know that connection. + + + int consistent = CheckConnectionTag(conn); if ( consistent < 0 ) { @@ -773,6 +798,19 @@ Val* NetSessions::BuildHeader(const struct ip* ip) break; } + case IPPROTO_ICMPV6: //Added, Matti + { + const struct icmp* icmpp = (const struct icmp *) data; + RecordVal* icmp_hdr = new RecordVal(icmp_hdr_type); + + //printf("datalen:%d",data_len); //Testing, Matti + + icmp_hdr->Assign(0, new Val(icmpp->icmp_type, TYPE_COUNT)); + + pkt_hdr->Assign(3, icmp_hdr); + break; + } + default: { // This is not a protocol we understand. @@ -968,7 +1006,7 @@ void NetSessions::Remove(Connection* c) ; else if ( ! 
tcp_conns.RemoveEntry(k) ) - internal_error("connection missing"); + internal_error(fmt("connection missing")); break; case TRANSPORT_UDP: @@ -1157,6 +1195,9 @@ Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, case IPPROTO_UDP: tproto = TRANSPORT_UDP; break; + case IPPROTO_ICMPV6: //TransportProto Hack + tproto = TRANSPORT_ICMP; + break; default: internal_error("unknown transport protocol"); break; @@ -1242,7 +1283,6 @@ bool NetSessions::IsLikelyServerPort(uint32 port, TransportProto proto) const port |= UDP_PORT_MASK; else if ( proto == TRANSPORT_ICMP ) port |= ICMP_PORT_MASK; - return port_cache.find(port) != port_cache.end(); } diff --git a/src/Val.cc b/src/Val.cc index 9a1ee700ff..66770cbdb1 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -872,7 +872,6 @@ PortVal::PortVal(uint32 p, TransportProto port_type) : Val(TYPE_PORT) case TRANSPORT_ICMP: p |= ICMP_PORT_MASK; break; - default: break; // "other" } diff --git a/src/Val.h b/src/Val.h index 5a2faee9d7..b6effcb9e9 100644 --- a/src/Val.h +++ b/src/Val.h @@ -513,9 +513,10 @@ protected: #define NUM_PORT_SPACES 4 #define PORT_SPACE_MASK 0x30000 -#define TCP_PORT_MASK 0x10000 -#define UDP_PORT_MASK 0x20000 -#define ICMP_PORT_MASK 0x30000 +#define TCP_PORT_MASK 0x10000 +#define UDP_PORT_MASK 0x20000 +#define ICMP_PORT_MASK 0x30000 + typedef enum { TRANSPORT_UNKNOWN, TRANSPORT_TCP, TRANSPORT_UDP, TRANSPORT_ICMP, @@ -537,6 +538,7 @@ public: int IsUDP() const; int IsICMP() const; + TransportProto PortType() const { if ( IsTCP() ) diff --git a/src/event.bif b/src/event.bif index 3171b02dde..ffee9244b7 100644 --- a/src/event.bif +++ b/src/event.bif @@ -52,11 +52,21 @@ event udp_request%(u: connection%); event udp_reply%(u: connection%); event udp_contents%(u: connection, is_orig: bool, contents: string%); event udp_session_done%(u: connection%); -event icmp_sent%(c: connection, icmp: icmp_conn%); -event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); -event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); + +event icmp_sent%(c: connection, icmp: icmp_conn, ICMP6: bool%); +event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string, ICMP6: bool%); +event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string, ICMP6: bool%); event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); -event icmp_time_exceeded%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); +event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); +event icmp_router_advertisement%(c: connection, icmp: icmp_conn, ICMP6: bool%); + + + +event icmp6_placeholder%(c: connection, icmp: icmp_conn, ICMP6: bool%); + + + + event net_stats_update%(t: time, ns: net_stats%); event conn_stats%(c: connection, os: endpoint_stats, rs: endpoint_stats%); event conn_weird%(name: string, c: connection%); diff --git a/src/net_util.cc b/src/net_util.cc index e49d575fa0..4c57f12213 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -86,6 +86,14 @@ int udp_checksum(const struct ip* ip, const struct udphdr* up, int len) #ifdef BROv6 int udp6_checksum(const struct ip6_hdr* ip6, const struct udphdr* up, int len) { + /**From RFC for udp4 (same for udp6, except for different pseudoheader which is same as for icmp6) + Computed as the 16-bit one's complement of the one's complement sum of a + pseudo header of information from the IP header, the 
UDP header, and the + data, padded as needed with zero bytes at the end to make a multiple of + two bytes. If the checksum is cleared to zero, then checksuming is + disabled. If the computed checksum is zero, then this field must be set + to 0xFFFF. + **/ uint32 sum; if ( len % 2 == 1 ) @@ -97,19 +105,61 @@ int udp6_checksum(const struct ip6_hdr* ip6, const struct udphdr* up, int len) sum = ones_complement_checksum((void*) ip6->ip6_src.s6_addr, 16, sum); sum = ones_complement_checksum((void*) ip6->ip6_dst.s6_addr, 16, sum); - sum = ones_complement_checksum((void*) &len, 4, sum); + uint32 l = htonl(len); + sum = ones_complement_checksum((void*) &l, 4, sum); uint32 addl_pseudo = htons(IPPROTO_UDP); sum = ones_complement_checksum((void*) &addl_pseudo, 4, sum); sum = ones_complement_checksum((void*) up, len, sum); + //printf("checksum, calculated for UDP6: %d\n",sum); + return sum; } + + +int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, int len) + { + /**From RFC + Checksum that covers the ICMPv6 message. This field contains the 16-bit one's + complement of the one's complement sum of the entire ICMPv6 message starting + with the ICMPv6 message type field, prepended with a pseudo-header of IPv6 + header fields. + **/ + uint32 sum; + + if ( len % 2 == 1 ) + // Add in pad byte. + sum += htons(((const u_char*) icmpp)[len - 1] << 8); + else + sum = 0; + + //pseudoheader as in udp6 above + sum = ones_complement_checksum((void*) ip6->ip6_src.s6_addr, 16, sum); + sum = ones_complement_checksum((void*) ip6->ip6_dst.s6_addr, 16, sum); + uint32 l = htonl(len); + sum = ones_complement_checksum((void*) &l, 4, sum); + uint32 addl_pseudo = htons(IPPROTO_ICMPV6); + sum = ones_complement_checksum((void*) &addl_pseudo, 4, sum); + //pseudoheader complete + + sum = ones_complement_checksum((void*) icmpp, len, sum); + + //printf("checksum, calculated for ICMP6: %d\n",sum); + + return sum; + } + #endif int icmp_checksum(const struct icmp* icmpp, int len) { + /**From RFC + Checksum that covers the ICMP message. This is the 16-bit one's + complement of the one's complement sum of the ICMP message starting + with the Type field. The checksum field should be cleared to zero + before generating the checksum. + **/ uint32 sum; - if ( len % 2 == 1 ) // Add in pad byte. sum = htons(((const u_char*) icmpp)[len - 1] << 8); @@ -118,10 +168,15 @@ int icmp_checksum(const struct icmp* icmpp, int len) sum = ones_complement_checksum((void*) icmpp, len, sum); + //printf("checksum, calculated for ICMP4: %d\n",sum); + return sum; } + + + #define CLASS_A 0x00000000 #define CLASS_B 0x80000000 #define CLASS_C 0xc0000000 diff --git a/src/net_util.h b/src/net_util.h index 25b6b293fc..4c17104573 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -91,6 +91,7 @@ extern int udp_checksum(const struct ip* ip, const struct udphdr* up, int len); #ifdef BROv6 extern int udp6_checksum(const struct ip6_hdr* ip, const struct udphdr* up, int len); +extern int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, int len); #endif extern int icmp_checksum(const struct icmp* icmpp, int len); From 50181edd8415e2f698a705932af42e0fb0e5c2f9 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 25 Jan 2011 17:54:20 -0800 Subject: [PATCH 002/651] Significant edit pass over ICMPv6 code. Matti, more per mail. 
--- policy/bro.init | 13 +- policy/icmp.bro | 243 +++++---------- src/Analyzer.cc | 14 - src/AnalyzerTags.h | 2 +- src/DPM.cc | 34 --- src/ICMP.cc | 741 ++++++++++++++++++--------------------------- src/ICMP.h | 43 ++- src/event.bif | 8 +- src/net_util.cc | 39 +-- src/net_util.h | 6 +- 10 files changed, 424 insertions(+), 719 deletions(-) diff --git a/policy/bro.init b/policy/bro.init index e812db0075..5b963169df 100644 --- a/policy/bro.init +++ b/policy/bro.init @@ -28,6 +28,8 @@ type icmp_conn: record { itype: count; icode: count; len: count; + + v6: bool; # true if it's an ICMPv6 packet. }; type icmp_hdr: record { @@ -38,12 +40,11 @@ type icmp_context: record { id: conn_id; len: count; proto: count; - frag_offset: count &optional; #no frag offset for IPv6 - bad_hdr_len: bool &optional; - bad_checksum: bool &optional; #no checksum in IPv6 header - MF: bool &optional; #no MF for IPv6 - DF: bool &optional; #no DF for IPv6 - ICMP6Flag: bool; + bad_hdr_len: bool; + bad_checksum: bool; # always true for ICMPv6. + frag_offset: count; # always 0 for IMCPv6. + MF: bool; # always false for IMCPv6. + DF: bool; # always true for ICMPv6. }; type addr_set: set[addr]; diff --git a/policy/icmp.bro b/policy/icmp.bro index d75b2a3731..d5e06c4afa 100644 --- a/policy/icmp.bro +++ b/policy/icmp.bro @@ -1,5 +1,4 @@ # $Id: icmp.bro 6883 2009-08-19 21:08:09Z vern $ -# While using this script, please notice that the last F/T value is the IPv6 Flag @load hot @load weird @@ -18,6 +17,7 @@ export { ICMPAsymPayload, # payload in echo req-resp not the same ICMPConnectionPair, # too many ICMPs between hosts ICMPAddressScan, + ICMPRogueRouter, # v6 advertisement from unknown router # The following isn't presently sufficiently useful due # to cold start and packet drops. @@ -35,6 +35,12 @@ export { const detect_conn_pairs = F &redef; # switch for connection pair const detect_payload_asym = F &redef; # switch for echo payload const conn_pair_threshold = 200 &redef; + + # If the IPv6 routers in a network are all known, they can be + # whitelisted here. If so, any other router seen sending an + # announcement will be reported. If this set remains empty, no such + # detection will be done. 
+ const router_whitelist: set[addr] &redef; } global conn_pair:table[addr] of set[addr] &create_expire = 1 day; @@ -56,16 +62,10 @@ type flow_info: record { payload: string; }; -#Insert whitelisted routers here, Router advertisements from other -#routers will be logged as possible rogue router attacks -const routers_whitelist: table[string] of bool = { - #["fe80::260:97ff:fe07:69ea"] = T, #an example - } &redef &default = F; - const names: table[count] of string = { [0] = "echo_reply", - [1] = "unreach", #icmpv6 - [2] = "too_big", #icmpv6 + [1] = "unreach", # icmpv6 + [2] = "too_big", # icmpv6 [3] = "unreach", [4] = "quench", [5] = "redirect", @@ -80,33 +80,33 @@ const names: table[count] of string = { [16] = "info_reply", [17] = "mask_req", [18] = "mask_reply", - [128] = "echo_req", #icmpv6 - [129] = "echo_reply", #icmpv6 - [130] = "group_memb_query", #icmpv6 - [131] = "group_memb_report", #icmpv6 - [132] = "group_memb_reduct", #icmpv6 - [133] = "router_sol", #icmpv6 - [134] = "router_ad", #icmpv6 - [135] = "neighbor_sol", #icmpv6 - [136] = "neighbor_ad", #icmpv6 - [137] = "redirect", #icmpv6 - [138] = "router_renum", #icmpv6 - [139] = "node_info_query", #icmpv6 - [140] = "node_info_resp", #icmpv6 - [141] = "inv_neigh_disc_sol", #icmpv6 - [142] = "inv_neigh_disc_ad", #icmpv6 - [143] = "mul_lis_report", #icmpv6 - [144] = "home_agent_addr_req", #icmpv6 - [145] = "home_agent_addr_reply",#icmpv6 - [146] = "mobible_prefx_sol", #icmpv6 - [147] = "mobible_prefx_ad", #icmpv6 - [148] = "cert_path_sol", #icmpv6 - [149] = "cert_path_ad", #icmpv6 - [150] = "experimental", #icmpv6 - [151] = "mcast_router_ad", #icmpv6 - [152] = "mcast_router_sol", #icmpv6 - [153] = "mcast_router_term", #icmpv6 - [154] = "fmip", #icmpv6 + [128] = "echo_req", # icmpv6 + [129] = "echo_reply", # icmpv6 + [130] = "group_memb_query", # icmpv6 + [131] = "group_memb_report", # icmpv6 + [132] = "group_memb_reduct", # icmpv6 + [133] = "router_sol", # icmpv6 + [134] = "router_ad", # icmpv6 + [135] = "neighbor_sol", # icmpv6 + [136] = "neighbor_ad", # icmpv6 + [137] = "redirect", # icmpv6 + [138] = "router_renum", # icmpv6 + [139] = "node_info_query", # icmpv6 + [140] = "node_info_resp", # icmpv6 + [141] = "inv_neigh_disc_sol", # icmpv6 + [142] = "inv_neigh_disc_ad", # icmpv6 + [143] = "mul_lis_report", # icmpv6 + [144] = "home_agent_addr_req", # icmpv6 + [145] = "home_agent_addr_reply",# icmpv6 + [146] = "mobible_prefx_sol", # icmpv6 + [147] = "mobible_prefx_ad", # icmpv6 + [148] = "cert_path_sol", # icmpv6 + [149] = "cert_path_ad", # icmpv6 + [150] = "experimental", # icmpv6 + [151] = "mcast_router_ad", # icmpv6 + [152] = "mcast_router_sol", # icmpv6 + [153] = "mcast_router_term", # icmpv6 + [154] = "fmip", # icmpv6 } &default = function(n: count): string { return fmt("icmp-%d", n); }; @@ -116,8 +116,8 @@ const IP_proto_name: table[count] of string = { [2] = "IGMP", [6] = "TCP", [17] = "UDP", - [41] = "IP6", - [58] = "ICMP6", + [41] = "IPV6", + [58] = "ICMPV6", } &default = function(n: count): string { return fmt("%s", n); } &redef; @@ -160,13 +160,38 @@ global flows: table[flow_id] of flow_info &read_expire = 45 sec &expire_func = flush_flow; -event icmp_sent(c: connection, icmp: icmp_conn, ICMP6: bool) +function print_log(c: connection, icmp: icmp_conn, addl: string) { + if ( ! 
log_details ) + return; - print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s %s", + print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s", network_time(), 0.0, icmp$orig_h, icmp$resp_h, - names[icmp$itype], icmp$itype, icmp$icode, "icmp", - icmp$len, "0", "SH", ICMP6); + names[icmp$itype], icmp$itype, icmp$icode, + icmp$v6 ? "icmp6" : "icmp", icmp$len, addl); + } + +function print_log_with_context(c: connection, icmp: icmp_conn, context: icmp_context, addl: string) + { + # Due to the connection data contained *within* + # them, each log line will contain two connections' worth + # of data. The initial ICMP connection info is the same + # as logged for connections. + + local ctx = fmt("0 EncapPkt: %s %s %s %s %s %s %s %s %s", + context$id$orig_h, context$id$orig_p, + context$id$resp_h, context$id$resp_p, + context$len, IP_proto_name[context$proto], + context$len, context$bad_hdr_len, + context$bad_checksum); + + print_log(c, icmp, ctx); + } + + +event icmp_sent(c: connection, icmp: icmp_conn) + { + print_log(c, icmp, "0 SH"); } event flow_summary(flow: flow_id, last_time: time) @@ -212,63 +237,18 @@ function update_flow(icmp: icmp_conn, id: count, is_orig: bool, payload: string) } -event icmp_error_message(c: connection, icmp: icmp_conn, code: count, context: icmp_context) #for other but the unreach types, which is preserved +event icmp_error_message(c: connection, icmp: icmp_conn, code: count, context: icmp_context) { - - if ( active_connection(context$id) ) - { - # This section allows Bro to act on ICMP error message packets - # that happen in the context of an active connection. It is - # not currently used. - local c2 = connection_record(context$id); - local os = c2$orig$state; - local rs = c2$resp$state; - local is_attempt = - is_tcp_port(c2$id$orig_p) ? - (os == TCP_SYN_SENT && rs == TCP_INACTIVE) : - (os == UDP_ACTIVE && rs == UDP_INACTIVE); - - # Insert action here. - } - - if ( log_details ) - { - # ICMP error message packets are logged here. - # Due to the connection data contained *within* - # them, each log line will contain two connections' worth - # of data. The initial ICMP connection info is the same - # as logged for connections. - print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s", - network_time(), 0.0, icmp$orig_h, icmp$resp_h, - names[icmp$itype], icmp$itype, icmp$icode, "icmp", - icmp$len, "0", "EncapPkt:", - # This is the encapsulated packet: - context$id$orig_h, context$id$orig_p, - context$id$resp_h, context$id$resp_p, - context$len, IP_proto_name[context$proto], - context$len, context$bad_hdr_len, - context$bad_checksum, context$ICMP6Flag); - } - + print_log_with_context(c, icmp, context, ""); } - - -event icmp6_placeholder(c: connection, icmp: icmp_conn, ICMP6: bool) #just for testing - { - print "icmp6_placeholder triggered"; - } - - -event icmp_echo_request(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string, ICMP6: bool) +event icmp_echo_request(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string) { update_flow(icmp, id, T, payload); - local orig = icmp$orig_h; local resp = icmp$resp_h; - # Simple ping scan detector. if ( detect_scans && (orig !in Scan::distinct_peers || @@ -320,7 +300,7 @@ event icmp_echo_request(c: connection, icmp: icmp_conn, id: count, seq: count, p } event icmp_echo_reply(c: connection, icmp: icmp_conn, id: count, - seq: count, payload: string, ICMP6: bool) + seq: count, payload: string) { # Check payload with the associated flow. 
@@ -329,8 +309,6 @@ event icmp_echo_reply(c: connection, icmp: icmp_conn, id: count, fid$resp_h = icmp$orig_h; # it's an echo reply. fid$id = id; - - if ( fid !in flows ) { # NOTICE([$note=ICMPUnpairedEchoReply, @@ -357,78 +335,19 @@ event icmp_echo_reply(c: connection, icmp: icmp_conn, id: count, update_flow(icmp, id, F, payload); } - - event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, context: icmp_context) { - - if ( active_connection(context$id) ) - { - # This section allows Bro to act on ICMP-unreachable packets - # that happen in the context of an active connection. It is - # not currently used. - local c2 = connection_record(context$id); - local os = c2$orig$state; - local rs = c2$resp$state; - local is_attempt = - is_tcp_port(c2$id$orig_p) ? - (os == TCP_SYN_SENT && rs == TCP_INACTIVE) : - (os == UDP_ACTIVE && rs == UDP_INACTIVE); - - # Insert action here. - } - - if ( log_details ) - { - # ICMP unreachable packets are the only ones currently - # logged. Due to the connection data contained *within* - # them, each log line will contain two connections' worth - # of data. The initial ICMP connection info is the same - # as logged for connections. - print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s", - network_time(), 0.0, icmp$orig_h, icmp$resp_h, - names[icmp$itype], icmp$itype, icmp$icode, "icmp", - icmp$len, "0", "EncapPkt:", - # This is the encapsulated packet: - context$id$orig_h, context$id$orig_p, - context$id$resp_h, context$id$resp_p, - context$len, IP_proto_name[context$proto], - context$len, context$bad_hdr_len, - context$bad_checksum, context$ICMP6Flag); - } + print_log_with_context(c, icmp, context, ""); } - - - event icmp_router_advertisement(c: connection, icmp: icmp_conn, ICMP6: bool) + +event icmp_router_advertisement(c: connection, icmp: icmp_conn) { - if ( routers_whitelist[ fmt("%s",icmp$orig_h) ] ) - { - print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s %s", - network_time(), 0.0, icmp$orig_h, icmp$resp_h, - names[icmp$itype], icmp$itype, icmp$icode, "icmp", - icmp$len, "0", "SH", ICMP6); - } - else - { - print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s", - network_time(), 0.0, icmp$orig_h, icmp$resp_h, - names[icmp$itype], "Possible Rogue Router Detected", icmp$itype, icmp$icode, - icmp$len, ICMP6); - } - - } - + print_log(c, icmp, ""); - - - - - - - - - - - - + if ( |router_whitelist| == 0 || icmp$orig_h in router_whitelist ) + return; + + NOTICE([$note=ICMPRogueRouter, + $msg=fmt("rouge router advertisement from %s", icmp$orig_h)]); + } diff --git a/src/Analyzer.cc b/src/Analyzer.cc index a3f6bbfc2c..06b05960b7 100644 --- a/src/Analyzer.cc +++ b/src/Analyzer.cc @@ -48,20 +48,6 @@ const Analyzer::Config Analyzer::analyzer_configs[] = { { AnalyzerTag::ICMP, "ICMP", ICMP_Analyzer::InstantiateAnalyzer, ICMP_Analyzer::Available, 0, false }, - /*{ AnalyzerTag::ICMP_TimeExceeded, "ICMP_TIMEEXCEEDED", - ICMP_TimeExceeded_Analyzer::InstantiateAnalyzer, - ICMP_TimeExceeded_Analyzer::Available, 0, false }, - { AnalyzerTag::ICMP_Unreachable, "ICMP_UNREACHABLE", - ICMP_Unreachable_Analyzer::InstantiateAnalyzer, - ICMP_Unreachable_Analyzer::Available, 0, false }, - { AnalyzerTag::ICMP_Echo, "ICMP_ECHO", - ICMP_Echo_Analyzer::InstantiateAnalyzer, - ICMP_Echo_Analyzer::Available, 0, false },*/ - - - - - { AnalyzerTag::TCP, "TCP", TCP_Analyzer::InstantiateAnalyzer, TCP_Analyzer::Available, 0, false }, { AnalyzerTag::UDP, "UDP", UDP_Analyzer::InstantiateAnalyzer, diff --git a/src/AnalyzerTags.h 
b/src/AnalyzerTags.h index eb18a03a73..231b39364a 100644 --- a/src/AnalyzerTags.h +++ b/src/AnalyzerTags.h @@ -22,7 +22,7 @@ namespace AnalyzerTag { PIA_TCP, PIA_UDP, // Transport-layer analyzers. - ICMP,/* ICMP_TimeExceeded, ICMP_Unreachable, ICMP_Echo*/ TCP, UDP, + ICMP, TCP, UDP, // Application-layer analyzers (hand-written). BitTorrent, BitTorrentTracker, diff --git a/src/DPM.cc b/src/DPM.cc index 7be9376b9f..b9afb15196 100644 --- a/src/DPM.cc +++ b/src/DPM.cc @@ -214,42 +214,8 @@ bool DPM::BuildInitialAnalyzerTree(TransportProto proto, Connection* conn, break; case TRANSPORT_ICMP: { - const struct icmp* icmpp = (const struct icmp *) data; - - - //Old code, moving to having only one ICMP analyzer - /*switch ( icmpp->icmp_type ) { - - case ICMP_ECHO: - case ICMP_ECHOREPLY: - if ( ICMP_Echo_Analyzer::Available() ) - { - root = new ICMP_Echo_Analyzer(conn); - DBG_DPD(conn, "activated ICMP Echo analyzer"); - } - break; - - case ICMP_UNREACH: - if ( ICMP_Unreachable_Analyzer::Available() ) - { - root = new ICMP_Unreachable_Analyzer(conn); - DBG_DPD(conn, "activated ICMP Unreachable analyzer"); - } - break; - - case ICMP_TIMXCEED: - if ( ICMP_TimeExceeded_Analyzer::Available() ) - { - root = new ICMP_TimeExceeded_Analyzer(conn); - DBG_DPD(conn, "activated ICMP Time Exceeded analyzer"); - } - break; - }*/ - //if ( ! root ) - root = new ICMP_Analyzer(conn); DBG_DPD(conn, "activated ICMP analyzer"); - analyzed = true; break; } diff --git a/src/ICMP.cc b/src/ICMP.cc index b83cf76a40..1ec1d2901c 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -11,15 +11,12 @@ #include - - ICMP_Analyzer::ICMP_Analyzer(Connection* c) : TransportLayerAnalyzer(AnalyzerTag::ICMP, c) { icmp_conn_val = 0; c->SetInactivityTimeout(icmp_inactivity_timeout); request_len = reply_len = -1; - } ICMP_Analyzer::ICMP_Analyzer(AnalyzerTag::Tag tag, Connection* c) @@ -37,7 +34,7 @@ void ICMP_Analyzer::Done() matcher_state.FinishEndpointMatcher(); } -void ICMP_Analyzer::DeliverPacket(int arg_len, const u_char* data, +void ICMP_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig, int seq, const IP_Hdr* ip, int caplen) { assert(ip); @@ -50,42 +47,39 @@ void ICMP_Analyzer::DeliverPacket(int arg_len, const u_char* data, // Subtract off the common part of ICMP header. PacketContents(data + 8, min(len, caplen) - 8); - const struct icmp* icmpp = (const struct icmp*) data; - len = arg_len; + assert(caplen >= len); // Should have been caught earlier already. + + if ( ! ignore_checksums ) + { + int chksum; - //We need a separate calculation for ICMP6 checksums, pseudoheader is appended to the - //ICMP6 checksum calculation, which is different from ICMP4 #ifdef BROv6 - - - if (ip->NextProto() == IPPROTO_ICMPV6 && ! ignore_checksums && - caplen >= len && icmp6_checksum(icmpp,ip->IP6_Hdr(),len )!= 0xffff ) + switch ( ip->NextProto() ) { - Weird("bad_ICMP6_checksum"); - return; + case IPPROTO_ICMP: + chksum = icmp_checksum(icmpp, len); + break; + + case IPPROTO_ICMPV6: + chksum = icmp6_checksum(icmpp, ip->IP6_Hdr(), len); + break; + + default: + internal_error("unexpected IP proto in ICMP analyzer"); } - else if (ip->NextProto() != IPPROTO_ICMPV6 && ! ignore_checksums && - caplen >= len && icmp_checksum(icmpp, len) != 0xffff ) - { - Weird("bad_ICMP_checksum"); - return; - } - - - #else - - if ( ! ignore_checksums && caplen >= len && - icmp_checksum(icmpp, len) != 0xffff ) - { - Weird("bad_ICMP_checksum"); - return; - } + # Classic v4 version. 
+ chksum = icmp_checksum(icmpp, len); #endif - + if ( chksum != 0xffff ) + { + Weird("bad_ICMP6_checksum"); + return; + } + } Conn()->SetLastTime(current_timestamp); @@ -95,127 +89,104 @@ void ICMP_Analyzer::DeliverPacket(int arg_len, const u_char* data, matcher_state.InitEndpointMatcher(this, ip, len, is_orig, 0); } - type = icmpp->icmp_type; - code = icmpp->icmp_code; - - // Move past common portion of ICMP header. //OK for ICMPv6? + // Move past common portion of ICMP header. data += 8; caplen -= 8; len -= 8; - int& len_stat = is_orig ? request_len : reply_len; - if ( len_stat < 0 ) - len_stat = len; + if ( ip->NextProto() == IPPROTO_ICMP ) + NextICMP4(current_timestamp, icmpp, len, caplen, data, ip); else - len_stat += len; + NextICMP6(current_timestamp, icmpp, len, caplen, data, ip); - NextICMP(current_timestamp, icmpp, len, caplen, data, ip); if ( rule_matcher ) matcher_state.Match(Rule::PAYLOAD, data, len, is_orig, false, false, true); } - - -/********************Generic analyzer for all ICMP4/ICMP6******************************/ -void ICMP_Analyzer::NextICMP(double t , const struct icmp* icmpp , int len , int caplen, +void ICMP_Analyzer::NextICMP4(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr ) { - int ICMP6Flag = 0; - - //printf("Executing: ICMP_Analyzer::NextICMP\n"); - //printf("New analyzer structure\n"); - - if ( ip_hdr->NextProto() == IPPROTO_ICMPV6 ) + switch ( icmpp->icmp_type ) { - //printf("ICMP6!\n"); - ICMP6Flag = 1; - - switch (type) //Add new ICMP6 functions here, you can also use codes to narrow the area of single functions. - { - //All the echo stuff here - case ICMP6_ECHO_REQUEST: - case ICMP6_ECHO_REPLY: + case ICMP_ECHO: + case ICMP_ECHOREPLY: Echo(t, icmpp, len, caplen, data, ip_hdr); break; + case ICMP_UNREACH: + case ICMP_TIMXCEED: + Context4(t, icmpp, len, caplen, data, ip_hdr); + break; - //Error messages all have the same structure for their context, and are handled by the same function. - case ICMP6_PARAM_PROB: - case ICMP6_TIME_EXCEEDED: - case ICMP6_PACKET_TOO_BIG: - case ICMP6_DST_UNREACH: - Context(t, icmpp, len, caplen, data, ip_hdr); - break; - - //All router related stuff should eventually be handled by the Router() - case ND_REDIRECT: - case ND_ROUTER_SOLICIT: - case ICMP6_ROUTER_RENUMBERING: - case ND_ROUTER_ADVERT: - Router(t, icmpp, len, caplen, data, ip_hdr); //currently only logs the router stuff for other than router_advert - break; - - /* listed for convenience - case ICMP6_PARAM_PROB: break; - case MLD_LISTENER_QUERY: break; - case MLD_LISTENER_REPORT: break; - case MLD_LISTENER_REDUCTION: break; - case ND_NEIGHBOR_SOLICIT: break; - case ND_NEIGHBOR_ADVERT: break; - case ND_REDIRECT: break; - case ICMP6_ROUTER_RENUMBERING: break; - case ND_NEIGHBOR_SOLICIT: break; - case ND_NEIGHBOR_ADVERT: break; - case ICMP6_TIME_EXCEEDED: break; - */ - - default: ICMPEvent(icmp_sent, ICMP6Flag); break; - } + default: + ICMPEvent(icmp_sent, icmpp, len, 0); break; } - else if ( ip_hdr->NextProto() == IPPROTO_ICMP ) - { + } - switch (type) //Add new ICMP4 functions here - { - case ICMP_ECHO: - case ICMP_ECHOREPLY: +#ifdef BROv6 +void ICMP_Analyzer::NextICMP6(double t, const struct icmp* icmpp, int len, int caplen, + const u_char*& data, const IP_Hdr* ip_hdr ) + { + switch ( icmpp->icmp_type ) + { + // Echo types. 
+ case ICMP6_ECHO_REQUEST: + case ICMP6_ECHO_REPLY: Echo(t, icmpp, len, caplen, data, ip_hdr); break; - case ICMP_UNREACH: - case ICMP_TIMXCEED: - Context(t, icmpp, len, caplen, data, ip_hdr); + // Error messages all have the same structure for their context, + // and are handled by the same function. + case ICMP6_PARAM_PROB: + case ICMP6_TIME_EXCEEDED: + case ICMP6_PACKET_TOO_BIG: + case ICMP6_DST_UNREACH: + Context6(t, icmpp, len, caplen, data, ip_hdr); break; - default: ICMPEvent(icmp_sent, ICMP6Flag); break; - } - + // Router related messages. + case ND_REDIRECT: + case ND_ROUTER_SOLICIT: + case ICMP6_ROUTER_RENUMBERING: + case ND_ROUTER_ADVERT: + Router(t, icmpp, len, caplen, data, ip_hdr); + break; +#if 0 + // Currently not specifically implemented. + case ICMP6_PARAM_PROB: + case MLD_LISTENER_QUERY: + case MLD_LISTENER_REPORT: + case MLD_LISTENER_REDUCTION: + case ND_NEIGHBOR_SOLICIT: + case ND_NEIGHBOR_ADVERT: + case ND_REDIRECT: + case ICMP6_ROUTER_RENUMBERING: + case ND_NEIGHBOR_SOLICIT: + case ND_NEIGHBOR_ADVERT: + case ICMP6_TIME_EXCEEDED: +#endif + default: + ICMPEvent(icmp_sent, icmpp, len, 1); + break; } - else - Weird("Malformed ip header"); - } + } +#endif - -void ICMP_Analyzer::ICMPEvent(EventHandlerPtr f, int ICMP6Flag) +void ICMP_Analyzer::ICMPEvent(EventHandlerPtr f, const struct icmp* icmpp, int len, int icmpv6) { if ( ! f ) - return; - + return; val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(ICMP6Flag)); - //if ( f == icmp_sent ) //for now, testing purposes - vl->append(new Val(ICMP6Flag, TYPE_BOOL)); - + vl->append(BuildICMPVal(icmpp, len, icmpv6)); ConnectionEvent(f, vl); } - -RecordVal* ICMP_Analyzer::BuildICMPVal(int ICMP6Flag) +RecordVal* ICMP_Analyzer::BuildICMPVal(const struct icmp* icmpp, int len, int icmpv6) { if ( ! icmp_conn_val ) { @@ -223,15 +194,10 @@ RecordVal* ICMP_Analyzer::BuildICMPVal(int ICMP6Flag) icmp_conn_val->Assign(0, new AddrVal(Conn()->OrigAddr())); icmp_conn_val->Assign(1, new AddrVal(Conn()->RespAddr())); - - if ( ICMP6Flag == 1 ) - icmp_conn_val->Assign(2, new Val(Type6to4(type), TYPE_COUNT)); //to avoid errors in getting the message type *name* right on the scripting level, type number will be different from true ipv6 - else - icmp_conn_val->Assign(2, new Val(type, TYPE_COUNT)); - - - icmp_conn_val->Assign(3, new Val(code, TYPE_COUNT)); + icmp_conn_val->Assign(2, new Val(icmpp->icmp_type, TYPE_COUNT)); + icmp_conn_val->Assign(3, new Val(icmpp->icmp_code, TYPE_COUNT)); icmp_conn_val->Assign(4, new Val(len, TYPE_COUNT)); + icmp_conn_val->Assign(5, new Val(icmpv6, TYPE_BOOL)); } Ref(icmp_conn_val); @@ -239,15 +205,74 @@ RecordVal* ICMP_Analyzer::BuildICMPVal(int ICMP6Flag) return icmp_conn_val; } +TransportProto ICMP_Analyzer::GetContextProtocol(const IP_Hdr* ip_hdr, uint32* src_port, uint32* dst_port) + { + const u_char* transport_hdr; + uint32 ip_hdr_len = ip_hdr->HdrLen(); + bool ip4 = ip_hdr->IP4_Hdr(); + + if ( ip4 ) + transport_hdr = ((u_char *) ip_hdr->IP4_Hdr() + ip_hdr_len); + else + transport_hdr = ((u_char *) ip_hdr->IP6_Hdr() + ip_hdr_len); + + TransportProto proto; + + switch ( ip_hdr->NextProto() ) { + case 1: proto = TRANSPORT_ICMP; break; + case 6: proto = TRANSPORT_TCP; break; + case 17: proto = TRANSPORT_UDP; break; + case 58: proto = TRANSPORT_ICMP; //TransportProto Hack // XXX What's this? 
+ default: proto = TRANSPORT_UNKNOWN; break; + } + + switch ( proto ) { + case TRANSPORT_ICMP: + { + const struct icmp* icmpp = + (const struct icmp *) transport_hdr; + bool is_one_way; // dummy + *src_port = ntohs(icmpp->icmp_type); + + if ( ip4 ) + *dst_port = ntohs(ICMP4_counterpart(icmpp->icmp_type, + icmpp->icmp_code, is_one_way)); + else + *dst_port = ntohs(ICMP6_counterpart(icmpp->icmp_type, + icmpp->icmp_code, is_one_way)); + + break; + } + + case TRANSPORT_TCP: + { + const struct tcphdr* tp = + (const struct tcphdr *) transport_hdr; + *src_port = ntohs(tp->th_sport); + *dst_port = ntohs(tp->th_dport); + break; + } + + case TRANSPORT_UDP: + { + const struct udphdr* up = + (const struct udphdr *) transport_hdr; + *src_port = ntohs(up->uh_sport); + *dst_port = ntohs(up->uh_dport); + break; + } + + default: + *src_port = *dst_port = ntohs(0); + } + + return proto; + } + RecordVal* ICMP_Analyzer::ExtractICMP4Context(int len, const u_char*& data) { - /** - * For use only with ICMP4, ICMPV6 context extraction is still non-functional - */ - const IP_Hdr ip_hdr_data((const struct ip*) data); const IP_Hdr* ip_hdr = &ip_hdr_data; - int ICMP6Flag = 0; uint32 ip_hdr_len = ip_hdr->HdrLen(); @@ -257,132 +282,82 @@ RecordVal* ICMP_Analyzer::ExtractICMP4Context(int len, const u_char*& data) uint32 src_addr, dst_addr,src_addr2, dst_addr2; uint32 src_port, dst_port; - if ( ip_hdr_len < sizeof(struct ip) || ip_hdr_len > uint32(len) ) - { // We don't have an entire IP header. - bad_hdr_len = 1; - ip_len = frag_offset = 0; - DF = MF = bad_checksum = 0; - src_addr = dst_addr = 0; - src_port = dst_port = 0; - } - - else - { - bad_hdr_len = 0; - ip_len = ip_hdr->TotalLen(); - bad_checksum = ones_complement_checksum((void*) ip_hdr->IP4_Hdr(), ip_hdr_len, 0) != 0xffff; - - src_addr = ip_hdr->SrcAddr4(); - dst_addr = ip_hdr->DstAddr4(); - - switch ( ip_hdr->NextProto() ) { - case 1: proto = TRANSPORT_ICMP; break; - case 6: proto = TRANSPORT_TCP; break; - case 17: proto = TRANSPORT_UDP; break; - - // Default uses TRANSPORT_UNKNOWN, per initialization above. - } - - uint32 frag_field = ip_hdr->FragField(); - DF = ip_hdr->DF(); - MF = frag_field & 0x2000; - frag_offset = frag_field & /* IP_OFFMASK not portable */ 0x1fff; - - const u_char* transport_hdr = ((u_char *) ip_hdr->IP4_Hdr() + ip_hdr_len); - - if ( uint32(len) < ip_hdr_len + 4 ) //what is this value for ipv6? - { - // 4 above is the magic number meaning that both - // port numbers are included in the ICMP. 
- bad_hdr_len = 1; - src_port = dst_port = 0; - } - - switch ( proto ) { - case TRANSPORT_ICMP: - { - const struct icmp* icmpp = - (const struct icmp *) transport_hdr; - bool is_one_way; // dummy - src_port = ntohs(icmpp->icmp_type); - dst_port = ntohs(ICMP4_counterpart(icmpp->icmp_type, - icmpp->icmp_code, - is_one_way)); - } - break; - - case TRANSPORT_TCP: - { - const struct tcphdr* tp = - (const struct tcphdr *) transport_hdr; - src_port = ntohs(tp->th_sport); - dst_port = ntohs(tp->th_dport); - } - break; - - case TRANSPORT_UDP: - { - const struct udphdr* up = - (const struct udphdr *) transport_hdr; - src_port = ntohs(up->uh_sport); - dst_port = ntohs(up->uh_dport); - } - break; - - default: - src_port = dst_port = ntohs(0); - } - } - - RecordVal* iprec = new RecordVal(icmp_context); - RecordVal* id_val = new RecordVal(conn_id); - - id_val->Assign(0, new AddrVal(src_addr)); - id_val->Assign(1, new PortVal(src_port, proto)); - id_val->Assign(2, new AddrVal(dst_addr)); - id_val->Assign(3, new PortVal(dst_port, proto)); - iprec->Assign(0, id_val); - - iprec->Assign(1, new Val(ip_len, TYPE_COUNT)); - iprec->Assign(2, new Val(proto, TYPE_COUNT)); - iprec->Assign(3, new Val(frag_offset, TYPE_COUNT)); - iprec->Assign(4, new Val(bad_hdr_len, TYPE_BOOL)); - iprec->Assign(5, new Val(bad_checksum, TYPE_BOOL)); - iprec->Assign(6, new Val(MF, TYPE_BOOL)); - iprec->Assign(7, new Val(DF, TYPE_BOOL)); - iprec->Assign(8, new Val(ICMP6Flag, TYPE_BOOL)); - - return iprec; + if ( ip_hdr_len < sizeof(struct ip) || ip_hdr_len > uint32(len) ) + { + // We don't have an entire IP header. + bad_hdr_len = 1; + ip_len = frag_offset = 0; + DF = MF = bad_checksum = 0; + src_addr = dst_addr = 0; + src_port = dst_port = 0; } + else + { + bad_hdr_len = 0; + ip_len = ip_hdr->TotalLen(); + bad_checksum = ones_complement_checksum((void*) ip_hdr->IP4_Hdr(), ip_hdr_len, 0) != 0xffff; + src_addr = ip_hdr->SrcAddr4(); + dst_addr = ip_hdr->DstAddr4(); + uint32 frag_field = ip_hdr->FragField(); + DF = ip_hdr->DF(); + MF = frag_field & 0x2000; + frag_offset = frag_field & /* IP_OFFMASK not portable */ 0x1fff; + + if ( uint32(len) >= ip_hdr_len + 4 ) + proto = GetContextProtocol(ip_hdr, &src_port, &dst_port); + else + { + // 4 above is the magic number meaning that both + // port numbers are included in the ICMP. 
+ src_port = dst_port = 0; + bad_hdr_len = 1; + } + } + + RecordVal* iprec = new RecordVal(icmp_context); + RecordVal* id_val = new RecordVal(conn_id); + + id_val->Assign(0, new AddrVal(src_addr)); + id_val->Assign(1, new PortVal(src_port, proto)); + id_val->Assign(2, new AddrVal(dst_addr)); + id_val->Assign(3, new PortVal(dst_port, proto)); + + iprec->Assign(0, id_val); + iprec->Assign(1, new Val(ip_len, TYPE_COUNT)); + iprec->Assign(2, new Val(proto, TYPE_COUNT)); + iprec->Assign(3, new Val(bad_hdr_len, TYPE_BOOL)); + iprec->Assign(4, new Val(bad_checksum, TYPE_BOOL)); + iprec->Assign(5, new Val(frag_offset, TYPE_COUNT)); + iprec->Assign(6, new Val(MF, TYPE_BOOL)); + iprec->Assign(7, new Val(DF, TYPE_BOOL)); + + return iprec; + } RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) { - /** - * For use with ICMP6 error message context extraction (possibly very frail function) - */ - const IP_Hdr ip_hdr_data((const struct ip6_hdr*) data); const IP_Hdr* ip_hdr = &ip_hdr_data; - int ICMP6Flag = 1; int DF = 0, MF = 0, bad_hdr_len = 0, bad_checksum = 0; + TransportProto proto = TRANSPORT_UNKNOWN; uint32 ip_hdr_len = ip_hdr->HdrLen(); //should always be 40 uint32* src_addr; uint32* dst_addr; uint32 ip_len, frag_offset = 0; - TransportProto proto = TRANSPORT_UNKNOWN; uint32 src_port, dst_port; - if ( ip_hdr_len < sizeof(struct ip6_hdr) || ip_hdr_len != 40 ) + if ( ip_hdr_len < sizeof(struct ip6_hdr) || ip_hdr_len != 40 ) // XXX What's the 2nd part doing? { bad_hdr_len = 1; ip_len = 0; src_addr = dst_addr = 0; src_port = dst_port = 0; } + else { ip_len = ip_hdr->TotalLen(); @@ -390,62 +365,15 @@ RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) src_addr = (uint32 *) ip_hdr->SrcAddr(); dst_addr = (uint32 *) ip_hdr->DstAddr(); - - - switch ( ip_hdr->NextProto() ) { - case 1: proto = TRANSPORT_ICMP; break; - case 6: proto = TRANSPORT_TCP; break; - case 17: proto = TRANSPORT_UDP; break; - case 58: proto = TRANSPORT_ICMP; break; //TransportProto Hack - - // Default uses TRANSPORT_UNKNOWN, per initialization above. - } - - - const u_char* transport_hdr = ((u_char *)ip_hdr->IP6_Hdr() + ip_hdr_len); - - if ( uint32(len) < ip_hdr_len + 4 ) + if ( uint32(len) >= ip_hdr_len + 4 ) + proto = GetContextProtocol(ip_hdr, &src_port, &dst_port); + else { // 4 above is the magic number meaning that both // port numbers are included in the ICMP. - bad_hdr_len = 1; src_port = dst_port = 0; + bad_hdr_len = 1; } - - switch ( proto ) { - case TRANSPORT_ICMP: - { - const struct icmp* icmpp = - (const struct icmp *) transport_hdr; - bool is_one_way; // dummy - src_port = ntohs(icmpp->icmp_type); - dst_port = ntohs(ICMP6_counterpart(icmpp->icmp_type, - icmpp->icmp_code, - is_one_way)); - } - break; - - case TRANSPORT_TCP: - { - const struct tcphdr* tp = - (const struct tcphdr *) transport_hdr; - src_port = ntohs(tp->th_sport); - dst_port = ntohs(tp->th_dport); - } - break; - - case TRANSPORT_UDP: - { - const struct udphdr* up = - (const struct udphdr *) transport_hdr; - src_port = ntohs(up->uh_sport); - dst_port = ntohs(up->uh_dport); - } - break; - - default: - src_port = dst_port = ntohs(0); - } } RecordVal* iprec = new RecordVal(icmp_context); @@ -459,7 +387,7 @@ RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) iprec->Assign(0, id_val); iprec->Assign(1, new Val(ip_len, TYPE_COUNT)); - //TransportProto Hack + //TransportProto Hack // XXX Likewise. if ( ip_hdr->NextProto() == 58 || 17 ) //if the encap packet is ICMPv6 we force this... 
(cause there is no IGMP (by that name) for ICMPv6), rather ugly hack once more { iprec->Assign(2, new Val(58, TYPE_COUNT)); @@ -469,28 +397,18 @@ RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) iprec->Assign(2, new Val(proto, TYPE_COUNT)); } - iprec->Assign(3, new Val(frag_offset, TYPE_COUNT)); //NA for ip6 - iprec->Assign(4, new Val(bad_hdr_len, TYPE_BOOL)); - iprec->Assign(5, new Val(bad_checksum, TYPE_BOOL)); - iprec->Assign(6, new Val(MF, TYPE_BOOL)); //NA for ip6 - iprec->Assign(7, new Val(DF, TYPE_BOOL)); //NA for ip6 - iprec->Assign(8, new Val(ICMP6Flag, TYPE_BOOL)); //ICMP6Flag + iprec->Assign(3, new Val(bad_hdr_len, TYPE_BOOL)); + + // The following are not available for IPv6. + iprec->Assign(4, new Val(0, TYPE_BOOL)); // bad_checksum + iprec->Assign(5, new Val(frag_offset, TYPE_COUNT)); // frag_offset + iprec->Assign(6, new Val(0, TYPE_BOOL)); // MF + iprec->Assign(7, new Val(1, TYPE_BOOL)); // DF return iprec; } - - - - - - - - - - - bool ICMP_Analyzer::IsReuse(double /* t */, const u_char* /* pkt */) { return 0; @@ -504,10 +422,12 @@ void ICMP_Analyzer::Describe(ODesc* d) const d->AddSP(")"); d->Add(dotted_addr(Conn()->OrigAddr())); +#if 0 d->Add("."); d->Add(type); d->Add("."); d->Add(code); +#endif d->SP(); d->AddSP("->"); @@ -543,19 +463,16 @@ unsigned int ICMP_Analyzer::MemoryAllocation() const void ICMP_Analyzer::Echo(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) - { //For handling all Echo related ICMP messages + { + // For handling all Echo related ICMP messages EventHandlerPtr f = 0; - int ICMP6Flag = 0; - - //printf("Executing: Echo, NextProto:%d\n",ip_hdr->NextProto()); +#ifdef BROv6 if ( ip_hdr->NextProto() == IPPROTO_ICMPV6 ) - { - f = type == ICMP6_ECHO_REQUEST ? icmp_echo_request : icmp_echo_reply; - ICMP6Flag = 1; - } + f = (icmpp->icmp_type == ICMP6_ECHO_REQUEST) ? icmp_echo_request : icmp_echo_reply; else - f = type == ICMP_ECHO ? icmp_echo_request : icmp_echo_reply; +#endif + f = (icmpp->icmp_type == ICMP_ECHO) ? icmp_echo_request : icmp_echo_reply; if ( ! 
f ) return; @@ -563,137 +480,110 @@ void ICMP_Analyzer::Echo(double t, const struct icmp* icmpp, int len, int iid = ntohs(icmpp->icmp_hun.ih_idseq.icd_id); int iseq = ntohs(icmpp->icmp_hun.ih_idseq.icd_seq); - //printf("Check these values: iid:[%d] iseq:[%d]\n",iid,iseq); - BroString* payload = new BroString(data, caplen, 0); val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(ICMP6Flag)); + vl->append(BuildICMPVal(icmpp, len, ip_hdr->NextProto() != IPPROTO_ICMP)); vl->append(new Val(iid, TYPE_COUNT)); vl->append(new Val(iseq, TYPE_COUNT)); vl->append(new StringVal(payload)); - vl->append(new Val(ICMP6Flag, TYPE_BOOL)); ConnectionEvent(f, vl); } - - - - - - - - void ICMP_Analyzer::Router(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) - //For handling router related ICMP messages, { EventHandlerPtr f = 0; - int ICMP6Flag = 1; - switch ( type ) + switch ( icmpp->icmp_type ) { - case ND_ROUTER_ADVERT: f = icmp_router_advertisement; break; + case ND_ROUTER_ADVERT: + f = icmp_router_advertisement; + break; case ND_REDIRECT: case ND_ROUTER_SOLICIT: case ICMP6_ROUTER_RENUMBERING: - default: ICMPEvent(icmp_sent,ICMP6Flag); return; + default: + ICMPEvent(icmp_sent, icmpp, len, 1); + return; } val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(ICMP6Flag)); - vl->append(new Val(ICMP6Flag, TYPE_BOOL)); + vl->append(BuildICMPVal(icmpp, len, 1)); ConnectionEvent(f, vl); } - - - - - - - - - - - - -void ICMP_Analyzer::Context(double t, const struct icmp* icmpp, - int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) - {//For handling the ICMP error messages - +void ICMP_Analyzer::Context4(double t, const struct icmp* icmpp, + int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) + { EventHandlerPtr f = 0; - int ICMP6Flag = 0; - - if ( ip_hdr->NextProto() == IPPROTO_ICMPV6 ) //is ip6 - { - - ICMP6Flag = 1; - //printf("Executing: Context for ICMPv6\n"); - - switch ( type ) - { - case ICMP6_DST_UNREACH: f = icmp_unreachable; break; - case ICMP6_PARAM_PROB: f = icmp_error_message; break; - case ICMP6_TIME_EXCEEDED: f = icmp_error_message; break; - case ICMP6_PACKET_TOO_BIG: f = icmp_error_message; break; - } - - if ( f ) - { - val_list* vl = new val_list; - vl->append(BuildConnVal()); //check for ip6 functionality - vl->append(BuildICMPVal(ICMP6Flag)); //check for ip6 functionality - vl->append(new Val(code, TYPE_COUNT)); - vl->append(ExtractICMP6Context(caplen, data)); - - ConnectionEvent(f, vl); - } - - } - else if ( ip_hdr->NextProto() == IPPROTO_ICMP ) - { - //printf("Executing: Context for ICMP\n"); - switch ( type ) - { - case ICMP_UNREACH: f = icmp_unreachable; break; - case ICMP_TIMXCEED: f = icmp_error_message; break; - } - - if ( f ) - { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(ICMP6Flag)); - vl->append(new Val(code, TYPE_COUNT)); - vl->append(ExtractICMP4Context(caplen, data)); - - - ConnectionEvent(f, vl); - } - - } - else + switch ( icmpp->icmp_type ) { - Weird("ICMP packet, invalid data\n"); //make this more descriptive + case ICMP_UNREACH: + f = icmp_unreachable; + break; + + case ICMP_TIMXCEED: + f = icmp_error_message; + break; + } + + if ( f ) + { + val_list* vl = new val_list; + vl->append(BuildConnVal()); + vl->append(BuildICMPVal(icmpp, len, 0)); + vl->append(new Val(icmpp->icmp_code, TYPE_COUNT)); + vl->append(ExtractICMP4Context(caplen, data)); + ConnectionEvent(f, vl); } } +#ifdef BROv6 +void 
ICMP_Analyzer::Context6(double t, const struct icmp* icmpp, + int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) + { + EventHandlerPtr f = 0; + + switch ( icmpp->icmp_type ) + { + case ICMP6_DST_UNREACH: + f = icmp_unreachable; + break; + + case ICMP6_PARAM_PROB: + case ICMP6_TIME_EXCEEDED: + case ICMP6_PACKET_TOO_BIG: + f = icmp_error_message; + break; + } + + if ( f ) + { + val_list* vl = new val_list; + vl->append(BuildConnVal()); + vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(new Val(icmpp->icmp_code, TYPE_COUNT)); + vl->append(ExtractICMP6Context(caplen, data)); + ConnectionEvent(f, vl); + } + } +#endif int ICMP4_counterpart(int icmp_type, int icmp_code, bool& is_one_way) { is_one_way = false; - // return the counterpart type if one exists. This allows us + // Return the counterpart type if one exists. This allows us // to track corresponding ICMP requests/replies. // Note that for the two-way ICMP messages, icmp_code is // always 0 (RFC 792). @@ -720,57 +610,30 @@ int ICMP6_counterpart(int icmp_type, int icmp_code, bool& is_one_way) { is_one_way = false; - /**ICMP6 version of the ICMP4_counterpart, under work**/ - //not yet used anywhere, for the context class - switch ( icmp_type ) { + case ICMP6_ECHO_REQUEST: return ICMP6_ECHO_REPLY; + case ICMP6_ECHO_REPLY: return ICMP6_ECHO_REQUEST; + case ND_ROUTER_SOLICIT: return ND_ROUTER_ADVERT; + case ND_ROUTER_ADVERT: return ND_ROUTER_SOLICIT; - case ICMP6_ECHO_REQUEST: return ICMP6_ECHO_REPLY; - case ICMP6_ECHO_REPLY: return ICMP6_ECHO_REQUEST; + case ND_NEIGHBOR_SOLICIT: return ND_NEIGHBOR_ADVERT; + case ND_NEIGHBOR_ADVERT: return ND_NEIGHBOR_SOLICIT; - case ND_ROUTER_SOLICIT: return ND_ROUTER_ADVERT; - case ND_ROUTER_ADVERT: return ND_ROUTER_SOLICIT; + case MLD_LISTENER_QUERY: return MLD_LISTENER_REPORT; + case MLD_LISTENER_REPORT: return MLD_LISTENER_QUERY; - case ND_NEIGHBOR_SOLICIT: return ND_NEIGHBOR_ADVERT; - case ND_NEIGHBOR_ADVERT: return ND_NEIGHBOR_SOLICIT; + // ICMP node information query and response respectively (not defined in + // icmp6.h) + case 139: return 140; + case 140: return 139; - case MLD_LISTENER_QUERY: return MLD_LISTENER_REPORT; - case MLD_LISTENER_REPORT: return MLD_LISTENER_QUERY; - - case 139: return 140; //ICMP node information query and response respectively (not defined in icmp6.h) - case 140: return 139; - - case 144: return 145; //Home Agent Address Discovery Request Message and reply + // Home Agent Address Discovery Request Message and reply + case 144: return 145; case 145: return 144; - //check the rest of the counterparts + // TODO: Add further counterparts. default: is_one_way = true; return icmp_code; } } - - //For mapping ICMP types and codes of v6 to v4. Because we are using same events for both icmpv4 and icmpv6 there is some overlap - //in ICMP types. If this function is used, the name (checked from a table in the scripts) will be incorrect for the listed - //types, but the names will be correct for all ICMP types. - int Type6to4(int icmp_type) - { - switch ( icmp_type ) //For these three values, the type number will be wrong if this is used! - { //easy way to disable this is just to comment all the cases out, and leave only the default. 
- case ICMP6_DST_UNREACH: return ICMP_UNREACH; break; - case ICMP6_TIME_EXCEEDED: return ICMP_TIMXCEED; break; - case ICMP6_PARAM_PROB: return ICMP_PARAMPROB; break; - - default: return icmp_type; break; - } - } - - int Code6to4(int icmp_code) //not used yet for anything - { - switch ( icmp_code ) - { - default: return icmp_code; break; - } - } - - diff --git a/src/ICMP.h b/src/ICMP.h index 14f6971915..aed814d2b1 100644 --- a/src/ICMP.h +++ b/src/ICMP.h @@ -34,7 +34,7 @@ protected: virtual bool IsReuse(double t, const u_char* pkt); virtual unsigned int MemoryAllocation() const; - void ICMPEvent(EventHandlerPtr f, int ICMP6Flag); + void ICMPEvent(EventHandlerPtr f, const struct icmp* icmpp, int len, int icmpv6); void Echo(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); @@ -43,47 +43,40 @@ protected: void Router(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); - - void Describe(ODesc* d) const; - RecordVal* BuildICMPVal(int ICMP6Flag); + RecordVal* BuildICMPVal(const struct icmp* icmpp, int len, int icmpv6); - virtual void NextICMP(double t, const struct icmp* icmpp, - int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); + void NextICMP4(double t, const struct icmp* icmpp, int len, int caplen, + const u_char*& data, const IP_Hdr* ip_hdr ); RecordVal* ExtractICMP4Context(int len, const u_char*& data); + + void Context4(double t, const struct icmp* icmpp, int len, int caplen, + const u_char*& data, const IP_Hdr* ip_hdr); + + TransportProto GetContextProtocol(const IP_Hdr* ip_hdr, uint32* src_port, + uint32* dst_port); + +#ifdef BROv6 + void NextICMP6(double t, const struct icmp* icmpp, int len, int caplen, + const u_char*& data, const IP_Hdr* ip_hdr ); + RecordVal* ExtractICMP6Context(int len, const u_char*& data); + void Context6(double t, const struct icmp* icmpp, int len, int caplen, + const u_char*& data, const IP_Hdr* ip_hdr); +#endif RecordVal* icmp_conn_val; - int type; - int code; - int len; - int request_len, reply_len; RuleMatcherState matcher_state; }; -/*class ICMP4_Analyzer : public ICMP_Analyzer { - - - -}; - -class ICMP6_Analyzer : public ICMP_Analyzer { - - - -};*/ - // Returns the counterpart type to the given type (e.g., the counterpart // to ICMP_ECHOREPLY is ICMP_ECHO). 
-//extern int ICMP_counterpart(int icmp_type, int icmp_code, bool& is_one_way); extern int ICMP4_counterpart(int icmp_type, int icmp_code, bool& is_one_way); extern int ICMP6_counterpart(int icmp_type, int icmp_code, bool& is_one_way); -extern int Type6to4(int icmp_type); -extern int Code6to4(int icmp_code); #endif diff --git a/src/event.bif b/src/event.bif index ffee9244b7..d0cee28c03 100644 --- a/src/event.bif +++ b/src/event.bif @@ -53,12 +53,12 @@ event udp_reply%(u: connection%); event udp_contents%(u: connection, is_orig: bool, contents: string%); event udp_session_done%(u: connection%); -event icmp_sent%(c: connection, icmp: icmp_conn, ICMP6: bool%); -event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string, ICMP6: bool%); -event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string, ICMP6: bool%); +event icmp_sent%(c: connection, icmp: icmp_conn%); +event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); +event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); -event icmp_router_advertisement%(c: connection, icmp: icmp_conn, ICMP6: bool%); +event icmp_router_advertisement%(c: connection, icmp: icmp_conn%); diff --git a/src/net_util.cc b/src/net_util.cc index 4c57f12213..75dfd929c2 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -86,14 +86,8 @@ int udp_checksum(const struct ip* ip, const struct udphdr* up, int len) #ifdef BROv6 int udp6_checksum(const struct ip6_hdr* ip6, const struct udphdr* up, int len) { - /**From RFC for udp4 (same for udp6, except for different pseudoheader which is same as for icmp6) - Computed as the 16-bit one's complement of the one's complement sum of a - pseudo header of information from the IP header, the UDP header, and the - data, padded as needed with zero bytes at the end to make a multiple of - two bytes. If the checksum is cleared to zero, then checksuming is - disabled. If the computed checksum is zero, then this field must be set - to 0xFFFF. - **/ + // UDP over IPv6 uses the same checksum function as over IPv4 but a + // different pseuod-header over which it is computed. uint32 sum; if ( len % 2 == 1 ) @@ -108,23 +102,18 @@ int udp6_checksum(const struct ip6_hdr* ip6, const struct udphdr* up, int len) uint32 l = htonl(len); sum = ones_complement_checksum((void*) &l, 4, sum); uint32 addl_pseudo = htons(IPPROTO_UDP); + sum = ones_complement_checksum((void*) &addl_pseudo, 4, sum); sum = ones_complement_checksum((void*) up, len, sum); - //printf("checksum, calculated for UDP6: %d\n",sum); - return sum; } int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, int len) { - /**From RFC - Checksum that covers the ICMPv6 message. This field contains the 16-bit one's - complement of the one's complement sum of the entire ICMPv6 message starting - with the ICMPv6 message type field, prepended with a pseudo-header of IPv6 - header fields. - **/ + // ICMP6 uses the same checksum function as over ICMP4 but a different + // pseuod-header over which it is computed. uint32 sum; if ( len % 2 == 1 ) @@ -133,19 +122,17 @@ int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, int len) else sum = 0; - //pseudoheader as in udp6 above + // Pseudo-header as for UDP over IPv6 above. 
sum = ones_complement_checksum((void*) ip6->ip6_src.s6_addr, 16, sum); sum = ones_complement_checksum((void*) ip6->ip6_dst.s6_addr, 16, sum); uint32 l = htonl(len); sum = ones_complement_checksum((void*) &l, 4, sum); + uint32 addl_pseudo = htons(IPPROTO_ICMPV6); sum = ones_complement_checksum((void*) &addl_pseudo, 4, sum); - //pseudoheader complete sum = ones_complement_checksum((void*) icmpp, len, sum); - //printf("checksum, calculated for ICMP6: %d\n",sum); - return sum; } @@ -153,12 +140,6 @@ int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, int len) int icmp_checksum(const struct icmp* icmpp, int len) { - /**From RFC - Checksum that covers the ICMP message. This is the 16-bit one's - complement of the one's complement sum of the ICMP message starting - with the Type field. The checksum field should be cleared to zero - before generating the checksum. - **/ uint32 sum; if ( len % 2 == 1 ) // Add in pad byte. @@ -168,15 +149,9 @@ int icmp_checksum(const struct icmp* icmpp, int len) sum = ones_complement_checksum((void*) icmpp, len, sum); - //printf("checksum, calculated for ICMP4: %d\n",sum); - return sum; } - - - - #define CLASS_A 0x00000000 #define CLASS_B 0x80000000 #define CLASS_C 0xc0000000 diff --git a/src/net_util.h b/src/net_util.h index 4c17104573..9a6c12b3c3 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -88,12 +88,14 @@ extern int ones_complement_checksum(const void* p, int b, uint32 sum); extern int tcp_checksum(const struct ip* ip, const struct tcphdr* tp, int len); extern int udp_checksum(const struct ip* ip, const struct udphdr* up, int len); +extern int icmp_checksum(const struct icmp* icmpp, int len); + #ifdef BROv6 extern int udp6_checksum(const struct ip6_hdr* ip, const struct udphdr* up, int len); -extern int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, int len); +extern int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, + int len); #endif -extern int icmp_checksum(const struct icmp* icmpp, int len); // Given an address in host order, returns its "classical network prefix", // also in host order. From 9c388a18091d07e5adf5fa3b621c83fd46d5890a Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Wed, 3 Aug 2011 14:12:47 -0700 Subject: [PATCH 003/651] Adding support to de-capsulate tunnels. Checkpoint. Decapsulation happens after IP Defragmentation. The "identity" of the enclosing tunnel (the "parent") is added to the connection record of the child (tunneled) connection as an optional field $tunnel_parent. 
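
A rough script-level sketch of how the new optional field could be consumed
(illustrative only, not part of this patch; the handler body and the output
format are placeholders):

event new_connection(c: connection)
	{
	# Only tunneled connections carry the optional $tunnel_parent field.
	if ( c?$tunnel_parent )
		print fmt("%s tunneled inside %s (%s)", c$id,
			c$tunnel_parent$cid, c$tunnel_parent$tunnel_type);
	}
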
--- policy/bro.init | 6 ++++ src/CMakeLists.txt | 1 + src/Conn.cc | 5 ++- src/Conn.h | 3 +- src/ConnCompressor.cc | 4 +-- src/Sessions.cc | 46 ++++++++++++++++++++------- src/Sessions.h | 7 ++++- src/TunnelHandler.cc | 48 ++++++++++++++++++++++++++++ src/TunnelHandler.h | 73 +++++++++++++++++++++++++++++++++++++++++++ src/types.bif | 7 +++++ 10 files changed, 183 insertions(+), 17 deletions(-) create mode 100644 src/TunnelHandler.cc create mode 100644 src/TunnelHandler.h diff --git a/policy/bro.init b/policy/bro.init index fda8cfd6f4..17607a7113 100644 --- a/policy/bro.init +++ b/policy/bro.init @@ -81,6 +81,11 @@ type endpoint_stats: record { type AnalyzerID: count; +type tunnel_parent_t: record { + cid: conn_id; + tunnel_type: tunneltype_t; +}; + type connection: record { id: conn_id; orig: endpoint; @@ -92,6 +97,7 @@ type connection: record { hot: count; # how hot; 0 = don't know or not hot history: string; uid: string; + tunnel_parent: tunnel_parent_t &optional; }; type SYN_packet: record { diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1a5f096f70..e79fad4ca0 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -388,6 +388,7 @@ set(bro_SRCS Timer.cc Traverse.cc Trigger.cc + TunnelHandler.cc Type.cc UDP.cc Val.cc diff --git a/src/Conn.cc b/src/Conn.cc index bab032cbd0..67e337fda9 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -139,7 +139,7 @@ unsigned int Connection::external_connections = 0; IMPLEMENT_SERIAL(Connection, SER_CONNECTION); -Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id) +Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, RecordVal *arg_tunnel_parent) { sessions = s; key = k; @@ -183,6 +183,8 @@ Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id) uid = 0; // Will set later. + tunnel_parent = arg_tunnel_parent; + if ( conn_timer_mgr ) { ++external_connections; @@ -370,6 +372,7 @@ RecordVal* Connection::BuildConnVal() char tmp[20]; conn_val->Assign(9, new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62))); + conn_val->Assign(10, tunnel_parent); } if ( root_analyzer ) diff --git a/src/Conn.h b/src/Conn.h index 8f817fd003..e22c0b83ec 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -86,7 +86,7 @@ class Analyzer; class Connection : public BroObj { public: - Connection(NetSessions* s, HashKey* k, double t, const ConnID* id); + Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, RecordVal *arg_tunnel_parent); virtual ~Connection(); // Invoked when connection is about to be removed. Use Ref(this) @@ -335,6 +335,7 @@ protected: double inactivity_timeout; RecordVal* conn_val; LoginConn* login_conn; // either nil, or this + RecordVal* tunnel_parent; // nil if not tunneled int suppress_event; // suppress certain events to once per conn. unsigned int installed_status_timer:1; diff --git a/src/ConnCompressor.cc b/src/ConnCompressor.cc index e173463205..2d617b0fc4 100644 --- a/src/ConnCompressor.cc +++ b/src/ConnCompressor.cc @@ -521,7 +521,7 @@ Connection* ConnCompressor::Instantiate(HashKey* key, PendingConn* pending) // Fake the first packet. const IP_Hdr* faked_pkt = PendingConnToPacket(pending); Connection* new_conn = sessions->NewConn(key, pending->time, &conn_id, - faked_pkt->Payload(), IPPROTO_TCP); + faked_pkt->Payload(), IPPROTO_TCP, 0); if ( ! 
new_conn ) { @@ -574,7 +574,7 @@ Connection* ConnCompressor::Instantiate(double t, HashKey* key, conn_id.dst_port = tp->th_dport; Connection* new_conn = - sessions->NewConn(key, t, &conn_id, ip->Payload(), IPPROTO_TCP); + sessions->NewConn(key, t, &conn_id, ip->Payload(), IPPROTO_TCP, 0); if ( ! new_conn ) { diff --git a/src/Sessions.cc b/src/Sessions.cc index 1678f6798f..48fab1bff4 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -33,6 +33,7 @@ #include "DPM.h" #include "PacketSort.h" +#include "TunnelHandler.h" // These represent NetBIOS services on ephemeral ports. They're numbered // so that we can use a single int to hold either an actual TCP/UDP server @@ -128,6 +129,12 @@ NetSessions::NetSessions() arp_analyzer = new ARP_Analyzer(); else arp_analyzer = 0; + + + if ( 1 ) + tunnel_handler = new TunnelHandler(this); + else + tunnel_handler = 0; } NetSessions::~NetSessions() @@ -433,14 +440,6 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( discarder && discarder->NextPacket(ip_hdr, len, caplen) ) return; - int proto = ip_hdr->NextProto(); - if ( proto != IPPROTO_TCP && proto != IPPROTO_UDP && - proto != IPPROTO_ICMP ) - { - dump_this_packet = 1; - return; - } - FragReassembler* f = 0; uint32 frag_field = ip_hdr->FragField(); @@ -474,6 +473,23 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } } + TunnelInfo *tunnel_info = tunnel_handler->DecapsulateTunnel(ip_hdr, len, caplen, hdr, pkt); + if (tunnel_info) + { + ip4 = tunnel_info->child->IP4_Hdr(); + ip_hdr = tunnel_info->child; + len -= tunnel_info->hdr_len; + caplen -= tunnel_info->hdr_len; + } + + int proto = ip_hdr->NextProto(); + if ( proto != IPPROTO_TCP && proto != IPPROTO_UDP && + proto != IPPROTO_ICMP ) + { + dump_this_packet = 1; + return; + } + len -= ip_hdr_len; // remove IP header caplen -= ip_hdr_len; @@ -561,7 +577,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, conn = (Connection*) d->Lookup(h); if ( ! conn ) { - conn = NewConn(h, t, &id, data, proto); + conn = NewConn(h, t, &id, data, proto, tunnel_info); if ( conn ) d->Insert(h, conn); } @@ -581,7 +597,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, conn->Event(connection_reused, 0); Remove(conn); - conn = NewConn(h, t, &id, data, proto); + conn = NewConn(h, t, &id, data, proto, tunnel_info); if ( conn ) d->Insert(h, conn); } @@ -609,6 +625,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, record_packet, record_content, hdr, pkt, hdr_size); + if ( tunnel_info ) + delete tunnel_info; if ( f ) { // Above we already recorded the fragment in its entirety. @@ -1045,13 +1063,17 @@ void NetSessions::GetStats(SessionStats& s) const } Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, - const u_char* data, int proto) + const u_char* data, int proto, TunnelInfo* tunnel_info) { // FIXME: This should be cleaned up a bit, it's too protocol-specific. // But I'm not yet sure what the right abstraction for these things is. int src_h = ntohs(id->src_port); int dst_h = ntohs(id->dst_port); int flags = 0; + RecordVal *tunnel_parent = 0; + + if ( tunnel_info ) + tunnel_parent = tunnel_info->GetRecordVal(); // Hmm... This is not great. 
TransportProto tproto; @@ -1098,7 +1120,7 @@ Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, id = &flip_id; } - Connection* conn = new Connection(this, k, t, id); + Connection* conn = new Connection(this, k, t, id, tunnel_parent); conn->SetTransport(tproto); dpm->BuildInitialAnalyzerTree(tproto, conn, data); diff --git a/src/Sessions.h b/src/Sessions.h index 6adc333282..9551ba5254 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -28,6 +28,9 @@ class Discarder; class SteppingStoneManager; class PacketFilter; +class TunnelHandler; +class TunnelInfo; + class PacketSortElement; struct SessionStats { @@ -144,7 +147,7 @@ protected: friend class TimerMgrExpireTimer; Connection* NewConn(HashKey* k, double t, const ConnID* id, - const u_char* data, int proto); + const u_char* data, int proto, TunnelInfo *tunnel_info); // Check whether the tag of the current packet is consistent with // the given connection. Returns: @@ -214,6 +217,8 @@ protected: int num_packets_processed; PacketProfiler* pkt_profiler; + TunnelHandler *tunnel_handler; + // We may use independent timer managers for different sets of related // activity. The managers are identified by an unique tag. typedef std::map TimerMgrMap; diff --git a/src/TunnelHandler.cc b/src/TunnelHandler.cc new file mode 100644 index 0000000000..c739403ad8 --- /dev/null +++ b/src/TunnelHandler.cc @@ -0,0 +1,48 @@ +// $Id: Sessions.cc 7075 2010-09-13 02:39:38Z vern $ +// +// See the file "COPYING" in the main distribution directory for copyright. + + +#include "config.h" + +#include + +#include +#include + +#include "TunnelHandler.h" +#include "Conn.h" +#include "Sessions.h" + + +TunnelHandler::TunnelHandler(NetSessions *arg_s) + { + s = arg_s; + } + +TunnelInfo* TunnelHandler::DecapsulateTunnel(const IP_Hdr *ip_hdr, int len, int caplen, + const struct pcap_pkthdr* hdr, const u_char* const pkt) + { + TunnelInfo *tunnel_info = 0; + switch (ip_hdr->NextProto()) { +#ifdef BROv6 + case IPPROTO_IPV6: /* 6in4 and 6to4 */ + if (len < (int)sizeof(struct ip6_hdr) || caplen < (int)sizeof(struct ip6_hdr)) + { + s->Weird("truncated_header", hdr, pkt); + return 0; + } + // TODO: check if IP6 header makes sense + tunnel_info = new TunnelInfo(); + tunnel_info->child = new IP_Hdr((const struct ip6_hdr*)ip_hdr->Payload()); + tunnel_info->tunneltype = BifEnum::IP6inIP; + tunnel_info->hdr_len = ip_hdr->HdrLen(); + tunnel_info->SetParentIPs(ip_hdr); + return tunnel_info; + break; +#endif + default: + break; + } /* end switch */ + return 0; + } diff --git a/src/TunnelHandler.h b/src/TunnelHandler.h new file mode 100644 index 0000000000..aa4cae0a39 --- /dev/null +++ b/src/TunnelHandler.h @@ -0,0 +1,73 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#ifndef tunnelhandler_h +#define tunnelhandler_h + +#include "IP.h" +#include "Conn.h" +#include "Sessions.h" +#include "Val.h" + + +class TunnelInfo { +public: + TunnelInfo() + { + child = 0; + tunneltype = BifEnum::NONE; + hdr_len = 0; + parent.src_addr = parent.dst_addr = 0; + parent.src_port = parent.dst_port = 0; + parent.is_one_way = 0; + } + ~TunnelInfo() + { + if (child) delete child; + } + + void SetParentIPs(const IP_Hdr *ip_hdr) + { + parent.src_addr = ip_hdr->SrcAddr(); + parent.dst_addr = ip_hdr->DstAddr(); + } + void SetParentPorts(uint32 src_port, uint32 dst_port) + { + parent.src_port = src_port; + parent.dst_port = dst_port; + } + + RecordVal* GetRecordVal() const + { + RecordVal *rv = new RecordVal(BifType::Record::tunnel_parent_t); + + RecordVal* id_val = new RecordVal(conn_id); + id_val->Assign(0, new AddrVal(parent.src_addr)); + id_val->Assign(1, new PortVal(ntohs(parent.src_port), TRANSPORT_UNKNOWN)); + id_val->Assign(2, new AddrVal(parent.dst_addr)); + id_val->Assign(3, new PortVal(ntohs(parent.dst_port), TRANSPORT_UNKNOWN)); + rv->Assign(0, id_val); + rv->Assign(1, new EnumVal(tunneltype, BifType::Enum::tunneltype_t)); + return rv; + } + + IP_Hdr *child; + ConnID parent; + int hdr_len; + BifEnum::tunneltype_t tunneltype; +}; + +class TunnelHandler { +public: + TunnelHandler(NetSessions *arg_s); + ~TunnelHandler(); + + TunnelInfo* DecapsulateTunnel(const IP_Hdr* ip_hdr, int len, int caplen, + /* need those for passing them back to NetSessions::Weird() */ + const struct pcap_pkthdr* hdr, const u_char* const pkt); + +protected: + NetSessions *s; +}; + + +#endif diff --git a/src/types.bif b/src/types.bif index 8bc5ab8510..d44f177b82 100644 --- a/src/types.bif +++ b/src/types.bif @@ -167,3 +167,10 @@ enum ID %{ %} module GLOBAL; + +enum tunneltype_t %{ + NONE, + IP6inIP, +%} + +type tunnel_parent_t: record; From 8910cd2dcaf308f9febc575a38dcf32ed47be1d6 Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Thu, 4 Aug 2011 15:25:13 -0700 Subject: [PATCH 004/651] Adding support to de-capsulate tunnels. Checkpoint. Re-organizing code. Adding UDP tunnel handlers. Using policy level redef'able consts to tune behavior. UDP port settings not working yet. --- policy/bro.init | 36 +++++++++-- src/Sessions.cc | 25 +++++--- src/TunnelHandler.cc | 144 +++++++++++++++++++++++++++++++++++++++---- src/TunnelHandler.h | 34 +++++++--- src/const.bif | 4 ++ src/types.bif | 13 +++- 6 files changed, 216 insertions(+), 40 deletions(-) diff --git a/policy/bro.init b/policy/bro.init index 17607a7113..8be0ab754a 100644 --- a/policy/bro.init +++ b/policy/bro.init @@ -81,10 +81,14 @@ type endpoint_stats: record { type AnalyzerID: count; -type tunnel_parent_t: record { - cid: conn_id; - tunnel_type: tunneltype_t; -}; +module Tunnel; +export { + type parent_t: record { + cid: conn_id; + tunnel_type: tunneltype_t; + }; +} # end export +module GLOBAL; type connection: record { id: conn_id; @@ -97,7 +101,7 @@ type connection: record { hot: count; # how hot; 0 = don't know or not hot history: string; uid: string; - tunnel_parent: tunnel_parent_t &optional; + tunnel_parent: Tunnel::parent_t &optional; }; type SYN_packet: record { @@ -1489,6 +1493,28 @@ const skip_http_data = F &redef; # UDP tunnels. See also: udp_tunnel_port, policy/udp-tunnel.bro. 
const parse_udp_tunnels = F &redef; +module Tunnel; +export { + # Whether to decapsulate IP tunnels (IPinIP, 6in4, 6to4) + const decapsulate_ip = F &redef; + + # Whether to decapsulate URDP tunnels (e.g., Teredo, IPv4 in UDP) + const decapsulate_udp = F &redef; + + # If decapsulating UDP: the set of ports for which to do so + const udp_tunnel_ports: set[port] = { + 3544/udp, # Teredo + 5072/udp, # AYIAY + } &redef; + + # If udp_tunnel_allports is T udp_tunnel_ports is ignored and we + # check every UDP packet for tunnels. + const udp_tunnel_allports = F &redef; +} # end export +module GLOBAL; + + + # Load the site utilities. @load utils/site diff --git a/src/Sessions.cc b/src/Sessions.cc index 48fab1bff4..41a7f4c52d 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -131,10 +131,11 @@ NetSessions::NetSessions() arp_analyzer = 0; - if ( 1 ) + if ( BifConst::Tunnel::decapsulate_ip || BifConst::Tunnel::decapsulate_udp ) tunnel_handler = new TunnelHandler(this); else tunnel_handler = 0; + printf("tunnel_handler: %p\n", tunnel_handler); } NetSessions::~NetSessions() @@ -473,13 +474,20 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } } - TunnelInfo *tunnel_info = tunnel_handler->DecapsulateTunnel(ip_hdr, len, caplen, hdr, pkt); - if (tunnel_info) + len -= ip_hdr_len; // remove IP header + caplen -= ip_hdr_len; + + TunnelInfo *tunnel_info = 0; + if ( tunnel_handler ) { - ip4 = tunnel_info->child->IP4_Hdr(); - ip_hdr = tunnel_info->child; - len -= tunnel_info->hdr_len; - caplen -= tunnel_info->hdr_len; + tunnel_info = tunnel_handler->DecapsulateTunnel(ip_hdr, len, caplen, hdr, pkt); + if (tunnel_info) + { + ip4 = tunnel_info->child->IP4_Hdr(); + ip_hdr = tunnel_info->child; + len -= tunnel_info->hdr_len; + caplen -= tunnel_info->hdr_len; + } } int proto = ip_hdr->NextProto(); @@ -490,9 +498,6 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, return; } - len -= ip_hdr_len; // remove IP header - caplen -= ip_hdr_len; - uint32 min_hdr_len = (proto == IPPROTO_TCP) ? sizeof(struct tcphdr) : (proto == IPPROTO_UDP ? 
sizeof(struct udphdr) : ICMP_MINLEN); diff --git a/src/TunnelHandler.cc b/src/TunnelHandler.cc index c739403ad8..3256894cd0 100644 --- a/src/TunnelHandler.cc +++ b/src/TunnelHandler.cc @@ -18,31 +18,151 @@ TunnelHandler::TunnelHandler(NetSessions *arg_s) { s = arg_s; + for (int i=0; i< 65536; i++) + udp_ports[i] = 0; + udp_ports[3544] = 1; + udp_ports[5072] = 1; } TunnelInfo* TunnelHandler::DecapsulateTunnel(const IP_Hdr *ip_hdr, int len, int caplen, const struct pcap_pkthdr* hdr, const u_char* const pkt) { TunnelInfo *tunnel_info = 0; + switch (ip_hdr->NextProto()) { #ifdef BROv6 case IPPROTO_IPV6: /* 6in4 and 6to4 */ - if (len < (int)sizeof(struct ip6_hdr) || caplen < (int)sizeof(struct ip6_hdr)) - { - s->Weird("truncated_header", hdr, pkt); - return 0; - } - // TODO: check if IP6 header makes sense - tunnel_info = new TunnelInfo(); - tunnel_info->child = new IP_Hdr((const struct ip6_hdr*)ip_hdr->Payload()); - tunnel_info->tunneltype = BifEnum::IP6inIP; - tunnel_info->hdr_len = ip_hdr->HdrLen(); - tunnel_info->SetParentIPs(ip_hdr); - return tunnel_info; + if ( BifConst::Tunnel::decapsulate_ip ) + { + if (len < (int)sizeof(struct ip6_hdr) || caplen < (int)sizeof(struct ip6_hdr)) + { + s->Weird("truncated_header", hdr, pkt); + return 0; + } + // TODO: check if IP6 header makes sense + tunnel_info = new TunnelInfo(); + tunnel_info->child = new IP_Hdr((const struct ip6_hdr*)ip_hdr->Payload()); + tunnel_info->tunneltype = BifEnum::Tunnel::IP6inIP; + tunnel_info->hdr_len = tunnel_info->child->HdrLen(); + tunnel_info->SetParentIPs(ip_hdr); + return tunnel_info; + } break; #endif + // TODO: IP in IP. Find test traces first. IP proto 0 and/or 4 + case IPPROTO_UDP: + if ( BifConst::Tunnel::decapsulate_udp ) + { + if (len < (int)sizeof(struct udphdr) || caplen < (int)sizeof(struct udphdr)) + { + // No weird here. Main packet processing will raise it. + return 0; + } + return HandleUDP(ip_hdr, len, caplen); + } + + break; default: break; } /* end switch */ return 0; } + +TunnelInfo* TunnelHandler::HandleUDP(const IP_Hdr *ip_hdr, int len, int caplen) + { + // We already know we that we have a valid UDP header + const u_char *data = ip_hdr->Payload(); + const struct udphdr* uh = (const struct udphdr*)data; + IP_Hdr *cand_ip_hdr = 0; + BifEnum::Tunnel::tunneltype_t tunneltype = BifEnum::Tunnel::NONE; + + int hdr_len = sizeof(struct udphdr); + data += hdr_len; + + int datalen = (int)ntohs(uh->uh_ulen); + datalen = min(datalen, min(len, caplen)); + datalen -= hdr_len; + + if ( BifConst::Tunnel::udp_tunnel_allports || + udp_ports[ntohs(uh->uh_sport)] || + udp_ports[ntohs(uh->uh_dport)] ) + { + cand_ip_hdr = LookForIPHdr(data, datalen); + if (cand_ip_hdr) + { + tunneltype = (cand_ip_hdr->IP4_Hdr()) ? + BifEnum::Tunnel::IP4inUDP : BifEnum::Tunnel::IP6inUDP; + } + else if (datalen >= 8) + { + // Look for AYIAY tunnels + u_char id_byte = data[0]; + u_char sig_byte = data[1]; + u_char next_hdr = data[3]; + + // identity length field is high bits of id_byte. + // length in octets is 2 to the power of length field + int id_len = (1 << (id_byte>>4)); + + // signature length field is high bits of sig_byte + // length in octets 4 * length field + int sig_len = 4*(sig_byte>>4); + + datalen -= 8 + id_len + sig_len; + data += 8 + id_len + sig_len; + if (datalen <= 0) + return 0; + cand_ip_hdr = LookForIPHdr(data, datalen); + if (cand_ip_hdr) + { + hdr_len += 8 + id_len + sig_len; + tunneltype = (cand_ip_hdr->IP4_Hdr()) ? 
+ BifEnum::Tunnel::IP4inAYIAY : BifEnum::Tunnel::IP6inAYIAY; + } + } + if (cand_ip_hdr) + { + TunnelInfo *tunnel_info = new TunnelInfo(); + tunnel_info->child = cand_ip_hdr; + tunnel_info->tunneltype = tunneltype; + tunnel_info->SetParentIPs(ip_hdr); + tunnel_info->SetParentPorts(uh); + tunnel_info->hdr_len = hdr_len + cand_ip_hdr->HdrLen(); + return tunnel_info; + }; + } + return 0; + } + +IP_Hdr* TunnelHandler::LookForIPHdr(const u_char *data, int datalen) + { + IP_Hdr *cand_ip_hdr = 0; + if (datalen < (int)sizeof(struct ip)) + return 0; + + const struct ip *ip4 = (const struct ip*)(data); + if (ip4->ip_v == 4) + cand_ip_hdr = new IP_Hdr((const struct ip*)ip4); + else if (ip4->ip_v == 6 && (datalen > (int)sizeof(struct ip6_hdr))) + cand_ip_hdr = new IP_Hdr((const struct ip6_hdr*)data); + + if (cand_ip_hdr) + { + switch (cand_ip_hdr->NextProto()) { + case IPPROTO_UDP: + case IPPROTO_TCP: + case IPPROTO_ICMP: + if ((int)cand_ip_hdr->TotalLen() != datalen) + { + delete cand_ip_hdr; + cand_ip_hdr = 0; + } + break; + default: + delete cand_ip_hdr; + cand_ip_hdr = 0; + break; + } // end switch + } + return cand_ip_hdr; + } diff --git a/src/TunnelHandler.h b/src/TunnelHandler.h index aa4cae0a39..31c9791a1c 100644 --- a/src/TunnelHandler.h +++ b/src/TunnelHandler.h @@ -3,6 +3,7 @@ #ifndef tunnelhandler_h #define tunnelhandler_h +#include #include "IP.h" #include "Conn.h" #include "Sessions.h" @@ -14,7 +15,7 @@ public: TunnelInfo() { child = 0; - tunneltype = BifEnum::NONE; + tunneltype = BifEnum::Tunnel::NONE; hdr_len = 0; parent.src_addr = parent.dst_addr = 0; parent.src_port = parent.dst_port = 0; @@ -30,30 +31,39 @@ public: parent.src_addr = ip_hdr->SrcAddr(); parent.dst_addr = ip_hdr->DstAddr(); } - void SetParentPorts(uint32 src_port, uint32 dst_port) + void SetParentPorts(const struct udphdr *uh) { - parent.src_port = src_port; - parent.dst_port = dst_port; + parent.src_port = uh->uh_sport; + parent.dst_port = uh->uh_dport; } RecordVal* GetRecordVal() const { - RecordVal *rv = new RecordVal(BifType::Record::tunnel_parent_t); + RecordVal *rv = new RecordVal(BifType::Record::Tunnel::parent_t); + TransportProto tproto; + switch(tunneltype) { + case BifEnum::Tunnel::IP6inIP: + case BifEnum::Tunnel::IP4inIP: + tproto = TRANSPORT_UNKNOWN; + break; + default: + tproto = TRANSPORT_UDP; + } // end switch RecordVal* id_val = new RecordVal(conn_id); id_val->Assign(0, new AddrVal(parent.src_addr)); - id_val->Assign(1, new PortVal(ntohs(parent.src_port), TRANSPORT_UNKNOWN)); + id_val->Assign(1, new PortVal(ntohs(parent.src_port), tproto)); id_val->Assign(2, new AddrVal(parent.dst_addr)); - id_val->Assign(3, new PortVal(ntohs(parent.dst_port), TRANSPORT_UNKNOWN)); + id_val->Assign(3, new PortVal(ntohs(parent.dst_port), tproto)); rv->Assign(0, id_val); - rv->Assign(1, new EnumVal(tunneltype, BifType::Enum::tunneltype_t)); + rv->Assign(1, new EnumVal(tunneltype, BifType::Enum::Tunnel::tunneltype_t)); return rv; } IP_Hdr *child; ConnID parent; int hdr_len; - BifEnum::tunneltype_t tunneltype; + BifEnum::Tunnel::tunneltype_t tunneltype; }; class TunnelHandler { @@ -61,12 +71,16 @@ public: TunnelHandler(NetSessions *arg_s); ~TunnelHandler(); + // Main entry point. Returns a nil if not tunneled. 
TunnelInfo* DecapsulateTunnel(const IP_Hdr* ip_hdr, int len, int caplen, - /* need those for passing them back to NetSessions::Weird() */ + // need those for passing them back to NetSessions::Weird() const struct pcap_pkthdr* hdr, const u_char* const pkt); protected: NetSessions *s; + short udp_ports[65536]; // which UDP ports to decapsulate + IP_Hdr* LookForIPHdr(const u_char *data, int datalen); + TunnelInfo* HandleUDP(const IP_Hdr *ip_hdr, int len, int caplen); }; diff --git a/src/const.bif b/src/const.bif index 825c21e7a5..447812a902 100644 --- a/src/const.bif +++ b/src/const.bif @@ -12,3 +12,7 @@ const NFS3::return_data: bool; const NFS3::return_data_max: count; const NFS3::return_data_first_only: bool; +const Tunnel::decapsulate_ip: bool; +const Tunnel::decapsulate_udp: bool; +const Tunnel::udp_tunnel_ports: any; +const Tunnel::udp_tunnel_allports: bool; diff --git a/src/types.bif b/src/types.bif index d44f177b82..35c4db0daf 100644 --- a/src/types.bif +++ b/src/types.bif @@ -166,11 +166,18 @@ enum ID %{ Unknown, %} -module GLOBAL; +module Tunnel; enum tunneltype_t %{ NONE, - IP6inIP, + IP6inIP, + IP4inIP, + IP6inUDP, + IP4inUDP, + IP6inAYIAY, + IP4inAYIAY, %} -type tunnel_parent_t: record; +type parent_t: record; + +module GLOBAL; From 5ed3ec2f38b4a8546fba55cfeace04615021b76c Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Sun, 7 Aug 2011 09:49:41 -0700 Subject: [PATCH 005/651] Finishing tunnel decapsulation support in C++ core. Policy script is next. --- policy/bro.init | 2 +- src/Sessions.cc | 1 - src/TunnelHandler.cc | 18 +++++++++++++++--- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/policy/bro.init b/policy/bro.init index 8be0ab754a..59acf746dd 100644 --- a/policy/bro.init +++ b/policy/bro.init @@ -86,7 +86,7 @@ export { type parent_t: record { cid: conn_id; tunnel_type: tunneltype_t; - }; + } &log; } # end export module GLOBAL; diff --git a/src/Sessions.cc b/src/Sessions.cc index 41a7f4c52d..9241b17005 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -135,7 +135,6 @@ NetSessions::NetSessions() tunnel_handler = new TunnelHandler(this); else tunnel_handler = 0; - printf("tunnel_handler: %p\n", tunnel_handler); } NetSessions::~NetSessions() diff --git a/src/TunnelHandler.cc b/src/TunnelHandler.cc index 3256894cd0..78428c700f 100644 --- a/src/TunnelHandler.cc +++ b/src/TunnelHandler.cc @@ -18,10 +18,22 @@ TunnelHandler::TunnelHandler(NetSessions *arg_s) { s = arg_s; + PortVal *pv = 0; + TableVal *udp_tunnel_ports = BifConst::Tunnel::udp_tunnel_ports->AsTableVal(); + // Find UDP ports we want to analyze. Store them in an array for faster + // lookup. for (int i=0; i< 65536; i++) - udp_ports[i] = 0; - udp_ports[3544] = 1; - udp_ports[5072] = 1; + { + Unref(pv); + pv = new PortVal(i, TRANSPORT_UDP); + if (udp_tunnel_ports->Lookup(pv, false)) + { + udp_ports[i] = 1; + } + else + udp_ports[i] = 0; + } + Unref(pv); } TunnelInfo* TunnelHandler::DecapsulateTunnel(const IP_Hdr *ip_hdr, int len, int caplen, From 32f37c9f6d151d73a47c2a0e558fcbd37b47f49c Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Sun, 7 Aug 2011 12:26:19 -0700 Subject: [PATCH 006/651] Documenting tunnel decapsulation. Haven't tested the autodoc output yet. 
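
A usage sketch to accompany the framework script added below (illustrative
only; it assumes the Tunnel::Info record and the Tunnel::log_conn event are
available as defined in this patch, and that the tunneltype_t enumerators
are reachable from scripts as Tunnel::IP6inIP etc.):

event Tunnel::log_conn(rec: Tunnel::Info)
	{
	# React to each tunneled connection as it is logged, e.g. single
	# out 6in4/6to4 traffic.
	if ( rec$parent$tunnel_type == Tunnel::IP6inIP )
		print fmt("%s carried inside IPv6-in-IP tunnel %s",
			rec$id, rec$parent$cid);
	}
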
--- scripts/base/bro.init | 17 +++++--- scripts/policy/frameworks/tunnel.bro | 61 ++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 5 deletions(-) create mode 100644 scripts/policy/frameworks/tunnel.bro diff --git a/scripts/base/bro.init b/scripts/base/bro.init index cf34a763c5..a6b0ff3890 100644 --- a/scripts/base/bro.init +++ b/scripts/base/bro.init @@ -83,8 +83,14 @@ type AnalyzerID: count; module Tunnel; export { + ## Records the identity of a the parent of a tunneled connection. type parent_t: record { + ## The 4-tuple of the tunnel "connection". In case of an IP-in-IP + ## tunnel the ports will be set to 0. The direction (i.e., orig and + ## resp) of the parent are set according to the tunneled connection + ## and not according to the side that established the tunnel. cid: conn_id; + ## The type of tunnel. tunnel_type: tunneltype_t; } &log; } # end export @@ -1495,20 +1501,21 @@ const parse_udp_tunnels = F &redef; module Tunnel; export { - # Whether to decapsulate IP tunnels (IPinIP, 6in4, 6to4) + ## Whether to decapsulate IP tunnels (IPinIP, 6in4, 6to4) const decapsulate_ip = F &redef; - # Whether to decapsulate URDP tunnels (e.g., Teredo, IPv4 in UDP) + ## Whether to decapsulate URDP tunnels (e.g., Teredo, IPv4 in UDP) const decapsulate_udp = F &redef; - # If decapsulating UDP: the set of ports for which to do so + ## If decapsulating UDP: the set of ports for which to do so. + ## Can be overridden by :bro:id:`Tunnel::udp_tunnel_allports` const udp_tunnel_ports: set[port] = { 3544/udp, # Teredo 5072/udp, # AYIAY } &redef; - # If udp_tunnel_allports is T udp_tunnel_ports is ignored and we - # check every UDP packet for tunnels. + ## If udp_tunnel_allports is T :bro:id:`udp_tunnel_ports` is ignored and we + ## check every UDP packet for tunnels. const udp_tunnel_allports = F &redef; } # end export module GLOBAL; diff --git a/scripts/policy/frameworks/tunnel.bro b/scripts/policy/frameworks/tunnel.bro new file mode 100644 index 0000000000..80e46c31a4 --- /dev/null +++ b/scripts/policy/frameworks/tunnel.bro @@ -0,0 +1,61 @@ +##! Handle tunneled connections. +##! +##! Bro can decapsulate IPinIP and IPinUDP tunnels, were "IP" can be either +##! IPv4 or IPv6. The most common case will be decapsulating Teredo, 6to4, +##! 6in4, and AYIAY. +##! +##! Decapsulation happens early in a packets processing, right after IP +##! defragmentation but before there is a connection context. The tunnel +##! headers are stripped from packet and the identity of the parent is +##! is stored as the ``tunnel_parent`` member of :bro:type:`connection`, +##! which is of type :bro:type:`parent_t`. +##! +##! *Limitation:* The decapsulated packets are not fed through the +##! defragmenter again. +##! +##! + +module Tunnel; + +redef use_connection_compressor = F; +redef Tunnel::decapsulate_ip = T; +redef Tunnel::decapsulate_udp = T; +redef Tunnel::udp_tunnel_allports = T; + +export { + redef enum Log::ID += { TUNNEL }; + + ## This record will be logged + type Info : record { + ## This is the time of the first record + ts: time &log; + ## The uid of the child connection, i.e. 
the connection in the tunnel + uid: string &log; + ## The connection id of the child + id: conn_id &log; + ## The child's transport protocol + proto: transport_proto &log; + ## The parent connection of IP-pair + parent: parent_t &log; + }; + global log_conn: event(rec: Info); +} + +event bro_init() + { + Log::create_stream(TUNNEL, [$columns=Info, $ev=log_conn]); + } + +event new_connection(c: connection) + { + if (c?$tunnel_parent) + { + local info: Info; + info$ts = c$start_time; + info$uid = c$uid; + info$id = c$id; + info$proto = get_port_transport_proto(c$id$resp_p); + info$parent = c$tunnel_parent; + Log::write(TUNNEL, info); + } + } From cd592203a0e3399ea8fdfe1288f51cce813db6d9 Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Sun, 7 Aug 2011 13:55:46 -0700 Subject: [PATCH 007/651] Remvoing old tunnel code (encap_hdr_size and udp_tunnels). --- scripts/base/bro.init | 6 ----- src/Sessions.cc | 56 ------------------------------------------- 2 files changed, 62 deletions(-) diff --git a/scripts/base/bro.init b/scripts/base/bro.init index a6b0ff3890..2f83b99bf8 100644 --- a/scripts/base/bro.init +++ b/scripts/base/bro.init @@ -515,12 +515,6 @@ const packet_sort_window = 0 usecs &redef; # state accrual. const frag_timeout = 0.0 sec &redef; -# If positive, indicates the encapsulation header size that should -# be skipped over for each captured packet .... -const encap_hdr_size = 0 &redef; -# ... or just for the following UDP port. -const tunnel_port = 0/udp &redef; - # Whether to use the ConnSize analyzer to count the number of # packets and IP-level bytes transfered by each endpoint. If # true, these values are returned in the connection's endpoint diff --git a/src/Sessions.cc b/src/Sessions.cc index 9241b17005..fa0d573660 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -177,62 +177,6 @@ void NetSessions::DispatchPacket(double t, const struct pcap_pkthdr* hdr, ip_data = pkt + hdr_size + (ip_hdr->ip_hl << 2); } - if ( encap_hdr_size > 0 && ip_data ) - { - // We're doing tunnel encapsulation. Check whether there's - // a particular associated port. - // - // Should we discourage the use of encap_hdr_size for UDP - // tunnneling? It is probably better handled by enabling - // BifConst::parse_udp_tunnels instead of specifying a fixed - // encap_hdr_size. - if ( udp_tunnel_port > 0 ) - { - ASSERT(ip_hdr); - if ( ip_hdr->ip_p == IPPROTO_UDP ) - { - const struct udphdr* udp_hdr = - reinterpret_cast - (ip_data); - - if ( ntohs(udp_hdr->uh_dport) == udp_tunnel_port ) - { - // A match. - hdr_size += encap_hdr_size; - } - } - } - - else - // Blanket encapsulation - hdr_size += encap_hdr_size; - } - - // Check IP packets encapsulated through UDP tunnels. - // Specifying a udp_tunnel_port is optional but recommended (to avoid - // the cost of checking every UDP packet). 
- else if ( BifConst::parse_udp_tunnels && ip_data && ip_hdr->ip_p == IPPROTO_UDP ) - { - const struct udphdr* udp_hdr = - reinterpret_cast(ip_data); - - if ( udp_tunnel_port == 0 || // 0 matches any port - udp_tunnel_port == ntohs(udp_hdr->uh_dport) ) - { - const u_char* udp_data = - ip_data + sizeof(struct udphdr); - const struct ip* ip_encap = - reinterpret_cast(udp_data); - const int ip_encap_len = - ntohs(udp_hdr->uh_ulen) - sizeof(struct udphdr); - const int ip_encap_caplen = - hdr->caplen - (udp_data - pkt); - - if ( looks_like_IPv4_packet(ip_encap_len, ip_encap) ) - hdr_size = udp_data - pkt; - } - } - if ( src_ps->FilterType() == TYPE_FILTER_NORMAL ) NextPacket(t, hdr, pkt, hdr_size, pkt_elem); else From fe6a05e2ade5151db7d22b7b6d39c04182e77145 Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Sun, 7 Aug 2011 15:33:50 -0700 Subject: [PATCH 008/651] Tunnel documentation tweaks. Checkpoint. Need to check cross-references once "make doc" works again. --- scripts/policy/frameworks/tunnel.bro | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/scripts/policy/frameworks/tunnel.bro b/scripts/policy/frameworks/tunnel.bro index 80e46c31a4..ebec2b0f06 100644 --- a/scripts/policy/frameworks/tunnel.bro +++ b/scripts/policy/frameworks/tunnel.bro @@ -2,7 +2,21 @@ ##! ##! Bro can decapsulate IPinIP and IPinUDP tunnels, were "IP" can be either ##! IPv4 or IPv6. The most common case will be decapsulating Teredo, 6to4, -##! 6in4, and AYIAY. +##! 6in4, and AYIAY. When this script is loaded, decapsulation will be +##! enabled. "tunnel.log" will log the "parent" for each tunneled +##! connection. The identity (and existence) of the tunnel connection +##! is otherwise lost. +##! +##! Currently handles: +##! +##! * IP6 in IP{4,6}. (IP4 in IP is easy to add, but omitted due to lack +##! of test cases. +##! * IP{4,6} in UDP. This decapsulates e.g., standard *Teredo* packets +##! (without authentication or origin indicator) +##! * IP{4,6} in AYIAY +##! * Only checks for UDP tunnels on Teredo's and AYIAY's default +##! ports. See :bro:id:`udp_tunnel_ports` and +##! :bro:id:`udp_tunnel_allports` ##! ##! Decapsulation happens early in a packets processing, right after IP ##! defragmentation but before there is a connection context. The tunnel @@ -11,13 +25,14 @@ ##! which is of type :bro:type:`parent_t`. ##! ##! *Limitation:* The decapsulated packets are not fed through the -##! defragmenter again. +##! defragmenter again and decapsulation happens only on the primary +##! path, i.e., it's not available for the secondary path. ##! ##! module Tunnel; -redef use_connection_compressor = F; +#redef use_connection_compressor = F; redef Tunnel::decapsulate_ip = T; redef Tunnel::decapsulate_udp = T; redef Tunnel::udp_tunnel_allports = T; From d0a67dc8bfc9bc21a164a17c57f3dcf65a08eedc Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Wed, 10 Aug 2011 13:47:02 -0700 Subject: [PATCH 009/651] Tweaking tunnel decapsulation. Changing names to comply with "Bro Scripting Conventions" Tweaking documentation. 
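Both the legacy decapsulation path removed above and the TunnelHandler code that replaces it rely on the same basic heuristic: only treat a UDP payload as an encapsulated packet if it starts with a plausible IP header, i.e. the version nibble is right and the header's total-length field matches the number of bytes actually available. A standalone sketch of the IPv4 side of that test; the function name is made up, and the real looks_like_IPv4_packet() and TunnelHandler::LookForIPHdr() add further checks (LookForIPHdr(), for instance, also restricts the encapsulated next-protocol field):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <arpa/inet.h>
    #include <netinet/ip.h>

    // Return true if 'data' plausibly holds a complete IPv4 packet of
    // exactly 'datalen' bytes (e.g. the remainder of a UDP payload).
    static bool plausible_ipv4(const uint8_t* data, int datalen)
        {
        if ( datalen < (int) sizeof(struct ip) )
            return false;                   // no room for a minimal header

        struct ip hdr;
        memcpy(&hdr, data, sizeof(hdr));    // copy to avoid unaligned access

        if ( hdr.ip_v != 4 )
            return false;                   // wrong version nibble

        // The encapsulated packet must account for the whole payload;
        // otherwise this is probably not a tunnel at all.
        return ntohs(hdr.ip_len) == datalen;
        }

    int main()
        {
        uint8_t pkt[20] = { 0x45 };         // version 4, header length 5 words
        pkt[3] = 20;                        // total length = 20 (network order)
        printf("%d\n", plausible_ipv4(pkt, sizeof(pkt)));
        return 0;
        }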
--- doc/scripts/DocSourcesList.cmake | 1 + scripts/base/init-bare.bro | 6 +++--- scripts/policy/frameworks/tunnel.bro | 16 ++++++++++++---- src/TunnelHandler.cc | 8 ++++---- src/TunnelHandler.h | 10 +++++----- src/types.bif | 16 ++++++++-------- 6 files changed, 33 insertions(+), 24 deletions(-) diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake index 54783f61b3..fbf93ce869 100644 --- a/doc/scripts/DocSourcesList.cmake +++ b/doc/scripts/DocSourcesList.cmake @@ -95,6 +95,7 @@ rest_target(${psd} policy/frameworks/dpd/detect-protocols.bro) rest_target(${psd} policy/frameworks/dpd/packet-segment-logging.bro) rest_target(${psd} policy/frameworks/software/version-changes.bro) rest_target(${psd} policy/frameworks/software/vulnerable.bro) +rest_target(${psd} policy/frameworks/tunnel.bro) rest_target(${psd} policy/integration/barnyard2/base.bro) rest_target(${psd} policy/integration/barnyard2/event.bro) rest_target(${psd} policy/integration/barnyard2/types.bro) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 2f83b99bf8..45357fde77 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -84,14 +84,14 @@ type AnalyzerID: count; module Tunnel; export { ## Records the identity of a the parent of a tunneled connection. - type parent_t: record { + type Parent: record { ## The 4-tuple of the tunnel "connection". In case of an IP-in-IP ## tunnel the ports will be set to 0. The direction (i.e., orig and ## resp) of the parent are set according to the tunneled connection ## and not according to the side that established the tunnel. cid: conn_id; ## The type of tunnel. - tunnel_type: tunneltype_t; + tunnel_type: Tunneltype; } &log; } # end export module GLOBAL; @@ -107,7 +107,7 @@ type connection: record { hot: count; # how hot; 0 = don't know or not hot history: string; uid: string; - tunnel_parent: Tunnel::parent_t &optional; + tunnel_parent: Tunnel::Parent &optional; }; type SYN_packet: record { diff --git a/scripts/policy/frameworks/tunnel.bro b/scripts/policy/frameworks/tunnel.bro index ebec2b0f06..a24bd6e1f6 100644 --- a/scripts/policy/frameworks/tunnel.bro +++ b/scripts/policy/frameworks/tunnel.bro @@ -22,7 +22,7 @@ ##! defragmentation but before there is a connection context. The tunnel ##! headers are stripped from packet and the identity of the parent is ##! is stored as the ``tunnel_parent`` member of :bro:type:`connection`, -##! which is of type :bro:type:`parent_t`. +##! which is of type :bro:type:`Tunnel::Parent`. ##! ##! *Limitation:* The decapsulated packets are not fed through the ##! defragmenter again and decapsulation happens only on the primary @@ -30,9 +30,12 @@ ##! ##! 
+@load base/protocols/conn + module Tunnel; #redef use_connection_compressor = F; +## enab redef Tunnel::decapsulate_ip = T; redef Tunnel::decapsulate_udp = T; redef Tunnel::udp_tunnel_allports = T; @@ -51,14 +54,19 @@ export { ## The child's transport protocol proto: transport_proto &log; ## The parent connection of IP-pair - parent: parent_t &log; + parent: Parent &log; + }; + global log_tunnel: event(rec: Info); + + redef record Conn::Info += { + ## If the connection is tunneled the type of tunnel + tunnel_type: Tunneltype &log &optional; }; - global log_conn: event(rec: Info); } event bro_init() { - Log::create_stream(TUNNEL, [$columns=Info, $ev=log_conn]); + Log::create_stream(TUNNEL, [$columns=Info, $ev=log_tunnel]); } event new_connection(c: connection) diff --git a/src/TunnelHandler.cc b/src/TunnelHandler.cc index 78428c700f..6b1f78e0c0 100644 --- a/src/TunnelHandler.cc +++ b/src/TunnelHandler.cc @@ -54,7 +54,7 @@ TunnelInfo* TunnelHandler::DecapsulateTunnel(const IP_Hdr *ip_hdr, int len, int // TODO: check if IP6 header makes sense tunnel_info = new TunnelInfo(); tunnel_info->child = new IP_Hdr((const struct ip6_hdr*)ip_hdr->Payload()); - tunnel_info->tunneltype = BifEnum::Tunnel::IP6inIP; + tunnel_info->tunneltype = BifEnum::Tunnel::IP6_IN_IP; tunnel_info->hdr_len = tunnel_info->child->HdrLen(); tunnel_info->SetParentIPs(ip_hdr); return tunnel_info; @@ -86,7 +86,7 @@ TunnelInfo* TunnelHandler::HandleUDP(const IP_Hdr *ip_hdr, int len, int caplen) const u_char *data = ip_hdr->Payload(); const struct udphdr* uh = (const struct udphdr*)data; IP_Hdr *cand_ip_hdr = 0; - BifEnum::Tunnel::tunneltype_t tunneltype = BifEnum::Tunnel::NONE; + BifEnum::Tunnel::Tunneltype tunneltype = BifEnum::Tunnel::NONE; int hdr_len = sizeof(struct udphdr); data += hdr_len; @@ -103,7 +103,7 @@ TunnelInfo* TunnelHandler::HandleUDP(const IP_Hdr *ip_hdr, int len, int caplen) if (cand_ip_hdr) { tunneltype = (cand_ip_hdr->IP4_Hdr()) ? - BifEnum::Tunnel::IP4inUDP : BifEnum::Tunnel::IP6inUDP; + BifEnum::Tunnel::IP4_IN_UDP : BifEnum::Tunnel::IP6_IN_UDP; } else if (datalen >= 8) { @@ -129,7 +129,7 @@ TunnelInfo* TunnelHandler::HandleUDP(const IP_Hdr *ip_hdr, int len, int caplen) { hdr_len += 8 + id_len + sig_len; tunneltype = (cand_ip_hdr->IP4_Hdr()) ? 
- BifEnum::Tunnel::IP4inAYIAY : BifEnum::Tunnel::IP6inAYIAY; + BifEnum::Tunnel::IP4_IN_AYIAY : BifEnum::Tunnel::IP6_IN_AYIAY; } } if (cand_ip_hdr) diff --git a/src/TunnelHandler.h b/src/TunnelHandler.h index 31c9791a1c..d88e6ff2b4 100644 --- a/src/TunnelHandler.h +++ b/src/TunnelHandler.h @@ -39,11 +39,11 @@ public: RecordVal* GetRecordVal() const { - RecordVal *rv = new RecordVal(BifType::Record::Tunnel::parent_t); + RecordVal *rv = new RecordVal(BifType::Record::Tunnel::Parent); TransportProto tproto; switch(tunneltype) { - case BifEnum::Tunnel::IP6inIP: - case BifEnum::Tunnel::IP4inIP: + case BifEnum::Tunnel::IP6_IN_IP: + case BifEnum::Tunnel::IP4_IN_IP: tproto = TRANSPORT_UNKNOWN; break; default: @@ -56,14 +56,14 @@ public: id_val->Assign(2, new AddrVal(parent.dst_addr)); id_val->Assign(3, new PortVal(ntohs(parent.dst_port), tproto)); rv->Assign(0, id_val); - rv->Assign(1, new EnumVal(tunneltype, BifType::Enum::Tunnel::tunneltype_t)); + rv->Assign(1, new EnumVal(tunneltype, BifType::Enum::Tunnel::Tunneltype)); return rv; } IP_Hdr *child; ConnID parent; int hdr_len; - BifEnum::Tunnel::tunneltype_t tunneltype; + BifEnum::Tunnel::Tunneltype tunneltype; }; class TunnelHandler { diff --git a/src/types.bif b/src/types.bif index 35c4db0daf..5f1c4b850b 100644 --- a/src/types.bif +++ b/src/types.bif @@ -168,16 +168,16 @@ enum ID %{ module Tunnel; -enum tunneltype_t %{ +enum Tunneltype %{ NONE, - IP6inIP, - IP4inIP, - IP6inUDP, - IP4inUDP, - IP6inAYIAY, - IP4inAYIAY, + IP6_IN_IP, + IP4_IN_IP, + IP6_IN_UDP, + IP4_IN_UDP, + IP6_IN_AYIAY, + IP4_IN_AYIAY, %} -type parent_t: record; +type Parent: record; module GLOBAL; From 65921bc61da9a27dffeba46a8d6fe706cdc31e75 Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Fri, 12 Aug 2011 21:42:47 -0700 Subject: [PATCH 010/651] Bugfix: an #ifdef BROv6 was missing in the tunnel code --- src/TunnelHandler.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/TunnelHandler.cc b/src/TunnelHandler.cc index 6b1f78e0c0..4df3f489ad 100644 --- a/src/TunnelHandler.cc +++ b/src/TunnelHandler.cc @@ -102,6 +102,7 @@ TunnelInfo* TunnelHandler::HandleUDP(const IP_Hdr *ip_hdr, int len, int caplen) cand_ip_hdr = LookForIPHdr(data, datalen); if (cand_ip_hdr) { + // Found and IP hdr directly in the UDP payload tunneltype = (cand_ip_hdr->IP4_Hdr()) ? BifEnum::Tunnel::IP4_IN_UDP : BifEnum::Tunnel::IP6_IN_UDP; } @@ -155,8 +156,10 @@ IP_Hdr* TunnelHandler::LookForIPHdr(const u_char *data, int datalen) const struct ip *ip4 = (const struct ip*)(data); if (ip4->ip_v == 4) cand_ip_hdr = new IP_Hdr((const struct ip*)ip4); +#ifdef BROv6 else if (ip4->ip_v == 6 && (datalen > (int)sizeof(struct ip6_hdr))) cand_ip_hdr = new IP_Hdr((const struct ip6_hdr*)data); +#endif if (cand_ip_hdr) { From 08dc84a2504483a3b5827dbf65e7cc68bf9e9516 Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Tue, 16 Aug 2011 20:41:36 -0700 Subject: [PATCH 011/651] Tunnel decapsulation bugfix when FlipRoles is called. If FlipRoles() is called the conn_val in Conn.cc gets Unref'ed and thus my tunnel_partent RecordVal was lost. Fixing this. 
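The shape of the fix that follows, reduced to plain C++ (std::string stands in for the script-layer RecordVal, and the type and member names are illustrative): the connection owns the tunnel description as an ordinary C++ object and converts it into a script-layer value every time the connection record is built, so dropping and rebuilding that record, which is what FlipRoles() ends up doing to conn_val, can no longer lose the tunnel information.

    #include <string>

    struct ParentDesc               // stand-in for TunnelParent
        {
        std::string src, dst;
        };

    struct Conn                     // stand-in for Connection
        {
        ParentDesc* tunnel_parent = nullptr;    // owned; nil if not tunneled

        // Called whenever the connection record is (re)built: a fresh value
        // is produced from the C++ state instead of handing out a cached one
        // that an earlier, discarded record may already have consumed.
        std::string BuildParentVal() const
            {
            return tunnel_parent ? tunnel_parent->src + " -> " + tunnel_parent->dst
                                 : std::string("none");
            }

        ~Conn() { delete tunnel_parent; }
        };

    int main()
        {
        Conn c;
        c.tunnel_parent = new ParentDesc{ "10.0.0.1", "192.0.2.1" };
        std::string first = c.BuildParentVal();
        std::string again = c.BuildParentVal();     // still intact after a rebuild
        return first == again ? 0 : 1;
        }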
--- src/Conn.cc | 9 ++++-- src/Conn.h | 5 +-- src/Sessions.cc | 8 ++--- src/TunnelHandler.cc | 4 +-- src/TunnelHandler.h | 75 ++++++++++++++++++++++++++++---------------- 5 files changed, 64 insertions(+), 37 deletions(-) diff --git a/src/Conn.cc b/src/Conn.cc index 67e337fda9..e3e56c5eef 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -15,6 +15,7 @@ #include "Timer.h" #include "PIA.h" #include "binpac.h" +#include "TunnelHandler.h" HashKey* ConnID::BuildConnKey() const { @@ -139,7 +140,7 @@ unsigned int Connection::external_connections = 0; IMPLEMENT_SERIAL(Connection, SER_CONNECTION); -Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, RecordVal *arg_tunnel_parent) +Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, TunnelParent* arg_tunnel_parent) { sessions = s; key = k; @@ -209,6 +210,9 @@ Connection::~Connection() Unref(conn_val); } + if ( tunnel_parent ) + delete tunnel_parent; + delete key; delete root_analyzer; delete conn_timer_mgr; @@ -372,7 +376,8 @@ RecordVal* Connection::BuildConnVal() char tmp[20]; conn_val->Assign(9, new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62))); - conn_val->Assign(10, tunnel_parent); + if ( tunnel_parent ) + conn_val->Assign(10, tunnel_parent->GetRecordVal()); } if ( root_analyzer ) diff --git a/src/Conn.h b/src/Conn.h index e22c0b83ec..828ad66e7c 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -23,6 +23,7 @@ class RuleHdrTest; class Specific_RE_Matcher; class TransportLayerAnalyzer; class RuleEndpointState; +class TunnelParent; typedef enum { NUL_IN_LINE, @@ -86,7 +87,7 @@ class Analyzer; class Connection : public BroObj { public: - Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, RecordVal *arg_tunnel_parent); + Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, TunnelParent *arg_tunnel_parent); virtual ~Connection(); // Invoked when connection is about to be removed. Use Ref(this) @@ -335,7 +336,7 @@ protected: double inactivity_timeout; RecordVal* conn_val; LoginConn* login_conn; // either nil, or this - RecordVal* tunnel_parent; // nil if not tunneled + TunnelParent* tunnel_parent; // nil if not tunneled int suppress_event; // suppress certain events to once per conn. unsigned int installed_status_timer:1; diff --git a/src/Sessions.cc b/src/Sessions.cc index fa0d573660..34bfa360dd 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -1018,11 +1018,8 @@ Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, int src_h = ntohs(id->src_port); int dst_h = ntohs(id->dst_port); int flags = 0; - RecordVal *tunnel_parent = 0; + TunnelParent *tunnel_parent = 0; - if ( tunnel_info ) - tunnel_parent = tunnel_info->GetRecordVal(); - // Hmm... This is not great. 
TransportProto tproto; switch ( proto ) { @@ -1068,6 +1065,9 @@ Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, id = &flip_id; } + if ( tunnel_info ) + tunnel_parent = new TunnelParent(&(tunnel_info->parent)); + Connection* conn = new Connection(this, k, t, id, tunnel_parent); conn->SetTransport(tproto); dpm->BuildInitialAnalyzerTree(tproto, conn, data); diff --git a/src/TunnelHandler.cc b/src/TunnelHandler.cc index 4df3f489ad..a5ee9cae3c 100644 --- a/src/TunnelHandler.cc +++ b/src/TunnelHandler.cc @@ -54,7 +54,7 @@ TunnelInfo* TunnelHandler::DecapsulateTunnel(const IP_Hdr *ip_hdr, int len, int // TODO: check if IP6 header makes sense tunnel_info = new TunnelInfo(); tunnel_info->child = new IP_Hdr((const struct ip6_hdr*)ip_hdr->Payload()); - tunnel_info->tunneltype = BifEnum::Tunnel::IP6_IN_IP; + tunnel_info->parent.tunneltype = BifEnum::Tunnel::IP6_IN_IP; tunnel_info->hdr_len = tunnel_info->child->HdrLen(); tunnel_info->SetParentIPs(ip_hdr); return tunnel_info; @@ -137,7 +137,7 @@ TunnelInfo* TunnelHandler::HandleUDP(const IP_Hdr *ip_hdr, int len, int caplen) { TunnelInfo *tunnel_info = new TunnelInfo(); tunnel_info->child = cand_ip_hdr; - tunnel_info->tunneltype = tunneltype; + tunnel_info->parent.tunneltype = tunneltype; tunnel_info->SetParentIPs(ip_hdr); tunnel_info->SetParentPorts(uh); tunnel_info->hdr_len = hdr_len + cand_ip_hdr->HdrLen(); diff --git a/src/TunnelHandler.h b/src/TunnelHandler.h index d88e6ff2b4..b5c69b7eff 100644 --- a/src/TunnelHandler.h +++ b/src/TunnelHandler.h @@ -4,37 +4,29 @@ #define tunnelhandler_h #include +#include "net_util.h" #include "IP.h" #include "Conn.h" #include "Sessions.h" #include "Val.h" - -class TunnelInfo { +class TunnelParent { public: - TunnelInfo() + TunnelParent() { - child = 0; tunneltype = BifEnum::Tunnel::NONE; - hdr_len = 0; - parent.src_addr = parent.dst_addr = 0; - parent.src_port = parent.dst_port = 0; - parent.is_one_way = 0; - } - ~TunnelInfo() - { - if (child) delete child; + src_port = dst_port = 0; + for (int i=0; iSrcAddr(); - parent.dst_addr = ip_hdr->DstAddr(); - } - void SetParentPorts(const struct udphdr *uh) - { - parent.src_port = uh->uh_sport; - parent.dst_port = uh->uh_dport; + tunneltype = other->tunneltype; + copy_addr(other->src_addr, src_addr); + copy_addr(other->dst_addr, dst_addr); + src_port = other->src_port; + dst_port = other->dst_port; } RecordVal* GetRecordVal() const @@ -51,21 +43,50 @@ public: } // end switch RecordVal* id_val = new RecordVal(conn_id); - id_val->Assign(0, new AddrVal(parent.src_addr)); - id_val->Assign(1, new PortVal(ntohs(parent.src_port), tproto)); - id_val->Assign(2, new AddrVal(parent.dst_addr)); - id_val->Assign(3, new PortVal(ntohs(parent.dst_port), tproto)); + id_val->Assign(0, new AddrVal(src_addr)); + id_val->Assign(1, new PortVal(ntohs(src_port), tproto)); + id_val->Assign(2, new AddrVal(dst_addr)); + id_val->Assign(3, new PortVal(ntohs(dst_port), tproto)); rv->Assign(0, id_val); rv->Assign(1, new EnumVal(tunneltype, BifType::Enum::Tunnel::Tunneltype)); return rv; } - IP_Hdr *child; - ConnID parent; - int hdr_len; + uint32 src_addr[NUM_ADDR_WORDS]; + uint32 dst_addr[NUM_ADDR_WORDS]; + uint16 src_port; + uint16 dst_port; BifEnum::Tunnel::Tunneltype tunneltype; }; +class TunnelInfo { +public: + TunnelInfo() + { + child = 0; + hdr_len = 0; + } + ~TunnelInfo() + { + if (child) delete child; + } + + void SetParentIPs(const IP_Hdr *ip_hdr) + { + copy_addr(ip_hdr->SrcAddr(), parent.src_addr); + copy_addr(ip_hdr->DstAddr(), parent.dst_addr); + } + void 
SetParentPorts(const struct udphdr *uh) + { + parent.src_port = uh->uh_sport; + parent.dst_port = uh->uh_dport; + } + + IP_Hdr *child; + TunnelParent parent; + int hdr_len; +}; + class TunnelHandler { public: TunnelHandler(NetSessions *arg_s); From 4214d8d9057d491cb6066d0b4e579c83fbbfc4cb Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Wed, 17 Aug 2011 07:31:25 -0700 Subject: [PATCH 012/651] Fixing memleak in tunnel code. --- src/Sessions.cc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/Sessions.cc b/src/Sessions.cc index 34bfa360dd..b6e19f4cf4 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -355,6 +355,12 @@ static bool looks_like_IPv4_packet(int len, const struct ip* ip_hdr) return false; } +static inline void delete_tunnel_info(TunnelInfo *ti) + { + if ( ti ) + delete ti; + } + void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, const IP_Hdr* ip_hdr, const u_char* const pkt, int hdr_size) @@ -438,6 +444,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, proto != IPPROTO_ICMP ) { dump_this_packet = 1; + delete_tunnel_info(tunnel_info); return; } @@ -449,6 +456,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, Weird("truncated_header", hdr, pkt); if ( f ) Remove(f); // ### + delete_tunnel_info(tunnel_info); return; } if ( caplen < min_hdr_len ) @@ -456,6 +464,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, Weird("internally_truncated_header", hdr, pkt); if ( f ) Remove(f); // ### + delete_tunnel_info(tunnel_info); return; } @@ -507,6 +516,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, default: Weird(fmt("unknown_protocol %d", proto), hdr, pkt); + delete_tunnel_info(tunnel_info); return; } @@ -536,6 +546,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( consistent < 0 ) { delete h; + delete_tunnel_info(tunnel_info); return; } @@ -558,7 +569,10 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } if ( ! conn ) + { + delete_tunnel_info(tunnel_info); return; + } int record_packet = 1; // whether to record the packet at all int record_content = 1; // whether to record its data From f3a92ec30b94be0fca8dfec5447e901b8847c7cc Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Tue, 23 Aug 2011 20:28:11 -0700 Subject: [PATCH 013/651] Minor fixes for possible leaks on uncommon code path or on initialization. --- src/Sessions.cc | 8 ++++++++ src/TunnelHandler.cc | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/Sessions.cc b/src/Sessions.cc index b6e19f4cf4..382bd9c5b3 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -444,6 +444,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, proto != IPPROTO_ICMP ) { dump_this_packet = 1; + if ( f ) + Remove(t); delete_tunnel_info(tunnel_info); return; } @@ -516,6 +518,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, default: Weird(fmt("unknown_protocol %d", proto), hdr, pkt); + if ( f ) + Remove(f); delete_tunnel_info(tunnel_info); return; } @@ -546,6 +550,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( consistent < 0 ) { delete h; + if ( f ) + Remove(f); delete_tunnel_info(tunnel_info); return; } @@ -571,6 +577,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( ! 
conn ) { delete_tunnel_info(tunnel_info); + if ( f ) + Remove(f); return; } diff --git a/src/TunnelHandler.cc b/src/TunnelHandler.cc index a5ee9cae3c..a7fbd4ce29 100644 --- a/src/TunnelHandler.cc +++ b/src/TunnelHandler.cc @@ -24,7 +24,8 @@ TunnelHandler::TunnelHandler(NetSessions *arg_s) // lookup. for (int i=0; i< 65536; i++) { - Unref(pv); + if (pv) + Unref(pv); pv = new PortVal(i, TRANSPORT_UDP); if (udp_tunnel_ports->Lookup(pv, false)) { From 1936989422311d62bb08dc1466f6e82bea8409c8 Mon Sep 17 00:00:00 2001 From: Gregor Maier Date: Tue, 23 Aug 2011 20:46:33 -0700 Subject: [PATCH 014/651] Fix compile error due to typo. --- src/Sessions.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Sessions.cc b/src/Sessions.cc index 382bd9c5b3..47696a1651 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -445,7 +445,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, { dump_this_packet = 1; if ( f ) - Remove(t); + Remove(f); delete_tunnel_info(tunnel_info); return; } From 6e6073ff4c4e01ab3171d3724dd6fe5f44df463e Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 17 Oct 2011 14:19:17 -0700 Subject: [PATCH 015/651] it compiles (but doesn't do anything useful) --- src/CMakeLists.txt | 4 ++ src/Func.cc | 3 ++ src/InputMgr.cc | 90 +++++++++++++++++++++++++++++++++++++++++ src/InputMgr.h | 24 +++++++++++ src/InputReader.cc | 12 ++++++ src/InputReader.h | 29 +++++++++++++ src/InputReaderAscii.cc | 12 ++++++ src/InputReaderAscii.h | 20 +++++++++ src/NetVar.cc | 2 + src/input.bif | 16 ++++++++ src/main.cc | 4 ++ src/types.bif | 7 ++++ 12 files changed, 223 insertions(+) create mode 100644 src/InputMgr.cc create mode 100644 src/InputMgr.h create mode 100644 src/InputReader.cc create mode 100644 src/InputReader.h create mode 100644 src/InputReaderAscii.cc create mode 100644 src/InputReaderAscii.h create mode 100644 src/input.bif diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b4779e1557..1693bad4eb 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -141,6 +141,7 @@ endmacro(GET_BIF_OUTPUT_FILES) set(BIF_SRCS bro.bif logging.bif + input.bif event.bif const.bif types.bif @@ -332,6 +333,9 @@ set(bro_SRCS IRC.cc List.cc Reporter.cc + InputMgr.cc + InputReader.cc + InputReaderAscii.cc LogMgr.cc LogWriter.cc LogWriterAscii.cc diff --git a/src/Func.cc b/src/Func.cc index 65cb22b09d..829bc89238 100644 --- a/src/Func.cc +++ b/src/Func.cc @@ -523,11 +523,13 @@ void builtin_error(const char* msg, BroObj* arg) #include "bro.bif.func_h" #include "logging.bif.func_h" +#include "input.bif.func_h" #include "reporter.bif.func_h" #include "strings.bif.func_h" #include "bro.bif.func_def" #include "logging.bif.func_def" +#include "input.bif.func_def" #include "reporter.bif.func_def" #include "strings.bif.func_def" @@ -542,6 +544,7 @@ void init_builtin_funcs() #include "bro.bif.func_init" #include "logging.bif.func_init" +#include "input.bif.func_init" #include "reporter.bif.func_init" #include "strings.bif.func_init" diff --git a/src/InputMgr.cc b/src/InputMgr.cc new file mode 100644 index 0000000000..cacc512bcf --- /dev/null +++ b/src/InputMgr.cc @@ -0,0 +1,90 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include + +#include "InputMgr.h" +#include "Event.h" +#include "EventHandler.h" +#include "NetVar.h" +#include "Net.h" + + +#include "InputReader.h" + +#include "InputReaderAscii.h" + + +struct InputReaderDefinition { + bro_int_t type; // the type + const char *name; // descriptive name for error messages + bool (*init)(); // optional one-time inifializing function + InputReader* (*factory)(); // factory function for creating instances +}; + +InputReaderDefinition input_readers[] = { + { BifEnum::Input::READER_ASCII, "Ascii", 0, InputReaderAscii::Instantiate }, + + // End marker + { BifEnum::Input::READER_DEFAULT, "None", 0, (InputReader* (*)())0 } +}; + +InputMgr::InputMgr() +{ + DBG_LOG(DBG_LOGGING, "this has to happen"); +} + + +// create a new input reader object to be used at whomevers leisure lateron. +InputReader* InputMgr::CreateReader(EnumVal* reader, string source) +{ + InputReaderDefinition* ir = input_readers; + exit(12); + + while ( true ) { + if ( ir->type == BifEnum::Input::READER_DEFAULT ) + { + DBG_LOG(DBG_LOGGING, "unknown reader when creating reader"); + reporter->Error("unknown reader when creating reader"); + return 0; + } + + if ( ir->type != reader->AsEnum() ) { + // no, didn't find the right one... + ++ir; + continue; + } + + + // call init function of writer if presnt + if ( ir->init ) + { + if ( (*ir->init)() ) + { + //clear it to be not called again + ir->init = 0; + } else { + // ohok. init failed, kill factory for all eternity + ir->factory = 0; + DBG_LOG(DBG_LOGGING, "failed to init input class %s", ir->name); + return 0; + } + + } + + if ( !ir->factory ) + // no factory? + return 0; + + // all done. break. + break; + } + + assert(ir->factory); + InputReader* reader_obj = (*ir->factory)(); + assert(reader_obj); + + return reader_obj; + +} + + diff --git a/src/InputMgr.h b/src/InputMgr.h new file mode 100644 index 0000000000..978481afba --- /dev/null +++ b/src/InputMgr.h @@ -0,0 +1,24 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef INPUTMGR_H +#define INPUTMGR_H + +#include "InputReader.h" +#include "BroString.h" + +#include "Val.h" +#include "EventHandler.h" +#include "RemoteSerializer.h" + + +class InputMgr { +public: + InputMgr(); + + InputReader* CreateReader(EnumVal* reader, string source); +}; + +extern InputMgr* input_mgr; + + +#endif /* INPUTMGR_H */ diff --git a/src/InputReader.cc b/src/InputReader.cc new file mode 100644 index 0000000000..f2b3b05801 --- /dev/null +++ b/src/InputReader.cc @@ -0,0 +1,12 @@ + +#include "InputReader.h" + +InputReader::InputReader() +{ + +} + +InputReader::~InputReader() +{ + +} \ No newline at end of file diff --git a/src/InputReader.h b/src/InputReader.h new file mode 100644 index 0000000000..ce8303383a --- /dev/null +++ b/src/InputReader.h @@ -0,0 +1,29 @@ +// See the file "COPYING" in the main distribution directory for copyright. +// +// Same notes about thread safety as in LogWriter.h apply. + + +#ifndef INPUTREADER_H +#define INPUTREADER_H + +class InputReader { +public: + InputReader(); + virtual ~InputReader(); + +protected: + // Methods that have to be overwritten by the individual readers + +private: + friend class InputMgr; + + // When an error occurs, this method is called to set a flag marking the + // writer as disabled. 
+ + bool disabled; + + bool Disabled() { return disabled; } +}; + + +#endif /* INPUTREADER_H */ diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc new file mode 100644 index 0000000000..97933c2a6e --- /dev/null +++ b/src/InputReaderAscii.cc @@ -0,0 +1,12 @@ + +#include "InputReaderAscii.h" +#include "DebugLogger.h" + +InputReaderAscii::InputReaderAscii() +{ + DBG_LOG(DBG_LOGGING, "input reader initialized"); +} + +InputReaderAscii::~InputReaderAscii() +{ +} \ No newline at end of file diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h new file mode 100644 index 0000000000..c471639fd4 --- /dev/null +++ b/src/InputReaderAscii.h @@ -0,0 +1,20 @@ + +#ifndef INPUTREADERASCII_H +#define INPUTREADERASCII_H + +#include "InputReader.h" + +class InputReaderAscii : public InputReader { +public: + InputReaderAscii(); + ~InputReaderAscii(); + + static InputReader* Instantiate() { return new InputReaderAscii; } + +protected: + +private: +}; + + +#endif /* INPUTREADERASCII_H */ diff --git a/src/NetVar.cc b/src/NetVar.cc index 25e4f7a0bc..5a6ac96fba 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -255,6 +255,7 @@ StringVal* cmd_line_bpf_filter; #include "types.bif.netvar_def" #include "event.bif.netvar_def" #include "logging.bif.netvar_def" +#include "input.bif.netvar_def" #include "reporter.bif.netvar_def" void init_event_handlers() @@ -315,6 +316,7 @@ void init_net_var() #include "const.bif.netvar_init" #include "types.bif.netvar_init" #include "logging.bif.netvar_init" +#include "input.bif.netvar_init" #include "reporter.bif.netvar_init" conn_id = internal_type("conn_id")->AsRecordType(); diff --git a/src/input.bif b/src/input.bif new file mode 100644 index 0000000000..edb6b4e9bb --- /dev/null +++ b/src/input.bif @@ -0,0 +1,16 @@ +# functions and types for the input framework + +module Input; + +%%{ +#include "InputMgr.h" +#include "NetVar.h" +%%} + +function Input::__create_reader%(reader: Input::Reader, source: string%) : bool + %{ + exit(5); + InputReader *the_reader = input_mgr->CreateReader(reader->AsEnumVal(), source->AsString()->CheckString()); + return new Val( the_reader != 0, TYPE_BOOL ); + %} + diff --git a/src/main.cc b/src/main.cc index dfa46c3050..b3f2512b40 100644 --- a/src/main.cc +++ b/src/main.cc @@ -30,6 +30,7 @@ extern "C" void OPENSSL_add_all_algorithms_conf(void); #include "File.h" #include "Reporter.h" #include "LogMgr.h" +#include "InputMgr.h" #include "Net.h" #include "NetVar.h" #include "Var.h" @@ -72,6 +73,7 @@ name_list prefixes; DNS_Mgr* dns_mgr; TimerMgr* timer_mgr; LogMgr* log_mgr; +InputMgr* input_mgr; Stmt* stmts; EventHandlerPtr net_done = 0; RuleMatcher* rule_matcher = 0; @@ -724,6 +726,8 @@ int main(int argc, char** argv) remote_serializer = new RemoteSerializer(); event_registry = new EventRegistry(); log_mgr = new LogMgr(); + + input_mgr = new InputMgr(); if ( events_file ) event_player = new EventPlayer(events_file); diff --git a/src/types.bif b/src/types.bif index da6bd6e031..ee43207ddd 100644 --- a/src/types.bif +++ b/src/types.bif @@ -167,4 +167,11 @@ enum ID %{ Unknown, %} +module Input; + +enum Reader %{ + READER_DEFAULT, + READER_ASCII, +%} + module GLOBAL; From 0eafeb03693e3134bb47219df43066a390a42d55 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 19 Oct 2011 13:16:09 -0700 Subject: [PATCH 016/651] works (thanks to robin) --- scripts/base/init-bare.bro | 2 ++ src/NetVar.h | 1 + src/input.bif | 1 - 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 
859a69f2dc..8d1d2a312f 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -1508,3 +1508,5 @@ const parse_udp_tunnels = F &redef; # Load the logging framework here because it uses fairly deep integration with # BiFs and script-land defined types. @load base/frameworks/logging + +@load base/input.bif diff --git a/src/NetVar.h b/src/NetVar.h index f8def230c0..957a86aeb3 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -264,6 +264,7 @@ extern void init_net_var(); #include "types.bif.netvar_h" #include "event.bif.netvar_h" #include "logging.bif.netvar_h" +#include "input.bif.netvar_h" #include "reporter.bif.netvar_h" #endif diff --git a/src/input.bif b/src/input.bif index edb6b4e9bb..3ff5284c63 100644 --- a/src/input.bif +++ b/src/input.bif @@ -9,7 +9,6 @@ module Input; function Input::__create_reader%(reader: Input::Reader, source: string%) : bool %{ - exit(5); InputReader *the_reader = input_mgr->CreateReader(reader->AsEnumVal(), source->AsString()->CheckString()); return new Val( the_reader != 0, TYPE_BOOL ); %} From f8be3519c7b868cd23829141b9588396df50e6ac Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 19 Oct 2011 15:41:07 -0700 Subject: [PATCH 017/651] well, it compiles. and perhaps it sends an event. billiant. --- src/InputMgr.cc | 20 +++++++++++++++++--- src/InputMgr.h | 10 +++++++++- src/InputReader.cc | 19 +++++++++++++++++++ src/InputReader.h | 15 ++++++++++++++- src/input.bif | 6 ++++-- 5 files changed, 63 insertions(+), 7 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index cacc512bcf..5ab602f4f3 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -35,11 +35,17 @@ InputMgr::InputMgr() // create a new input reader object to be used at whomevers leisure lateron. -InputReader* InputMgr::CreateReader(EnumVal* reader, string source) +InputReader* InputMgr::CreateReader(EnumVal* reader, string source, string eventName, RecordVal* eventDescription) { InputReaderDefinition* ir = input_readers; - exit(12); - + + RecordType* rtype = eventDescription->Type()->AsRecordType(); + if ( ! same_type(rtype, BifType::Record::Input::Event, 0) ) + { + reporter->Error("eventDescription argument not of right type"); + return 0; + } + while ( true ) { if ( ir->type == BifEnum::Input::READER_DEFAULT ) { @@ -86,5 +92,13 @@ InputReader* InputMgr::CreateReader(EnumVal* reader, string source) return reader_obj; } + +void InputMgr::Error(InputReader* reader, const char* msg) +{ + reporter->Error(fmt("error with input reader for %s: %s", + reader->Source().c_str(), msg)); +} + + diff --git a/src/InputMgr.h b/src/InputMgr.h index 978481afba..3d3d0f2d3f 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -10,12 +10,20 @@ #include "EventHandler.h" #include "RemoteSerializer.h" +class InputReader; class InputMgr { public: InputMgr(); - InputReader* CreateReader(EnumVal* reader, string source); + InputReader* CreateReader(EnumVal* reader, string source, string eventName, RecordVal* eventDescription); + +protected: + friend class InputReader; + + // Reports an error for the given reader. 
+ void Error(InputReader* reader, const char* msg); + }; extern InputMgr* input_mgr; diff --git a/src/InputReader.cc b/src/InputReader.cc index f2b3b05801..ef47bb1e10 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -1,5 +1,7 @@ #include "InputReader.h" +#include "EventRegistry.h" +#include "Event.h" InputReader::InputReader() { @@ -9,4 +11,21 @@ InputReader::InputReader() InputReader::~InputReader() { +} + +void InputReader::Error(const char *msg) +{ + input_mgr->Error(this, msg); +} + +bool InputReader::Init(string source, string eventName) { + EventHandler* handler = event_registry->Lookup(eventName.c_str()); + + if ( handler == 0 ) { + reporter->Error("Event %s not found", eventName.c_str()); + return false; + } + + mgr.Dispatch(new Event(handler, 0)); + return true; } \ No newline at end of file diff --git a/src/InputReader.h b/src/InputReader.h index ce8303383a..58a56e8221 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -6,16 +6,29 @@ #ifndef INPUTREADER_H #define INPUTREADER_H +#include "InputMgr.h" +#include "BroString.h" + class InputReader { public: InputReader(); virtual ~InputReader(); + + bool Init(string source, string eventName); protected: // Methods that have to be overwritten by the individual readers - + + // Reports an error to the user. + void Error(const char *msg); + + // The following methods return the information as passed to Init(). + const string Source() const { return source; } + private: friend class InputMgr; + + string source; // When an error occurs, this method is called to set a flag marking the // writer as disabled. diff --git a/src/input.bif b/src/input.bif index 3ff5284c63..e2fc2ae91e 100644 --- a/src/input.bif +++ b/src/input.bif @@ -7,9 +7,11 @@ module Input; #include "NetVar.h" %%} -function Input::__create_reader%(reader: Input::Reader, source: string%) : bool +type Event: record; + +function Input::__create_reader%(reader: Input::Reader, source: string, eventName: string, eventDescription: Input::Event%) : bool %{ - InputReader *the_reader = input_mgr->CreateReader(reader->AsEnumVal(), source->AsString()->CheckString()); + InputReader *the_reader = input_mgr->CreateReader(reader->AsEnumVal(), source->AsString()->CheckString(), eventName->AsString()->CheckString(), eventDescription->AsRecordVal()); return new Val( the_reader != 0, TYPE_BOOL ); %} From 9c8b0dec3b8622a592aa1c787d9bde7d53f45f5f Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 19 Oct 2011 16:35:34 -0700 Subject: [PATCH 018/651] event from c++ to script works (at last...) 
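The mechanism behind getting an event from C++ into script-land is the standard one used elsewhere in the core: look the handler up by name in the event registry, build a val_list with one Val per event argument, and hand an Event to the global event manager. A minimal sketch, assuming the Bro source tree of this series; the event name and its single count argument are made up for illustration, and the individual calls are the ones already used by the reader code in these patches:

    #include "Event.h"
    #include "EventHandler.h"
    #include "EventRegistry.h"
    #include "Val.h"
    #include "Reporter.h"

    // Raise a script-level event from the core. "Input::example_event" and
    // its single count argument are purely illustrative.
    static void raise_example_event()
        {
        EventHandler* handler = event_registry->Lookup("Input::example_event");
        if ( handler == 0 )
            {
            reporter->Error("Event %s not found", "Input::example_event");
            return;
            }

        val_list* vl = new val_list;
        vl->append(new Val(12, TYPE_COUNT));    // event arguments, in order

        mgr.Dispatch(new Event(handler, vl));   // queued for the script layer
        }

This mirrors what InputMgr::SendEvent() in the later patch does, with LogValToVal() filling the val_list from the values the reader delivered.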
--- scripts/base/frameworks/input/__load__.bro | 1 + scripts/base/frameworks/input/main.bro | 11 +++++++++++ scripts/base/init-bare.bro | 3 ++- src/InputMgr.cc | 7 ++++--- src/InputMgr.h | 2 +- src/InputReader.cc | 19 +++++++++++-------- src/LogMgr.cc | 2 +- src/input.bif | 4 ++-- 8 files changed, 33 insertions(+), 16 deletions(-) create mode 100644 scripts/base/frameworks/input/__load__.bro create mode 100644 scripts/base/frameworks/input/main.bro diff --git a/scripts/base/frameworks/input/__load__.bro b/scripts/base/frameworks/input/__load__.bro new file mode 100644 index 0000000000..a10fe855df --- /dev/null +++ b/scripts/base/frameworks/input/__load__.bro @@ -0,0 +1 @@ +@load ./main diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro new file mode 100644 index 0000000000..cb071872ac --- /dev/null +++ b/scripts/base/frameworks/input/main.bro @@ -0,0 +1,11 @@ + +module Input; + +export { + type Event: record { + name: string; + columns: any; + }; +} + +@load base/input.bif diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 8d1d2a312f..ade92bfd6e 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -1509,4 +1509,5 @@ const parse_udp_tunnels = F &redef; # BiFs and script-land defined types. @load base/frameworks/logging -@load base/input.bif +@load base/frameworks/input + diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 5ab602f4f3..f3fb2ff935 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -35,11 +35,11 @@ InputMgr::InputMgr() // create a new input reader object to be used at whomevers leisure lateron. -InputReader* InputMgr::CreateReader(EnumVal* reader, string source, string eventName, RecordVal* eventDescription) +InputReader* InputMgr::CreateReader(EnumVal* reader, string source, RecordVal* event) { InputReaderDefinition* ir = input_readers; - RecordType* rtype = eventDescription->Type()->AsRecordType(); + RecordType* rtype = InputReaderDefinition->Type()->AsRecordType(); if ( ! 
same_type(rtype, BifType::Record::Input::Event, 0) ) { reporter->Error("eventDescription argument not of right type"); @@ -49,7 +49,6 @@ InputReader* InputMgr::CreateReader(EnumVal* reader, string source, string event while ( true ) { if ( ir->type == BifEnum::Input::READER_DEFAULT ) { - DBG_LOG(DBG_LOGGING, "unknown reader when creating reader"); reporter->Error("unknown reader when creating reader"); return 0; } @@ -89,6 +88,8 @@ InputReader* InputMgr::CreateReader(EnumVal* reader, string source, string event InputReader* reader_obj = (*ir->factory)(); assert(reader_obj); + reader_obj->Init(source, eventName); + return reader_obj; } diff --git a/src/InputMgr.h b/src/InputMgr.h index 3d3d0f2d3f..e98f3d77b7 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -16,7 +16,7 @@ class InputMgr { public: InputMgr(); - InputReader* CreateReader(EnumVal* reader, string source, string eventName, RecordVal* eventDescription); + InputReader* CreateReader(EnumVal* reader, string source, RecordVal* event); protected: friend class InputReader; diff --git a/src/InputReader.cc b/src/InputReader.cc index ef47bb1e10..fc9be7f2b6 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -1,7 +1,7 @@ #include "InputReader.h" -#include "EventRegistry.h" -#include "Event.h" +// #include "EventRegistry.h" +// #include "Event.h" InputReader::InputReader() { @@ -19,13 +19,16 @@ void InputReader::Error(const char *msg) } bool InputReader::Init(string source, string eventName) { - EventHandler* handler = event_registry->Lookup(eventName.c_str()); + //EventHandler* handler = event_registry->Lookup(eventName.c_str()); + + //if ( handler == 0 ) { + // reporter->Error("Event %s not found", eventName.c_str()); + // return false; + //} - if ( handler == 0 ) { - reporter->Error("Event %s not found", eventName.c_str()); - return false; - } + //val_list* vl = new val_list; + //vl->append(new Val(12, TYPE_COUNT)); - mgr.Dispatch(new Event(handler, 0)); + //mgr.Dispatch(new Event(handler, vl)); return true; } \ No newline at end of file diff --git a/src/LogMgr.cc b/src/LogMgr.cc index 9e320f8810..fc4b89b0ed 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -475,7 +475,7 @@ void LogMgr::RemoveDisabledWriters(Stream* stream) stream->writers.erase(*j); } -bool LogMgr::CreateStream(EnumVal* id, RecordVal* sval) +bool LogMgr::(EnumVal* id, RecordVal* sval) { RecordType* rtype = sval->Type()->AsRecordType(); diff --git a/src/input.bif b/src/input.bif index e2fc2ae91e..88d4e32129 100644 --- a/src/input.bif +++ b/src/input.bif @@ -9,9 +9,9 @@ module Input; type Event: record; -function Input::__create_reader%(reader: Input::Reader, source: string, eventName: string, eventDescription: Input::Event%) : bool +function Input::__create_reader%(reader: Input::Reader, source: string, eventDescription: Input::Event%) : bool %{ - InputReader *the_reader = input_mgr->CreateReader(reader->AsEnumVal(), source->AsString()->CheckString(), eventName->AsString()->CheckString(), eventDescription->AsRecordVal()); + InputReader *the_reader = input_mgr->CreateReader(reader->AsEnumVal(), source->AsString()->CheckString(), eventDescription->AsRecordVal()); return new Val( the_reader != 0, TYPE_BOOL ); %} From 365406024627c8c2b0b3d41263840a7fbaf029ed Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 21 Oct 2011 14:01:18 -0700 Subject: [PATCH 019/651] compiles. sill doesn't do much. 
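What the ASCII reader is working toward here, and finishes in the following patch with its FieldMapping/columnMap, is a single pass over the header line: split it on tabs, match each column name against the fields the caller requested, and record for every column either the index of the matching field or a "skip" marker that the per-line parsing loop consults later. A standalone sketch of that header pass, with plain strings standing in for the LogField descriptions and illustrative field names in main():

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Map each tab-separated header column to the index of a requested
    // field, or to -1 when the column is not wanted.
    std::vector<int> map_header(const std::string& header,
                                const std::vector<std::string>& wanted)
        {
        std::vector<int> column_map;
        std::istringstream splitstream(header);
        std::string col;

        while ( std::getline(splitstream, col, '\t') )
            {
            int mapped = -1;
            for ( size_t i = 0; i < wanted.size(); i++ )
                if ( wanted[i] == col )
                    {
                    mapped = (int) i;
                    break;
                    }

            column_map.push_back(mapped);   // -1 means "skip this column"
            }

        return column_map;
        }

    int main()
        {
        std::vector<std::string> wanted;
        wanted.push_back("ts");
        wanted.push_back("id.orig_h");

        std::vector<int> m = map_header("ts\tuid\tid.orig_h", wanted);
        for ( size_t i = 0; i < m.size(); i++ )
            std::cout << "column " << i << " -> field " << m[i] << "\n";

        return 0;
        }

The real DoInit() additionally refuses to proceed if not every requested field shows up in the header.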
--- scripts/base/frameworks/input/main.bro | 8 ++++-- src/InputMgr.cc | 35 ++++++++++++++++++++---- src/InputMgr.h | 8 +++++- src/InputReader.cc | 29 ++++++++++---------- src/InputReader.h | 10 ++++++- src/InputReaderAscii.cc | 38 +++++++++++++++++++++++++- src/InputReaderAscii.h | 10 +++++++ src/LogMgr.cc | 2 +- src/input.bif | 6 ++-- 9 files changed, 117 insertions(+), 29 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index cb071872ac..b5a05af3b6 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -2,9 +2,11 @@ module Input; export { - type Event: record { - name: string; - columns: any; + type ReaderDescription: record { + source: string; + idx: any; + val: any; + destination: any; }; } diff --git a/src/InputMgr.cc b/src/InputMgr.cc index f3fb2ff935..500a325249 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -35,14 +35,14 @@ InputMgr::InputMgr() // create a new input reader object to be used at whomevers leisure lateron. -InputReader* InputMgr::CreateReader(EnumVal* reader, string source, RecordVal* event) +InputReader* InputMgr::CreateReader(EnumVal* reader, RecordVal* description) { InputReaderDefinition* ir = input_readers; - RecordType* rtype = InputReaderDefinition->Type()->AsRecordType(); - if ( ! same_type(rtype, BifType::Record::Input::Event, 0) ) + RecordType* rtype = description->Type()->AsRecordType(); + if ( ! same_type(rtype, BifType::Record::Input::ReaderDescription, 0) ) { - reporter->Error("eventDescription argument not of right type"); + reporter->Error("readerDescription argument not of right type"); return 0; } @@ -88,7 +88,11 @@ InputReader* InputMgr::CreateReader(EnumVal* reader, string source, RecordVal* e InputReader* reader_obj = (*ir->factory)(); assert(reader_obj); - reader_obj->Init(source, eventName); + // get the source... + const BroString* bsource = description->Lookup(rtype->FieldOffset("source"))->AsString(); + string source((const char*) bsource->Bytes(), bsource->Len()); + + reader_obj->Init(source, 0, NULL); return reader_obj; @@ -100,6 +104,27 @@ void InputMgr::Error(InputReader* reader, const char* msg) reader->Source().c_str(), msg)); } +/* + TODO: + +void InputMgr::SendEvent(string name) { + //EventHandler* handler = event_registry->Lookup(eventName.c_str()); + + //if ( handler == 0 ) { + // reporter->Error("Event %s not found", eventName.c_str()); + // return false; + //} + + //val_list* vl = new val_list; + //vl->append(new Val(12, TYPE_COUNT)); + + //mgr.Dispatch(new Event(handler, vl)); + + +} + +*/ + diff --git a/src/InputMgr.h b/src/InputMgr.h index e98f3d77b7..255d61fe5e 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -9,20 +9,26 @@ #include "Val.h" #include "EventHandler.h" #include "RemoteSerializer.h" +#include "LogMgr.h" // for the LogVal and LogType data types class InputReader; + class InputMgr { public: InputMgr(); - InputReader* CreateReader(EnumVal* reader, string source, RecordVal* event); + InputReader* CreateReader(EnumVal* reader, RecordVal* description); protected: friend class InputReader; // Reports an error for the given reader. 
void Error(InputReader* reader, const char* msg); + +private: + // required functionality + // InputValsToRecord to convert received inputvals back to bro records / tables / whatever }; diff --git a/src/InputReader.cc b/src/InputReader.cc index fc9be7f2b6..4d29040e60 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -5,7 +5,7 @@ InputReader::InputReader() { - + disabled = true; // disabled will be set correcty in init. } InputReader::~InputReader() @@ -18,17 +18,18 @@ void InputReader::Error(const char *msg) input_mgr->Error(this, msg); } -bool InputReader::Init(string source, string eventName) { - //EventHandler* handler = event_registry->Lookup(eventName.c_str()); - - //if ( handler == 0 ) { - // reporter->Error("Event %s not found", eventName.c_str()); - // return false; - //} - - //val_list* vl = new val_list; - //vl->append(new Val(12, TYPE_COUNT)); - - //mgr.Dispatch(new Event(handler, vl)); - return true; +bool InputReader::Init(string arg_source, int arg_num_fields, + const LogField* const * arg_fields) +{ + source = arg_source; + num_fields = arg_num_fields; + fields = arg_fields; + + // disable if DoInit returns error. + disabled = !DoInit(arg_source, arg_num_fields, arg_fields); + return !disabled; +} + +void InputReader::Finish() { + DoFinish(); } \ No newline at end of file diff --git a/src/InputReader.h b/src/InputReader.h index 58a56e8221..f3638c7246 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -8,16 +8,22 @@ #include "InputMgr.h" #include "BroString.h" +#include "LogMgr.h" class InputReader { public: InputReader(); virtual ~InputReader(); - bool Init(string source, string eventName); + bool Init(string arg_source, int num_fields, const LogField* const* fields); + void Finish(); + protected: // Methods that have to be overwritten by the individual readers + virtual bool DoInit(string arg_source, int num_fields, const LogField* const * fields) = 0; + + virtual void DoFinish() = 0; // Reports an error to the user. void Error(const char *msg); @@ -29,6 +35,8 @@ private: friend class InputMgr; string source; + int num_fields; + const LogField* const * fields; // When an error occurs, this method is called to set a flag marking the // writer as disabled. diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 97933c2a6e..b9aab16815 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -2,11 +2,47 @@ #include "InputReaderAscii.h" #include "DebugLogger.h" +#include + InputReaderAscii::InputReaderAscii() { - DBG_LOG(DBG_LOGGING, "input reader initialized"); + //DBG_LOG(DBG_LOGGING, "input reader initialized"); + file = 0; } InputReaderAscii::~InputReaderAscii() { +} + +void InputReaderAscii::DoFinish() +{ +} + +bool InputReaderAscii::DoInit(string path, int num_fields, + const LogField* const * fields) +{ + fname = path; + + file = new ifstream(path.c_str()); + if ( !file->is_open() ) { + return false; + } + + // try to read the header line... + string line; + if ( !getline(*file, line) ) + return false; + + // split on tabs... 
+ istringstream ss(line); + while ( ss ) { + string s; + if ( !getline(ss, s, '\t')) + break; + + + } + + + return false; } \ No newline at end of file diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index c471639fd4..0d2008ed7f 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -3,6 +3,9 @@ #define INPUTREADERASCII_H #include "InputReader.h" +#include +#include + class InputReaderAscii : public InputReader { public: @@ -12,8 +15,15 @@ public: static InputReader* Instantiate() { return new InputReaderAscii; } protected: + + virtual bool DoInit(string path, int num_fields, + const LogField* const * fields); + virtual void DoFinish(); private: + + ifstream* file; + string fname; }; diff --git a/src/LogMgr.cc b/src/LogMgr.cc index fc4b89b0ed..9e320f8810 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -475,7 +475,7 @@ void LogMgr::RemoveDisabledWriters(Stream* stream) stream->writers.erase(*j); } -bool LogMgr::(EnumVal* id, RecordVal* sval) +bool LogMgr::CreateStream(EnumVal* id, RecordVal* sval) { RecordType* rtype = sval->Type()->AsRecordType(); diff --git a/src/input.bif b/src/input.bif index 88d4e32129..3da869ea08 100644 --- a/src/input.bif +++ b/src/input.bif @@ -7,11 +7,11 @@ module Input; #include "NetVar.h" %%} -type Event: record; +type ReaderDescription: record; -function Input::__create_reader%(reader: Input::Reader, source: string, eventDescription: Input::Event%) : bool +function Input::__create_reader%(reader: Input::Reader, description: Input::ReaderDescription%) : bool %{ - InputReader *the_reader = input_mgr->CreateReader(reader->AsEnumVal(), source->AsString()->CheckString(), eventDescription->AsRecordVal()); + InputReader *the_reader = input_mgr->CreateReader(reader->AsEnumVal(), description->AsRecordVal()); return new Val( the_reader != 0, TYPE_BOOL ); %} From d7a3b85fcda42a1485ef7a53c5419100e4b5ea95 Mon Sep 17 00:00:00 2001 From: amannb Date: Tue, 25 Oct 2011 11:47:23 -0700 Subject: [PATCH 020/651] many helper functions --- src/InputMgr.cc | 71 +++++++++++++++------ src/InputMgr.h | 4 +- src/InputReader.cc | 33 +++++++++- src/InputReader.h | 12 ++++ src/InputReaderAscii.cc | 134 +++++++++++++++++++++++++++++++++++++--- src/InputReaderAscii.h | 22 +++++++ 6 files changed, 245 insertions(+), 31 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 500a325249..ec46c55813 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -100,31 +100,62 @@ InputReader* InputMgr::CreateReader(EnumVal* reader, RecordVal* description) void InputMgr::Error(InputReader* reader, const char* msg) { - reporter->Error(fmt("error with input reader for %s: %s", - reader->Source().c_str(), msg)); + reporter->Error("error with input reader for %s: %s", reader->Source().c_str(), msg); } -/* - TODO: -void InputMgr::SendEvent(string name) { - //EventHandler* handler = event_registry->Lookup(eventName.c_str()); - - //if ( handler == 0 ) { - // reporter->Error("Event %s not found", eventName.c_str()); - // return false; - //} - - //val_list* vl = new val_list; - //vl->append(new Val(12, TYPE_COUNT)); - - //mgr.Dispatch(new Event(handler, vl)); +void InputMgr::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) +{ + EventHandler* handler = event_registry->Lookup(name.c_str()); + if ( handler == 0 ) { + reporter->Error("Event %s not found", name.c_str()); + return; + } + val_list* vl = new val_list; + for ( int i = 0; i < num_vals; i++) { + vl->append(LogValToVal(vals[i])); + } + + mgr.Dispatch(new Event(handler, vl)); +} + +Val* 
InputMgr::LogValToVal(const LogVal* val) { + switch ( val->type ) { + case TYPE_BOOL: + case TYPE_INT: + return new Val(val->val.int_val, val->type); + break; + + case TYPE_COUNT: + case TYPE_COUNTER: + return new Val(val->val.uint_val, val->type); + break; + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + return new Val(val->val.double_val, val->type); + break; + + case TYPE_STRING: + { + BroString *s = new BroString(*(val->val.string_val)); + return new StringVal(s); + break; + } + + case TYPE_PORT: + return new PortVal(val->val.uint_val); + break; + + default: + reporter->InternalError("unsupported type for input_read"); + } + + + reporter->InternalError("Impossible error"); + return NULL; } - -*/ - - diff --git a/src/InputMgr.h b/src/InputMgr.h index 255d61fe5e..79f76a1e6f 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -13,7 +13,6 @@ class InputReader; - class InputMgr { public: InputMgr(); @@ -29,6 +28,9 @@ protected: private: // required functionality // InputValsToRecord to convert received inputvals back to bro records / tables / whatever + Val* LogValToVal(const LogVal* val); + + void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); }; diff --git a/src/InputReader.cc b/src/InputReader.cc index 4d29040e60..d812349733 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -32,4 +32,35 @@ bool InputReader::Init(string arg_source, int arg_num_fields, void InputReader::Finish() { DoFinish(); -} \ No newline at end of file +} + +bool InputReader::Update() { + return DoUpdate(); +} + +// stolen from logwriter +const char* InputReader::Fmt(const char* format, ...) + { + if ( ! buf ) + buf = (char*) malloc(buf_len); + + va_list al; + va_start(al, format); + int n = safe_vsnprintf(buf, buf_len, format, al); + va_end(al); + + if ( (unsigned int) n >= buf_len ) + { // Not enough room, grow the buffer. + buf_len = n + 32; + buf = (char*) realloc(buf, buf_len); + + // Is it portable to restart? + va_start(al, format); + n = safe_vsnprintf(buf, buf_len, format, al); + va_end(al); + } + + return buf; + } + + diff --git a/src/InputReader.h b/src/InputReader.h index f3638c7246..9d776276fa 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -18,12 +18,17 @@ public: bool Init(string arg_source, int num_fields, const LogField* const* fields); void Finish(); + + bool Update(); protected: // Methods that have to be overwritten by the individual readers virtual bool DoInit(string arg_source, int num_fields, const LogField* const * fields) = 0; virtual void DoFinish() = 0; + + // update file contents to logmgr + virtual bool DoUpdate() = 0; // Reports an error to the user. void Error(const char *msg); @@ -31,6 +36,9 @@ protected: // The following methods return the information as passed to Init(). const string Source() const { return source; } + // A thread-safe version of fmt(). (stolen from logwriter) + const char* Fmt(const char* format, ...); + private: friend class InputMgr; @@ -44,6 +52,10 @@ private: bool disabled; bool Disabled() { return disabled; } + + // For implementing Fmt(). + char* buf; + unsigned int buf_len; }; diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index b9aab16815..4bc4b81cda 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -1,9 +1,22 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
#include "InputReaderAscii.h" #include "DebugLogger.h" #include +FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position) + : name(arg_name), type(arg_type) +{ + position = arg_position; +} + +FieldMapping::FieldMapping(const FieldMapping& arg) + : name(arg.name), type(arg.type) +{ + position = arg.position; +} + InputReaderAscii::InputReaderAscii() { //DBG_LOG(DBG_LOGGING, "input reader initialized"); @@ -18,31 +31,134 @@ void InputReaderAscii::DoFinish() { } -bool InputReaderAscii::DoInit(string path, int num_fields, - const LogField* const * fields) +bool InputReaderAscii::DoInit(string path, int num_fields, const LogField* const * fields) { fname = path; file = new ifstream(path.c_str()); if ( !file->is_open() ) { + Error(Fmt("cannot open %s", path.c_str())); return false; } // try to read the header line... string line; - if ( !getline(*file, line) ) + if ( !getline(*file, line) ) { + Error("could not read first line"); return false; + } // split on tabs... - istringstream ss(line); - while ( ss ) { + istringstream splitstream(line); + unsigned int currTab = 0; + int wantFields = 0; + while ( splitstream ) { string s; - if ( !getline(ss, s, '\t')) + if ( !getline(splitstream, s, '\t')) break; - + // current found heading in s... compare if we want it + for ( int i = 0; i < num_fields; i++ ) { + const LogField* field = fields[i]; + if ( field->name == s ) { + // cool, found field. note position + FieldMapping f(field->name, field->type, i); + columnMap.push_back(f); + wantFields++; + break; // done with searching + } + } + + // look if we did push something... + if ( columnMap.size() == currTab ) { + // no, we didn't. note that... + FieldMapping empty; + columnMap.push_back(empty); + } + + // done + currTab++; } + + if ( wantFields != num_fields ) { + // we did not find all fields? + // :( + Error("wantFields != num_fields"); + return false; + } + + this->num_fields = num_fields; - return false; -} \ No newline at end of file + // well, that seems to have worked... + return true; +} + +// read the entire file and send appropriate thingies back to InputMgr +bool InputReaderAscii::DoUpdate() { + // TODO: all the stuff we need for a second reading. + // *cough* + // + + + string line; + while ( getline(*file, line ) ) { + // split on tabs + + istringstream splitstream(line); + string s; + + LogVal fields[num_fields]; + + unsigned int currTab = 0; + unsigned int currField = 0; + while ( splitstream ) { + if ( !getline(splitstream, s, '\t') ) + break; + + + if ( currTab >= columnMap.size() ) { + Error("Tabs in heading do not match tabs in data?"); + //disabled = true; + return false; + } + + FieldMapping currMapping = columnMap[currTab]; + currTab++; + + if ( currMapping.IsEmpty() ) { + // well, that was easy + continue; + } + + if ( currField >= num_fields ) { + Error("internal error - fieldnum greater as possible"); + return false; + } + + LogVal val(currMapping.type, true); + + switch ( currMapping.type ) { + case TYPE_STRING: + val.val.string_val = new string(s); + + default: + Error(Fmt("unsupported field format %d for %s", currMapping.type, + currMapping.name.c_str())); + return false; + } + + currField++; + } + + if ( currField != num_fields ) { + Error("curr_field != num_fields in DoUpdate"); + return false; + } + + // ok, now we have built our line. send it back to... whomever. 
+ + } + + return true; +} diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index 0d2008ed7f..551a08b02e 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -1,3 +1,4 @@ +// See the file "COPYING" in the main distribution directory for copyright. #ifndef INPUTREADERASCII_H #define INPUTREADERASCII_H @@ -5,6 +6,19 @@ #include "InputReader.h" #include #include +#include + +// Description for input field mapping +struct FieldMapping { + string name; + TypeTag type; + int position; + + FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position); + FieldMapping(const FieldMapping& arg); + FieldMapping() { position = -1; } + bool IsEmpty() { return position == -1; } +}; class InputReaderAscii : public InputReader { @@ -19,11 +33,19 @@ protected: virtual bool DoInit(string path, int num_fields, const LogField* const * fields); virtual void DoFinish(); + + virtual bool DoUpdate(); private: ifstream* file; string fname; + + unsigned int num_fields; + + // map columns in the file to columns to send back to the manager + vector columnMap; + }; From 5b0c307f87a7213951e586f6e881501dc658f03d Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 25 Oct 2011 14:11:21 -0700 Subject: [PATCH 021/651] very basic input to event working... --- scripts/base/frameworks/input/main.bro | 4 ++-- src/InputMgr.cc | 18 ++++++++++++++++-- src/InputReader.cc | 11 +++++++++++ src/InputReader.h | 3 +++ src/InputReaderAscii.cc | 14 ++++++++++---- 5 files changed, 42 insertions(+), 8 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index b5a05af3b6..4bea9d73d2 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -5,8 +5,8 @@ export { type ReaderDescription: record { source: string; idx: any; - val: any; - destination: any; + val: any &optional; + destination: any &optional; }; } diff --git a/src/InputMgr.cc b/src/InputMgr.cc index ec46c55813..5cd82b613f 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -91,8 +91,22 @@ InputReader* InputMgr::CreateReader(EnumVal* reader, RecordVal* description) // get the source... const BroString* bsource = description->Lookup(rtype->FieldOffset("source"))->AsString(); string source((const char*) bsource->Bytes(), bsource->Len()); - - reader_obj->Init(source, 0, NULL); + + RecordType *idx = description->Lookup(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); + + LogField** fields = new LogField*[idx->NumFields()]; + for ( int i = 0; i < idx->NumFields(); i++ ) + { + // FIXME: do type checking... + LogField* field = new LogField(); + field->name = idx->FieldName(i); + field->type = idx->FieldType(i)->Tag(); + fields[i] = field; + } + + + reader_obj->Init(source, idx->NumFields(), fields); + reader_obj->Update(); return reader_obj; diff --git a/src/InputReader.cc b/src/InputReader.cc index d812349733..d512bc1699 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -5,6 +5,8 @@ InputReader::InputReader() { + buf = 0; + buf_len = 1024; disabled = true; // disabled will be set correcty in init. 
} @@ -18,6 +20,11 @@ void InputReader::Error(const char *msg) input_mgr->Error(this, msg); } +void InputReader::Error(const string &msg) + { + input_mgr->Error(this, msg.c_str()); + } + bool InputReader::Init(string arg_source, int arg_num_fields, const LogField* const * arg_fields) { @@ -38,6 +45,10 @@ bool InputReader::Update() { return DoUpdate(); } +void InputReader::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) { + input_mgr->SendEvent(name, num_vals, vals); +} + // stolen from logwriter const char* InputReader::Fmt(const char* format, ...) { diff --git a/src/InputReader.h b/src/InputReader.h index 9d776276fa..3bfb0adf91 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -31,6 +31,7 @@ protected: virtual bool DoUpdate() = 0; // Reports an error to the user. + void Error(const string &msg); void Error(const char *msg); // The following methods return the information as passed to Init(). @@ -39,6 +40,8 @@ protected: // A thread-safe version of fmt(). (stolen from logwriter) const char* Fmt(const char* format, ...); + void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); + private: friend class InputMgr; diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 4bc4b81cda..5d6b23416a 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -37,7 +37,7 @@ bool InputReaderAscii::DoInit(string path, int num_fields, const LogField* const file = new ifstream(path.c_str()); if ( !file->is_open() ) { - Error(Fmt("cannot open %s", path.c_str())); + Error(Fmt("cannot open %s", fname.c_str())); return false; } @@ -108,7 +108,7 @@ bool InputReaderAscii::DoUpdate() { istringstream splitstream(line); string s; - LogVal fields[num_fields]; + LogVal** fields = new LogVal*[num_fields]; unsigned int currTab = 0; unsigned int currField = 0; @@ -136,11 +136,12 @@ bool InputReaderAscii::DoUpdate() { return false; } - LogVal val(currMapping.type, true); + LogVal* val = new LogVal(currMapping.type, true); switch ( currMapping.type ) { case TYPE_STRING: - val.val.string_val = new string(s); + val->val.string_val = new string(s); + break; default: Error(Fmt("unsupported field format %d for %s", currMapping.type, @@ -148,6 +149,8 @@ bool InputReaderAscii::DoUpdate() { return false; } + fields[currField] = val; + currField++; } @@ -157,6 +160,9 @@ bool InputReaderAscii::DoUpdate() { } // ok, now we have built our line. send it back to... whomever. + // for testing purposes: fixed event. + + SendEvent("inputEvent", num_fields, fields); } From b245d4168a6c9d9ce9243a34f7fcc704d526a7d9 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 26 Oct 2011 17:02:57 -0700 Subject: [PATCH 022/651] yay, basic table assignment. 
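
A rough usage sketch of what this commit enables (the sketch is not part of the
patch; the type, table, and file names are made up for illustration, and only a
single index column and a single value column are handled at this point):

    module InputTest;

    type Idx: record {
        host: string;
    };

    type ValRec: record {
        counter: count;
    };

    global destination_table: table[string] of count;

    event bro_init()
        {
        # "input.txt" is a tab-separated file whose header line names the
        # columns "host" and "counter", matching the record fields above.
        if ( ! Input::__create_reader(Input::READER_ASCII,
                                      [$source="input.txt", $idx=Idx,
                                       $val=ValRec,
                                       $destination=destination_table]) )
            print "could not create input reader";
        }

Besides assigning into the destination table, the reader still fires the
hard-coded test event "inputEvent" once per input line.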
--- scripts/base/frameworks/input/main.bro | 4 +- src/InputMgr.cc | 60 ++++++++++++++++++++++++-- src/InputMgr.h | 8 +++- src/InputReader.cc | 5 +++ src/InputReader.h | 2 + src/InputReaderAscii.cc | 16 +++++++ 6 files changed, 87 insertions(+), 8 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 4bea9d73d2..b5a05af3b6 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -5,8 +5,8 @@ export { type ReaderDescription: record { source: string; idx: any; - val: any &optional; - destination: any &optional; + val: any; + destination: any; }; } diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 5cd82b613f..5c30922863 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -13,6 +13,16 @@ #include "InputReaderAscii.h" +struct InputMgr::ReaderInfo { + EnumVal* id; + EnumVal* type; + InputReader* reader; + unsigned int num_idx_fields; + unsigned int num_val_fields; + + TableVal* tab; + + }; struct InputReaderDefinition { bro_int_t type; // the type @@ -30,7 +40,7 @@ InputReaderDefinition input_readers[] = { InputMgr::InputMgr() { - DBG_LOG(DBG_LOGGING, "this has to happen"); + //DBG_LOG(DBG_LOGGING, "this has to happen"); } @@ -93,8 +103,10 @@ InputReader* InputMgr::CreateReader(EnumVal* reader, RecordVal* description) string source((const char*) bsource->Bytes(), bsource->Len()); RecordType *idx = description->Lookup(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); + RecordType *val = description->Lookup(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); + TableVal *dst = description->Lookup(rtype->FieldOffset("destination"))->AsTableVal(); - LogField** fields = new LogField*[idx->NumFields()]; + LogField** fields = new LogField*[idx->NumFields() + val->NumFields()]; for ( int i = 0; i < idx->NumFields(); i++ ) { // FIXME: do type checking... @@ -103,15 +115,43 @@ InputReader* InputMgr::CreateReader(EnumVal* reader, RecordVal* description) field->type = idx->FieldType(i)->Tag(); fields[i] = field; } + for ( int i = 0; i < val->NumFields(); i++ ) + { + // FIXME: do type checking... 
+ LogField* field = new LogField(); + field->name = val->FieldName(i); + field->type = val->FieldType(i)->Tag(); + fields[idx->NumFields() + i] = field; + } + + ReaderInfo* info = new ReaderInfo; + info->reader = reader_obj; + info->type = reader; + info->num_idx_fields = idx->NumFields(); + info->num_val_fields = val->NumFields(); + info->tab = dst; + readers.push_back(info); - reader_obj->Init(source, idx->NumFields(), fields); + reader_obj->Init(source, idx->NumFields() + val->NumFields(), fields); reader_obj->Update(); return reader_obj; } +void InputMgr::Put(const InputReader* reader, const LogVal* const *vals) { + ReaderInfo *i = FindReader(reader); + if ( i == 0 ) { + reporter->InternalError("Unknown reader"); + return; + } + + i->tab->Assign(LogValToVal(vals[0]), LogValToVal(vals[1])); + reporter->Error("assigned"); +} + + void InputMgr::Error(InputReader* reader, const char* msg) { reporter->Error("error with input reader for %s: %s", reader->Source().c_str(), msg); @@ -172,4 +212,16 @@ Val* InputMgr::LogValToVal(const LogVal* val) { return NULL; } - +InputMgr::ReaderInfo* InputMgr::FindReader(const InputReader* reader) + { + for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) + { + if ( (*s)->reader == reader ) + { + return *s; + } + } + + return 0; + } + diff --git a/src/InputMgr.h b/src/InputMgr.h index 79f76a1e6f..136be2d608 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -24,14 +24,18 @@ protected: // Reports an error for the given reader. void Error(InputReader* reader, const char* msg); + + void Put(const InputReader* reader, const LogVal* const *vals); private: - // required functionality - // InputValsToRecord to convert received inputvals back to bro records / tables / whatever + struct ReaderInfo; + Val* LogValToVal(const LogVal* val); void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); + ReaderInfo* FindReader(const InputReader* reader); + vector readers; }; extern InputMgr* input_mgr; diff --git a/src/InputReader.cc b/src/InputReader.cc index d512bc1699..6502fdb421 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -25,6 +25,11 @@ void InputReader::Error(const string &msg) input_mgr->Error(this, msg.c_str()); } +void InputReader::Put(const LogVal* const *val) +{ + input_mgr->Put(this, val); +} + bool InputReader::Init(string arg_source, int arg_num_fields, const LogField* const * arg_fields) { diff --git a/src/InputReader.h b/src/InputReader.h index 3bfb0adf91..3725c3d461 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -42,6 +42,8 @@ protected: void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); + void Put(const LogVal* const *val); + private: friend class InputMgr; diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 5d6b23416a..d0d4a3014c 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -143,6 +143,21 @@ bool InputReaderAscii::DoUpdate() { val->val.string_val = new string(s); break; + case TYPE_BOOL: + case TYPE_INT: + val->val.int_val = atoi(s.c_str()); + break; + + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + val->val.double_val = atof(s.c_str()); + break; + + case TYPE_COUNT: + val->val.uint_val = atoi(s.c_str()); + break; + default: Error(Fmt("unsupported field format %d for %s", currMapping.type, currMapping.name.c_str())); @@ -163,6 +178,7 @@ bool InputReaderAscii::DoUpdate() { // for testing purposes: fixed event. 
SendEvent("inputEvent", num_fields, fields); + Put(fields); } From 86730c13dda7df06fa96b151c17ec437aa742b4f Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 26 Oct 2011 17:46:43 -0700 Subject: [PATCH 023/651] more complex types... --- scripts/base/frameworks/input/main.bro | 3 + src/InputMgr.cc | 314 +++++++++++++++++++++++-- src/InputMgr.h | 17 +- src/InputReader.cc | 11 + src/InputReader.h | 2 + src/InputReaderAscii.cc | 35 ++- src/input.bif | 17 +- src/types.bif | 4 + 8 files changed, 372 insertions(+), 31 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index b5a05af3b6..4bb7129d03 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -2,11 +2,14 @@ module Input; export { + const default_reader = READER_ASCII &redef; + type ReaderDescription: record { source: string; idx: any; val: any; destination: any; + reader: Reader &default=default_reader; }; } diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 5c30922863..648a933a22 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -21,6 +21,8 @@ struct InputMgr::ReaderInfo { unsigned int num_val_fields; TableVal* tab; + RecordType* rtype; + RecordType* itype; }; @@ -45,7 +47,7 @@ InputMgr::InputMgr() // create a new input reader object to be used at whomevers leisure lateron. -InputReader* InputMgr::CreateReader(EnumVal* reader, RecordVal* description) +InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) { InputReaderDefinition* ir = input_readers; @@ -55,6 +57,8 @@ InputReader* InputMgr::CreateReader(EnumVal* reader, RecordVal* description) reporter->Error("readerDescription argument not of right type"); return 0; } + + EnumVal* reader = description->Lookup(rtype->FieldOffset("reader"))->AsEnumVal(); while ( true ) { if ( ir->type == BifEnum::Input::READER_DEFAULT ) @@ -106,38 +110,191 @@ InputReader* InputMgr::CreateReader(EnumVal* reader, RecordVal* description) RecordType *val = description->Lookup(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); TableVal *dst = description->Lookup(rtype->FieldOffset("destination"))->AsTableVal(); - LogField** fields = new LogField*[idx->NumFields() + val->NumFields()]; - for ( int i = 0; i < idx->NumFields(); i++ ) - { - // FIXME: do type checking... - LogField* field = new LogField(); - field->name = idx->FieldName(i); - field->type = idx->FieldType(i)->Tag(); - fields[i] = field; + + vector fieldsV; // vector, because we don't know the length beforehands + + + bool status = !UnrollRecordType(&fieldsV, idx, ""); + + int idxfields = fieldsV.size(); + + status = status || !UnrollRecordType(&fieldsV, val, ""); + int valfields = fieldsV.size() - idxfields; + + if ( status ) { + reporter->Error("Problem unrolling"); + return 0; } - for ( int i = 0; i < val->NumFields(); i++ ) - { - // FIXME: do type checking... - LogField* field = new LogField(); - field->name = val->FieldName(i); - field->type = val->FieldType(i)->Tag(); - fields[idx->NumFields() + i] = field; + + + LogField** fields = new LogField*[fieldsV.size()]; + for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { + fields[i] = fieldsV[i]; } ReaderInfo* info = new ReaderInfo; info->reader = reader_obj; info->type = reader; - info->num_idx_fields = idx->NumFields(); - info->num_val_fields = val->NumFields(); + Ref(reader); + info->num_idx_fields = idxfields; + info->num_val_fields = valfields; info->tab = dst; + Ref(dst); + info->rtype = val; + Ref(val); // we save a pointer of it... 
I really hope that this wasn't already done anywhere. + info->id = id; + Ref(id); // ditto... + info->itype = idx; + Ref(idx); readers.push_back(info); - reader_obj->Init(source, idx->NumFields() + val->NumFields(), fields); + reader_obj->Init(source, fieldsV.size(), fields); reader_obj->Update(); return reader_obj; +} +bool InputMgr::IsCompatibleType(BroType* t) + { + if ( ! t ) + return false; + + switch ( t->Tag() ) { + case TYPE_BOOL: + case TYPE_INT: + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + case TYPE_SUBNET: + case TYPE_ADDR: + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_RECORD: + // for record: check, if all elements are compatible? But... LogMgr also doesn't do this. + // ^ recursive checking is done in UnrollRecordType. + return true; + + case TYPE_FILE: + case TYPE_FUNC: + return false; + + + case TYPE_TABLE: + return false; + + case TYPE_VECTOR: + { + return IsCompatibleType(t->AsVectorType()->YieldType()); + } + + default: + return false; + } + + return false; + } + + +bool InputMgr::RemoveReader(EnumVal* id) { + ReaderInfo *i = 0; + for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) + { + if ( (*s)->id == id ) + { + i = (*s); + readers.erase(s); // remove from vector + break; + } + } + + if ( i == 0 ) { + return false; // not found + } + + Unref(i->type); + Unref(i->tab); + Unref(i->itype); + Unref(i->rtype); + Unref(i->id); + + delete(i->reader); + delete(i); + + return true; +} + +bool InputMgr::UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend) { + for ( int i = 0; i < rec->NumFields(); i++ ) + { + + if ( !IsCompatibleType(rec->FieldType(i)) ) { + reporter->Error("Incompatible type \"%s\" in table definition for InputReader", type_name(rec->FieldType(i)->Tag())); + return false; + } + + if ( rec->FieldType(i)->Tag() == TYPE_RECORD ) + { + + string prep = nameprepend + rec->FieldName(i) + "."; + + if ( !UnrollRecordType(fields, rec->FieldType(i)->AsRecordType(), prep) ) + { + return false; + } + + } else { + LogField* field = new LogField(); + field->name = nameprepend + rec->FieldName(i); + field->type = rec->FieldType(i)->Tag(); + + fields->push_back(field); + } + } + + return true; + +} + +bool InputMgr::ForceUpdate(EnumVal* id) +{ + ReaderInfo *i = FindReader(id); + if ( i == 0 ) { + reporter->Error("Reader not found"); + return false; + } + + i->reader->Update(); + return true; +} + +Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const LogVal* const *vals) { + Val* idxval; + int position = 0; + + + if ( num_fields == 1 ) { + idxval = LogValToVal(vals[0]); + } else { + ListVal *l = new ListVal(TYPE_ANY); + for ( int j = 0 ; j < type->NumFields(); j++ ) { + if ( type->FieldType(j)->Tag() == TYPE_RECORD ) { + l->Append(LogValToRecordVal(vals, type->FieldType(j)->AsRecordType(), &position)); + } else { + l->Append(LogValToVal(vals[position], type->FieldType(j)->Tag())); + position++; + } + } + idxval = l; + } + + assert ( position == num_fields ); + + return idxval; + } void InputMgr::Put(const InputReader* reader, const LogVal* const *vals) { @@ -147,10 +304,65 @@ void InputMgr::Put(const InputReader* reader, const LogVal* const *vals) { return; } - i->tab->Assign(LogValToVal(vals[0]), LogValToVal(vals[1])); - reporter->Error("assigned"); + Val* idxval = LogValToIndexVal(i->num_idx_fields, i->itype, vals); + Val* valval; + + int position = i->num_idx_fields; + if ( i->num_val_fields == 1 ) { + valval = 
LogValToVal(vals[i->num_idx_fields]); + } else { + RecordVal * r = new RecordVal(i->rtype); + + /* if ( i->rtype->NumFields() != (int) i->num_val_fields ) { + reporter->InternalError("Type mismatch"); + return; + } */ + + for ( int j = 0; j < i->rtype->NumFields(); j++) { + + Val* val = 0; + if ( i->rtype->FieldType(j)->Tag() == TYPE_RECORD ) { + val = LogValToRecordVal(vals, i->rtype->FieldType(j)->AsRecordType(), &position); + } else { + val = LogValToVal(vals[position], i->rtype->FieldType(j)->Tag()); + position++; + } + + if ( val == 0 ) { + reporter->InternalError("conversion error"); + return; + } + + r->Assign(j,val); + + } + valval = r; + } + + i->tab->Assign(idxval, valval); } +void InputMgr::Clear(const InputReader* reader) { + ReaderInfo *i = FindReader(reader); + if ( i == 0 ) { + reporter->InternalError("Unknown reader"); + return; + } + + i->tab->RemoveAll(); +} + +bool InputMgr::Delete(const InputReader* reader, const LogVal* const *vals) { + ReaderInfo *i = FindReader(reader); + if ( i == 0 ) { + reporter->InternalError("Unknown reader"); + return false; + } + + Val* idxval = LogValToIndexVal(i->num_idx_fields, i->itype, vals); + + return ( i->tab->Delete(idxval) != 0 ); +} void InputMgr::Error(InputReader* reader, const char* msg) { @@ -174,7 +386,46 @@ void InputMgr::SendEvent(const string& name, const int num_vals, const LogVal* c mgr.Dispatch(new Event(handler, vl)); } -Val* InputMgr::LogValToVal(const LogVal* val) { + +Val* InputMgr::LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position) { + if ( position == 0 ) { + reporter->InternalError("Need position"); + return 0; + } + + /* + if ( request_type->Tag() != TYPE_RECORD ) { + reporter->InternalError("I only work with records"); + return 0; + } */ + + + RecordVal* rec = new RecordVal(request_type->AsRecordType()); + for ( int i = 0; i < request_type->NumFields(); i++ ) { + + Val* fieldVal = 0; + if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) { + fieldVal = LogValToRecordVal(vals, request_type->FieldType(i)->AsRecordType(), position); + } else { + fieldVal = LogValToVal(vals[*position], request_type->FieldType(i)->Tag()); + (*position)++; + } + + rec->Assign(i, fieldVal); + } + + return rec; + +} + +Val* InputMgr::LogValToVal(const LogVal* val, TypeTag request_type) { + + if ( request_type != TYPE_ANY && request_type != val->type ) { + reporter->InternalError("Typetags don't match: %d vs %d", request_type, val->type); + return 0; + } + + switch ( val->type ) { case TYPE_BOOL: case TYPE_INT: @@ -203,6 +454,10 @@ Val* InputMgr::LogValToVal(const LogVal* val) { return new PortVal(val->val.uint_val); break; + case TYPE_ADDR: + return new AddrVal(val->val.addr_val); + break; + default: reporter->InternalError("unsupported type for input_read"); } @@ -225,3 +480,18 @@ InputMgr::ReaderInfo* InputMgr::FindReader(const InputReader* reader) return 0; } + +InputMgr::ReaderInfo* InputMgr::FindReader(const EnumVal* id) + { + for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) + { + if ( (*s)->id == id ) + { + return *s; + } + } + + return 0; + } + + diff --git a/src/InputMgr.h b/src/InputMgr.h index 136be2d608..d5f732935c 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -11,13 +11,17 @@ #include "RemoteSerializer.h" #include "LogMgr.h" // for the LogVal and LogType data types +#include + class InputReader; class InputMgr { public: InputMgr(); - InputReader* CreateReader(EnumVal* reader, RecordVal* description); + InputReader* CreateReader(EnumVal* id, RecordVal* 
description); + bool ForceUpdate(EnumVal* id); + bool RemoveReader(EnumVal* id); protected: friend class InputReader; @@ -26,14 +30,23 @@ protected: void Error(InputReader* reader, const char* msg); void Put(const InputReader* reader, const LogVal* const *vals); + void Clear(const InputReader* reader); + bool Delete(const InputReader* reader, const LogVal* const *vals); private: struct ReaderInfo; - Val* LogValToVal(const LogVal* val); + bool IsCompatibleType(BroType* t); + + bool UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend); + + Val* LogValToVal(const LogVal* val, TypeTag request_type = TYPE_ANY); + Val* LogValToIndexVal(int num_fields, const RecordType* type, const LogVal* const *vals); + Val* LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position); void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); ReaderInfo* FindReader(const InputReader* reader); + ReaderInfo* FindReader(const EnumVal* id); vector readers; }; diff --git a/src/InputReader.cc b/src/InputReader.cc index 6502fdb421..1facc57c7f 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -30,6 +30,17 @@ void InputReader::Put(const LogVal* const *val) input_mgr->Put(this, val); } +void InputReader::Clear() +{ + input_mgr->Clear(this); +} + +void InputReader::Delete(const LogVal* const *val) +{ + input_mgr->Delete(this, val); +} + + bool InputReader::Init(string arg_source, int arg_num_fields, const LogField* const * arg_fields) { diff --git a/src/InputReader.h b/src/InputReader.h index 3725c3d461..0e93344e1a 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -43,6 +43,8 @@ protected: void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); void Put(const LogVal* const *val); + void Clear(); + void Delete(const LogVal* const *val); private: friend class InputMgr; diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index d0d4a3014c..e4d581c0d8 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -144,6 +144,13 @@ bool InputReaderAscii::DoUpdate() { break; case TYPE_BOOL: + if ( s == "T" ) { + val->val.int_val = 1; + } else { + val->val.int_val = 0; + } + break; + case TYPE_INT: val->val.int_val = atoi(s.c_str()); break; @@ -155,16 +162,37 @@ bool InputReaderAscii::DoUpdate() { break; case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: val->val.uint_val = atoi(s.c_str()); break; + case TYPE_SUBNET: { + int pos = s.find("/"); + string width = s.substr(pos); + val->val.subnet_val.width = atoi(width.c_str()); + string addr = s.substr(0, pos); + s = addr; + // fallthrough + } + case TYPE_ADDR: { + addr_type t = dotted_to_addr(s.c_str()); +#ifdef BROv6 + copy_addr(t, val->val.addr_val); +#else + copy_addr(&t, val->val.addr_val); +#endif + break; + } + + default: Error(Fmt("unsupported field format %d for %s", currMapping.type, currMapping.name.c_str())); return false; } - fields[currField] = val; + fields[currMapping.position] = val; currField++; } @@ -174,10 +202,7 @@ bool InputReaderAscii::DoUpdate() { return false; } - // ok, now we have built our line. send it back to... whomever. - // for testing purposes: fixed event. - - SendEvent("inputEvent", num_fields, fields); + // ok, now we have built our line. 
send it back to the input manager Put(fields); } diff --git a/src/input.bif b/src/input.bif index 3da869ea08..4e2cbf07b5 100644 --- a/src/input.bif +++ b/src/input.bif @@ -9,9 +9,22 @@ module Input; type ReaderDescription: record; -function Input::__create_reader%(reader: Input::Reader, description: Input::ReaderDescription%) : bool +function Input::__create_reader%(id: ID, description: Input::ReaderDescription%) : bool %{ - InputReader *the_reader = input_mgr->CreateReader(reader->AsEnumVal(), description->AsRecordVal()); + InputReader *the_reader = input_mgr->CreateReader(id->AsEnumVal(), description->AsRecordVal()); return new Val( the_reader != 0, TYPE_BOOL ); %} + +function Input::__force_update%(id: ID%) : bool + %{ + bool res = input_mgr->ForceUpdate(id->AsEnumVal()); + return new Val( res, TYPE_BOOL ); + %} + +function Input::__remove_reader%(id: ID%) : bool + %{ + bool res = input_mgr->RemoveReader(id->AsEnumVal()); + return new Val( res, TYPE_BOOL ); + %} + diff --git a/src/types.bif b/src/types.bif index ee43207ddd..7b81a7f631 100644 --- a/src/types.bif +++ b/src/types.bif @@ -174,4 +174,8 @@ enum Reader %{ READER_ASCII, %} +enum ID %{ + Unknown, +%} + module GLOBAL; From f20125d22de20a7147026602f83fa1ed710700ac Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 28 Oct 2011 13:11:47 -0700 Subject: [PATCH 024/651] little snag with hashing functionality... --- src/InputMgr.cc | 232 +++++++++++++++++++++++++++++++++++++++- src/InputMgr.h | 11 +- src/InputReader.cc | 13 ++- src/InputReader.h | 9 +- src/InputReaderAscii.cc | 80 ++++++++++++-- src/InputReaderAscii.h | 5 +- src/Val.h | 5 +- 7 files changed, 333 insertions(+), 22 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 648a933a22..5270c83d5a 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -13,6 +13,18 @@ #include "InputReaderAscii.h" +#include "CompHash.h" + + +class InputHash { +public: + HashKey* valhash; + HashKey* idxkey; // does not need ref or whatever - if it is present here, it is also still present in the TableVal. 
+}; + +declare(PDict, InputHash); + + struct InputMgr::ReaderInfo { EnumVal* id; EnumVal* type; @@ -24,6 +36,9 @@ struct InputMgr::ReaderInfo { RecordType* rtype; RecordType* itype; + PDict(InputHash)* currDict; + PDict(InputHash)* lastDict; + }; struct InputReaderDefinition { @@ -147,10 +162,20 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) info->itype = idx; Ref(idx); readers.push_back(info); + info->currDict = new PDict(InputHash); + info->lastDict = new PDict(InputHash); - reader_obj->Init(source, fieldsV.size(), fields); - reader_obj->Update(); + int success = reader_obj->Init(source, fieldsV.size(), idxfields, fields); + if ( success == false ) { + RemoveReader(id); + return 0; + } + success = reader_obj->Update(); + if ( success == false ) { + RemoveReader(id); + return 0; + } return reader_obj; @@ -198,7 +223,6 @@ bool InputMgr::IsCompatibleType(BroType* t) return false; } - bool InputMgr::RemoveReader(EnumVal* id) { ReaderInfo *i = 0; for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) @@ -215,6 +239,9 @@ bool InputMgr::RemoveReader(EnumVal* id) { return false; // not found } + i->reader->Finish(); + + Unref(i->type); Unref(i->tab); Unref(i->itype); @@ -267,8 +294,7 @@ bool InputMgr::ForceUpdate(EnumVal* id) return false; } - i->reader->Update(); - return true; + return i->reader->Update(); } Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const LogVal* const *vals) { @@ -297,6 +323,105 @@ Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const Lo } + +void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { + ReaderInfo *i = FindReader(reader); + if ( i == 0 ) { + reporter->InternalError("Unknown reader"); + return; + } + + HashKey* idxhash = HashLogVals(i->num_idx_fields, vals); + HashKey* valhash = HashLogVals(i->num_val_fields, vals+i->num_idx_fields); + + InputHash *h = i->lastDict->Lookup(idxhash); + if ( h != 0 ) { + // seen before + if ( h->valhash->Hash() == valhash->Hash() ) { + // ok, double. 
+ i->lastDict->Remove(idxhash); + i->currDict->Insert(idxhash, h); + return; + } else { + // updated + i->lastDict->Remove(idxhash); + delete(h); + } + } + + + Val* idxval = LogValToIndexVal(i->num_idx_fields, i->itype, vals); + Val* valval; + + int position = i->num_idx_fields; + if ( i->num_val_fields == 1 ) { + valval = LogValToVal(vals[i->num_idx_fields]); + } else { + RecordVal * r = new RecordVal(i->rtype); + + /* if ( i->rtype->NumFields() != (int) i->num_val_fields ) { + reporter->InternalError("Type mismatch"); + return; + } */ + + for ( int j = 0; j < i->rtype->NumFields(); j++) { + + Val* val = 0; + if ( i->rtype->FieldType(j)->Tag() == TYPE_RECORD ) { + val = LogValToRecordVal(vals, i->rtype->FieldType(j)->AsRecordType(), &position); + } else { + val = LogValToVal(vals[position], i->rtype->FieldType(j)->Tag()); + position++; + } + + if ( val == 0 ) { + reporter->InternalError("conversion error"); + return; + } + + r->Assign(j,val); + + } + valval = r; + } + + //i->tab->Assign(idxval, valval); + HashKey* k = i->tab->ComputeHash(idxval); + if ( !k ) { + reporter->InternalError("could not hash"); + return; + } + + i->tab->Assign(idxval, k, valval); + InputHash* ih = new InputHash(); + ih->idxkey = k; + ih->valhash = valhash; + + i->currDict->Insert(idxhash, ih); + +} + +void InputMgr::EndCurrentSend(const InputReader* reader) { + ReaderInfo *i = FindReader(reader); + if ( i == 0 ) { + reporter->InternalError("Unknown reader"); + return; + } + + // lastdict contains all deleted entries + IterCookie *c = i->lastDict->InitForIteration(); + InputHash* ih; + while ( ( ih = i->lastDict->NextEntry(c )) ) { + i->tab->Delete(ih->idxkey); + } + + i->lastDict->Clear(); + delete(i->lastDict); + + i->lastDict = i->currDict; + i->currDict = new PDict(InputHash); +} + void InputMgr::Put(const InputReader* reader, const LogVal* const *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { @@ -418,6 +543,95 @@ Val* InputMgr::LogValToRecordVal(const LogVal* const *vals, RecordType *request_ } +HashKey* InputMgr::HashLogVals(const int num_elements, const LogVal* const *vals) { + int length = 0; + + for ( int i = 0; i < num_elements; i++ ) { + const LogVal* val = vals[i]; + switch (val->type) { + case TYPE_BOOL: + case TYPE_INT: + length += sizeof(val->val.int_val); + break; + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + length += sizeof(val->val.uint_val); + break; + + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + length += sizeof(val->val.double_val); + break; + + case TYPE_STRING: + { + length += val->val.string_val->size(); + break; + } + + case TYPE_ADDR: + length += NUM_ADDR_WORDS*sizeof(uint32_t); + break; + + default: + reporter->InternalError("unsupported type for hashlogvals"); + } + + } + + int position = 0; + char *data = (char*) malloc(length); + for ( int i = 0; i < num_elements; i++ ) { + const LogVal* val = vals[i]; + switch ( val->type ) { + case TYPE_BOOL: + case TYPE_INT: + *(data+position) = val->val.int_val; + position += sizeof(val->val.int_val); + break; + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + *(data+position) = val->val.uint_val; + position += sizeof(val->val.uint_val); + break; + + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + *(data+position) = val->val.double_val; + position += sizeof(val->val.double_val); + break; + + case TYPE_STRING: + { + memcpy(data+position, val->val.string_val->c_str(), val->val.string_val->length()); + position += val->val.string_val->size(); + break; + } + + case TYPE_ADDR: + 
memcpy(data+position, val->val.addr_val, NUM_ADDR_WORDS*sizeof(uint32_t)); + position += NUM_ADDR_WORDS*sizeof(uint32_t); + break; + + default: + reporter->InternalError("unsupported type for hashlogvals2"); + } + + + } + + assert(position == length); + return new HashKey(data, length); + + +} + Val* InputMgr::LogValToVal(const LogVal* val, TypeTag request_type) { if ( request_type != TYPE_ANY && request_type != val->type ) { @@ -495,3 +709,11 @@ InputMgr::ReaderInfo* InputMgr::FindReader(const EnumVal* id) } +string InputMgr::Hash(const string &input) { + unsigned char digest[16]; + hash_md5(input.length(), (const unsigned char*) input.c_str(), digest); + string out((const char*) digest, 16); + return out; +} + + diff --git a/src/InputMgr.h b/src/InputMgr.h index d5f732935c..d147fa262a 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -15,9 +15,10 @@ class InputReader; + class InputMgr { public: - InputMgr(); + InputMgr(); InputReader* CreateReader(EnumVal* id, RecordVal* description); bool ForceUpdate(EnumVal* id); @@ -32,6 +33,9 @@ protected: void Put(const InputReader* reader, const LogVal* const *vals); void Clear(const InputReader* reader); bool Delete(const InputReader* reader, const LogVal* const *vals); + + void SendEntry(const InputReader* reader, const LogVal* const *vals); + void EndCurrentSend(const InputReader* reader); private: struct ReaderInfo; @@ -40,6 +44,8 @@ private: bool UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend); + HashKey* HashLogVals(const int num_elements, const LogVal* const *vals); + Val* LogValToVal(const LogVal* val, TypeTag request_type = TYPE_ANY); Val* LogValToIndexVal(int num_fields, const RecordType* type, const LogVal* const *vals); Val* LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position); @@ -49,6 +55,9 @@ private: ReaderInfo* FindReader(const EnumVal* id); vector readers; + + string Hash(const string &input); + }; extern InputMgr* input_mgr; diff --git a/src/InputReader.cc b/src/InputReader.cc index 1facc57c7f..494df3fb81 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -41,20 +41,22 @@ void InputReader::Delete(const LogVal* const *val) } -bool InputReader::Init(string arg_source, int arg_num_fields, +bool InputReader::Init(string arg_source, int arg_num_fields, int arg_idx_fields, const LogField* const * arg_fields) { source = arg_source; num_fields = arg_num_fields; + index_fields = arg_idx_fields; fields = arg_fields; // disable if DoInit returns error. - disabled = !DoInit(arg_source, arg_num_fields, arg_fields); + disabled = !DoInit(arg_source, arg_num_fields, arg_idx_fields, arg_fields); return !disabled; } void InputReader::Finish() { DoFinish(); + disabled = true; } bool InputReader::Update() { @@ -91,3 +93,10 @@ const char* InputReader::Fmt(const char* format, ...) 
} +void InputReader::SendEntry(const LogVal* const *vals) { + input_mgr->SendEntry(this, vals); +} + +void InputReader::EndCurrentSend() { + input_mgr->EndCurrentSend(this); +} diff --git a/src/InputReader.h b/src/InputReader.h index 0e93344e1a..b547d29506 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -15,7 +15,7 @@ public: InputReader(); virtual ~InputReader(); - bool Init(string arg_source, int num_fields, const LogField* const* fields); + bool Init(string arg_source, int arg_num_fields, int arg_idx_fields, const LogField* const* fields); void Finish(); @@ -23,7 +23,7 @@ public: protected: // Methods that have to be overwritten by the individual readers - virtual bool DoInit(string arg_source, int num_fields, const LogField* const * fields) = 0; + virtual bool DoInit(string arg_source, int arg_num_fields, int arg_idx_fields, const LogField* const * fields) = 0; virtual void DoFinish() = 0; @@ -46,11 +46,16 @@ protected: void Clear(); void Delete(const LogVal* const *val); + void SendEntry(const LogVal* const *vals); + void EndCurrentSend(); + + private: friend class InputMgr; string source; int num_fields; + int index_fields; const LogField* const * fields; // When an error occurs, this method is called to set a flag marking the diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index e4d581c0d8..e434f7e750 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -21,17 +21,26 @@ InputReaderAscii::InputReaderAscii() { //DBG_LOG(DBG_LOGGING, "input reader initialized"); file = 0; + + //keyMap = new map(); } InputReaderAscii::~InputReaderAscii() { + DoFinish(); } void InputReaderAscii::DoFinish() { + columnMap.empty(); + if ( file != 0 ) { + file->close(); + delete(file); + file = 0; + } } -bool InputReaderAscii::DoInit(string path, int num_fields, const LogField* const * fields) +bool InputReaderAscii::DoInit(string path, int num_fields, int idx_fields, const LogField* const * fields) { fname = path; @@ -47,6 +56,9 @@ bool InputReaderAscii::DoInit(string path, int num_fields, const LogField* const Error("could not read first line"); return false; } + + this->num_fields = num_fields; + this->idx_fields = idx_fields; // split on tabs... istringstream splitstream(line); @@ -83,12 +95,10 @@ bool InputReaderAscii::DoInit(string path, int num_fields, const LogField* const if ( wantFields != num_fields ) { // we did not find all fields? // :( - Error("wantFields != num_fields"); + Error("One of the requested fields could not be found in the input data file"); return false; } - - this->num_fields = num_fields; // well, that seems to have worked... 
return true; @@ -101,6 +111,9 @@ bool InputReaderAscii::DoUpdate() { // + // new keymap + //map *newKeyMap = new map(); + string line; while ( getline(*file, line ) ) { // split on tabs @@ -109,10 +122,12 @@ bool InputReaderAscii::DoUpdate() { string s; LogVal** fields = new LogVal*[num_fields]; + //string string_fields[num_fields]; unsigned int currTab = 0; unsigned int currField = 0; while ( splitstream ) { + if ( !getline(splitstream, s, '\t') ) break; @@ -146,8 +161,11 @@ bool InputReaderAscii::DoUpdate() { case TYPE_BOOL: if ( s == "T" ) { val->val.int_val = 1; - } else { + } else if ( s == "F" ) { val->val.int_val = 0; + } else { + Error(Fmt("Invalid value for boolean: %s", s.c_str())); + return false; } break; @@ -173,9 +191,15 @@ bool InputReaderAscii::DoUpdate() { val->val.subnet_val.width = atoi(width.c_str()); string addr = s.substr(0, pos); s = addr; - // fallthrough + // NOTE: dottet_to_addr BREAKS THREAD SAFETY! it uses reporter. + // Solve this some other time.... + val->val.subnet_val.net = dotted_to_addr(s.c_str()); + break; + } case TYPE_ADDR: { + // NOTE: dottet_to_addr BREAKS THREAD SAFETY! it uses reporter. + // Solve this some other time.... addr_type t = dotted_to_addr(s.c_str()); #ifdef BROv6 copy_addr(t, val->val.addr_val); @@ -193,19 +217,57 @@ bool InputReaderAscii::DoUpdate() { } fields[currMapping.position] = val; + //string_fields[currMapping.position] = s; currField++; } if ( currField != num_fields ) { - Error("curr_field != num_fields in DoUpdate"); + Error("curr_field != num_fields in DoUpdate. Columns in file do not match column definition."); return false; } - // ok, now we have built our line. send it back to the input manager - Put(fields); + + SendEntry(fields); + + /* + string indexstring = ""; + string valstring = ""; + for ( unsigned int i = 0; i < idx_fields; i++ ) { + indexstring.append(string_fields[i]); + } + + for ( unsigned int i = idx_fields; i < num_fields; i++ ) { + valstring.append(string_fields[i]); + } + + string valhash = Hash(valstring); + string indexhash = Hash(indexstring); + + if ( keyMap->find(indexhash) == keyMap->end() ) { + // new key + Put(fields); + } else if ( (*keyMap)[indexhash] != valhash ) { + // changed key + Put(fields); + keyMap->erase(indexhash); + } else { + // field not changed + keyMap->erase(indexhash); + } + + + (*newKeyMap)[indexhash] = valhash; + */ + + for ( unsigned int i = 0; i < num_fields; i++ ) { + delete fields[i]; + } + delete [] fields; } + + EndCurrentSend(); return true; } diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index 551a08b02e..c26a139dcd 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -30,7 +30,7 @@ public: protected: - virtual bool DoInit(string path, int num_fields, + virtual bool DoInit(string path, int arg_num_fields, int arg_idx_fields, const LogField* const * fields); virtual void DoFinish(); @@ -42,9 +42,12 @@ private: string fname; unsigned int num_fields; + unsigned int idx_fields; // map columns in the file to columns to send back to the manager vector columnMap; + + //map *keyMap; }; diff --git a/src/Val.h b/src/Val.h index d851be311b..3ae0bc3334 100644 --- a/src/Val.h +++ b/src/Val.h @@ -841,6 +841,9 @@ public: timer = 0; } + HashKey* ComputeHash(const Val* index) const + { return table_hash->ComputeHash(index, 1); } + protected: friend class Val; friend class StateAccess; @@ -851,8 +854,6 @@ protected: void CheckExpireAttr(attr_tag at); int ExpandCompoundAndInit(val_list* vl, int k, Val* new_val); int CheckAndAssign(Val* index, Val* new_val, 
Opcode op = OP_ASSIGN); - HashKey* ComputeHash(const Val* index) const - { return table_hash->ComputeHash(index, 1); } bool AddProperties(Properties arg_state); bool RemoveProperties(Properties arg_state); From 638976791efb3b7d9d491266d83bb30b73d4bd53 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 2 Nov 2011 14:00:19 -0700 Subject: [PATCH 025/651] hashing seems to work _correctly_ now... --- src/InputMgr.cc | 44 ++++++++++++++++++++++++++++++----------- src/InputReaderAscii.cc | 41 +++++++++++++++++++++++++++++++++----- src/InputReaderAscii.h | 3 +++ 3 files changed, 72 insertions(+), 16 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 5270c83d5a..e3acfb9505 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -290,10 +290,10 @@ bool InputMgr::ForceUpdate(EnumVal* id) { ReaderInfo *i = FindReader(id); if ( i == 0 ) { - reporter->Error("Reader not found"); + reporter->InternalError("Reader not found"); return false; } - + return i->reader->Update(); } @@ -302,8 +302,9 @@ Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const Lo int position = 0; - if ( num_fields == 1 ) { + if ( num_fields == 1 && type->FieldType(0)->Tag() != TYPE_RECORD ) { idxval = LogValToVal(vals[0]); + position = 1; } else { ListVal *l = new ListVal(TYPE_ANY); for ( int j = 0 ; j < type->NumFields(); j++ ) { @@ -317,6 +318,7 @@ Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const Lo idxval = l; } + //reporter->Error("Position: %d, num_fields: %d", position, num_fields); assert ( position == num_fields ); return idxval; @@ -331,8 +333,15 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { return; } + + reporter->Error("Hashing %d index fields", i->num_idx_fields); HashKey* idxhash = HashLogVals(i->num_idx_fields, vals); + reporter->Error("Result: %d", (uint64_t) idxhash->Hash()); + reporter->Error("Hashing %d val fields", i->num_val_fields); HashKey* valhash = HashLogVals(i->num_val_fields, vals+i->num_idx_fields); + reporter->Error("Result: %d", (uint64_t) valhash->Hash()); + + //reporter->Error("received entry with idxhash %d and valhash %d", (uint64_t) idxhash->Hash(), (uint64_t) valhash->Hash()); InputHash *h = i->lastDict->Lookup(idxhash); if ( h != 0 ) { @@ -393,9 +402,12 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { } i->tab->Assign(idxval, k, valval); + InputHash* ih = new InputHash(); + k = i->tab->ComputeHash(idxval); ih->idxkey = k; ih->valhash = valhash; + //i->tab->Delete(k); i->currDict->Insert(idxhash, ih); @@ -407,11 +419,12 @@ void InputMgr::EndCurrentSend(const InputReader* reader) { reporter->InternalError("Unknown reader"); return; } - - // lastdict contains all deleted entries + // lastdict contains all deleted entries and should be empty apart from that IterCookie *c = i->lastDict->InitForIteration(); InputHash* ih; - while ( ( ih = i->lastDict->NextEntry(c )) ) { + reporter->Error("ending"); + while ( ( ih = i->lastDict->NextEntry(c) ) ) { + reporter->Error("Expiring element"); i->tab->Delete(ih->idxkey); } @@ -582,28 +595,37 @@ HashKey* InputMgr::HashLogVals(const int num_elements, const LogVal* const *vals } + //reporter->Error("Length: %d", length); + int position = 0; char *data = (char*) malloc(length); + if ( data == 0 ) { + reporter->InternalError("Could not malloc?"); + } for ( int i = 0; i < num_elements; i++ ) { const LogVal* val = vals[i]; switch ( val->type ) { case TYPE_BOOL: case TYPE_INT: - *(data+position) = val->val.int_val; + 
//reporter->Error("Adding field content to pos %d: %lld", val->val.int_val, position); + memcpy(data+position, (const void*) &(val->val.int_val), sizeof(val->val.int_val)); + //*(data+position) = val->val.int_val; position += sizeof(val->val.int_val); break; case TYPE_COUNT: case TYPE_COUNTER: case TYPE_PORT: - *(data+position) = val->val.uint_val; + //*(data+position) = val->val.uint_val; + memcpy(data+position, (const void*) &(val->val.uint_val), sizeof(val->val.uint_val)); position += sizeof(val->val.uint_val); break; case TYPE_DOUBLE: case TYPE_TIME: case TYPE_INTERVAL: - *(data+position) = val->val.double_val; + //*(data+position) = val->val.double_val; + memcpy(data+position, (const void*) &(val->val.double_val), sizeof(val->val.double_val)); position += sizeof(val->val.double_val); break; @@ -685,7 +707,7 @@ InputMgr::ReaderInfo* InputMgr::FindReader(const InputReader* reader) { for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) { - if ( (*s)->reader == reader ) + if ( (*s)->reader && (*s)->reader == reader ) { return *s; } @@ -699,7 +721,7 @@ InputMgr::ReaderInfo* InputMgr::FindReader(const EnumVal* id) { for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) { - if ( (*s)->id == id ) + if ( (*s)->id && (*s)->id->AsEnum() == id->AsEnum() ) { return *s; } diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index e434f7e750..4755b2b74f 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -50,6 +50,16 @@ bool InputReaderAscii::DoInit(string path, int num_fields, int idx_fields, const return false; } + + this->num_fields = num_fields; + this->idx_fields = idx_fields; + this->fields = fields; + + return true; +} + + +bool InputReaderAscii::ReadHeader() { // try to read the header line... string line; if ( !getline(*file, line) ) { @@ -57,9 +67,6 @@ bool InputReaderAscii::DoInit(string path, int num_fields, int idx_fields, const return false; } - this->num_fields = num_fields; - this->idx_fields = idx_fields; - // split on tabs... istringstream splitstream(line); unsigned int currTab = 0; @@ -70,7 +77,7 @@ bool InputReaderAscii::DoInit(string path, int num_fields, int idx_fields, const break; // current found heading in s... compare if we want it - for ( int i = 0; i < num_fields; i++ ) { + for ( unsigned int i = 0; i < num_fields; i++ ) { const LogField* field = fields[i]; if ( field->name == s ) { // cool, found field. note position @@ -92,7 +99,7 @@ bool InputReaderAscii::DoInit(string path, int num_fields, int idx_fields, const currTab++; } - if ( wantFields != num_fields ) { + if ( wantFields != (int) num_fields ) { // we did not find all fields? // :( Error("One of the requested fields could not be found in the input data file"); @@ -106,9 +113,30 @@ bool InputReaderAscii::DoInit(string path, int num_fields, int idx_fields, const // read the entire file and send appropriate thingies back to InputMgr bool InputReaderAscii::DoUpdate() { + + + // dirty, fix me. (well, apparently after trying seeking, etc - this is not that bad) + if ( file && file->is_open() ) { + file->close(); + } + file = new ifstream(fname.c_str()); + if ( !file->is_open() ) { + Error(Fmt("cannot open %s", fname.c_str())); + return false; + } + // + + // file->seekg(0, ios::beg); // do not forget clear. + + + if ( ReadHeader() == false ) { + return false; + } + // TODO: all the stuff we need for a second reading. 
// *cough* // + // // new keymap @@ -152,6 +180,7 @@ bool InputReaderAscii::DoUpdate() { } LogVal* val = new LogVal(currMapping.type, true); + //bzero(val, sizeof(LogVal)); switch ( currMapping.type ) { case TYPE_STRING: @@ -267,6 +296,8 @@ bool InputReaderAscii::DoUpdate() { } + //file->clear(); // remove end of file evil bits + //file->seekg(0, ios::beg); // and seek to start. EndCurrentSend(); return true; diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index c26a139dcd..673a2cdae2 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -37,6 +37,8 @@ protected: virtual bool DoUpdate(); private: + + bool ReadHeader(); ifstream* file; string fname; @@ -46,6 +48,7 @@ private: // map columns in the file to columns to send back to the manager vector columnMap; + const LogField* const * fields; // raw mapping //map *keyMap; From b5a77aa77baf69b788adb7b40f4246f0692939a0 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 2 Nov 2011 14:29:58 -0700 Subject: [PATCH 026/651] reading seems to work with all atomic types + records... --- src/InputMgr.cc | 25 ++++++++++++++++++++----- src/InputReaderAscii.cc | 2 +- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index e3acfb9505..bbfd354189 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -334,12 +334,12 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { } - reporter->Error("Hashing %d index fields", i->num_idx_fields); + //reporter->Error("Hashing %d index fields", i->num_idx_fields); HashKey* idxhash = HashLogVals(i->num_idx_fields, vals); - reporter->Error("Result: %d", (uint64_t) idxhash->Hash()); - reporter->Error("Hashing %d val fields", i->num_val_fields); + //reporter->Error("Result: %d", (uint64_t) idxhash->Hash()); + //reporter->Error("Hashing %d val fields", i->num_val_fields); HashKey* valhash = HashLogVals(i->num_val_fields, vals+i->num_idx_fields); - reporter->Error("Result: %d", (uint64_t) valhash->Hash()); + //reporter->Error("Result: %d", (uint64_t) valhash->Hash()); //reporter->Error("received entry with idxhash %d and valhash %d", (uint64_t) idxhash->Hash(), (uint64_t) valhash->Hash()); @@ -422,7 +422,6 @@ void InputMgr::EndCurrentSend(const InputReader* reader) { // lastdict contains all deleted entries and should be empty apart from that IterCookie *c = i->lastDict->InitForIteration(); InputHash* ih; - reporter->Error("ending"); while ( ( ih = i->lastDict->NextEntry(c) ) ) { reporter->Error("Expiring element"); i->tab->Delete(ih->idxkey); @@ -589,6 +588,11 @@ HashKey* InputMgr::HashLogVals(const int num_elements, const LogVal* const *vals length += NUM_ADDR_WORDS*sizeof(uint32_t); break; + case TYPE_SUBNET: + length += sizeof(val->val.subnet_val.width); + length += sizeof(val->val.subnet_val.net); + break; + default: reporter->InternalError("unsupported type for hashlogvals"); } @@ -641,6 +645,13 @@ HashKey* InputMgr::HashLogVals(const int num_elements, const LogVal* const *vals position += NUM_ADDR_WORDS*sizeof(uint32_t); break; + case TYPE_SUBNET: + memcpy(data+position,(const char*) &(val->val.subnet_val.width), sizeof(val->val.subnet_val.width) ); + position += sizeof(val->val.subnet_val.width); + memcpy(data+position, (const char*) &(val->val.subnet_val.net), sizeof(val->val.subnet_val.net) ); + position += sizeof(val->val.subnet_val.net); + break; + default: reporter->InternalError("unsupported type for hashlogvals2"); } @@ -694,6 +705,10 @@ Val* InputMgr::LogValToVal(const LogVal* val, TypeTag request_type) 
{ return new AddrVal(val->val.addr_val); break; + case TYPE_SUBNET: + return new SubNetVal(val->val.subnet_val.net, val->val.subnet_val.width); + break; + default: reporter->InternalError("unsupported type for input_read"); } diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 4755b2b74f..39635de407 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -216,7 +216,7 @@ bool InputReaderAscii::DoUpdate() { case TYPE_SUBNET: { int pos = s.find("/"); - string width = s.substr(pos); + string width = s.substr(pos+1); val->val.subnet_val.width = atoi(width.c_str()); string addr = s.substr(0, pos); s = addr; From 4845c3a9a61d95dd0246f3beb6fcaa6e74c88f8c Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 3 Nov 2011 14:04:13 -0700 Subject: [PATCH 027/651] send events when input entries change --- src/InputMgr.cc | 97 +++++++++++++++++++++++++++++++++++++++++++++- src/InputMgr.h | 8 +++- src/InputReader.cc | 24 +++++++----- src/input.bif | 11 ++++++ src/types.bif | 7 ++++ 5 files changed, 133 insertions(+), 14 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index bbfd354189..e987dda81e 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -38,6 +38,8 @@ struct InputMgr::ReaderInfo { PDict(InputHash)* currDict; PDict(InputHash)* lastDict; + + list* events; // events we fire when "something" happens }; @@ -165,6 +167,7 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) info->currDict = new PDict(InputHash); info->lastDict = new PDict(InputHash); + info->events = new list(); int success = reader_obj->Init(source, fieldsV.size(), idxfields, fields); if ( success == false ) { @@ -223,7 +226,7 @@ bool InputMgr::IsCompatibleType(BroType* t) return false; } -bool InputMgr::RemoveReader(EnumVal* id) { +bool InputMgr::RemoveReader(const EnumVal* id) { ReaderInfo *i = 0; for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) { @@ -254,6 +257,43 @@ bool InputMgr::RemoveReader(EnumVal* id) { return true; } +bool InputMgr::RegisterEvent(const EnumVal* id, string eventName) { + ReaderInfo *i = FindReader(id); + if ( i == 0 ) { + reporter->InternalError("Reader not found"); + return false; + } + + i->events->push_back(eventName); + + return true; +} + +bool InputMgr::UnregisterEvent(const EnumVal* id, string eventName) { + ReaderInfo *i = FindReader(id); + if ( i == 0 ) { + reporter->InternalError("Reader not found"); + return false; + } + + bool erased = false; + + std::list::iterator it = i->events->begin(); + while ( it != i->events->end() ) + { + if ( *it == eventName ) { + it = i->events->erase(it); + erased = true; + } + else + ++it; + } + + + return erased; +} + + bool InputMgr::UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend) { for ( int i = 0; i < rec->NumFields(); i++ ) { @@ -286,7 +326,7 @@ bool InputMgr::UnrollRecordType(vector *fields, const RecordType *rec } -bool InputMgr::ForceUpdate(EnumVal* id) +bool InputMgr::ForceUpdate(const EnumVal* id) { ReaderInfo *i = FindReader(id); if ( i == 0 ) { @@ -333,6 +373,8 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { return; } + bool updated = false; + //reporter->Error("Hashing %d index fields", i->num_idx_fields); HashKey* idxhash = HashLogVals(i->num_idx_fields, vals); @@ -355,6 +397,7 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { // updated i->lastDict->Remove(idxhash); delete(h); + updated = true; } } @@ -411,8 +454,25 @@ void InputMgr::SendEntry(const 
InputReader* reader, const LogVal* const *vals) { i->currDict->Insert(idxhash, ih); + std::list::iterator it = i->events->begin(); + while ( it != i->events->end() ) { + EnumVal* ev; + if ( updated ) { + ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); + } else { + ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); + } + + + Ref(idxval); + Ref(valval); + SendEvent(*it, ev, idxval, valval); + ++it; + } + } + void InputMgr::EndCurrentSend(const InputReader* reader) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { @@ -423,6 +483,23 @@ void InputMgr::EndCurrentSend(const InputReader* reader) { IterCookie *c = i->lastDict->InitForIteration(); InputHash* ih; while ( ( ih = i->lastDict->NextEntry(c) ) ) { + + if ( i->events->size() > 0 ) { + ListVal *idx = i->tab->RecoverIndex(ih->idxkey); + assert(idx != 0); + Val *val = i->tab->Lookup(idx); + assert(val != 0); + + std::list::iterator it = i->events->begin(); + while ( it != i->events->end() ) { + Ref(idx); + Ref(val); + EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); + SendEvent(*it, ev, idx, val); + ++it; + } + } + reporter->Error("Expiring element"); i->tab->Delete(ih->idxkey); } @@ -523,6 +600,22 @@ void InputMgr::SendEvent(const string& name, const int num_vals, const LogVal* c mgr.Dispatch(new Event(handler, vl)); } +void InputMgr::SendEvent(const string& name, EnumVal* event, Val* left, Val* right) +{ + EventHandler* handler = event_registry->Lookup(name.c_str()); + if ( handler == 0 ) { + reporter->Error("Event %s not found", name.c_str()); + return; + } + + val_list* vl = new val_list; + vl->append(event); + vl->append(left); + vl->append(right); + + mgr.Dispatch(new Event(handler, vl)); +} + Val* InputMgr::LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position) { if ( position == 0 ) { diff --git a/src/InputMgr.h b/src/InputMgr.h index d147fa262a..378838c2cf 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -21,8 +21,11 @@ public: InputMgr(); InputReader* CreateReader(EnumVal* id, RecordVal* description); - bool ForceUpdate(EnumVal* id); - bool RemoveReader(EnumVal* id); + bool ForceUpdate(const EnumVal* id); + bool RemoveReader(const EnumVal* id); + bool RegisterEvent(const EnumVal* id, string eventName); + bool UnregisterEvent(const EnumVal* id, string eventName); + protected: friend class InputReader; @@ -43,6 +46,7 @@ private: bool IsCompatibleType(BroType* t); bool UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend); + void SendEvent(const string& name, EnumVal* event, Val* left, Val* right); HashKey* HashLogVals(const int num_elements, const LogVal* const *vals); diff --git a/src/InputReader.cc b/src/InputReader.cc index 494df3fb81..994f8b9b97 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -1,13 +1,12 @@ +// See the file "COPYING" in the main distribution directory for copyright. #include "InputReader.h" -// #include "EventRegistry.h" -// #include "Event.h" InputReader::InputReader() { buf = 0; buf_len = 1024; - disabled = true; // disabled will be set correcty in init. + disabled = true; // disabled will be set correcty in init. 
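The SendEntry()/EndCurrentSend() changes in this patch implement a two-snapshot diff: entries seen during the current read go into currDict, whatever is still left in lastDict at the end must have disappeared from the source, and the comparison decides whether EVENT_NEW, EVENT_CHANGED or EVENT_REMOVED fires. A stand-alone sketch of that classification, assuming a plain key-to-value-hash map instead of Bro's PDict/HashKey machinery (the real code removes matched entries from lastDict as it goes so that only deletions remain; this sketch simply compares two complete snapshots):

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>

enum class Event { NEW, CHANGED, REMOVED };

using Snapshot = std::unordered_map<std::string, std::size_t>; // key -> value hash

void DiffSnapshots(const Snapshot& last, const Snapshot& curr,
                   const std::function<void(Event, const std::string&)>& emit)
{
    for ( const auto& kv : curr )
    {
        auto it = last.find(kv.first);
        if ( it == last.end() )
            emit(Event::NEW, kv.first);        // not seen before
        else if ( it->second != kv.second )
            emit(Event::CHANGED, kv.first);    // same key, different value
    }

    for ( const auto& kv : last )
        if ( curr.find(kv.first) == curr.end() )
            emit(Event::REMOVED, kv.first);    // was there before, gone now
}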
} InputReader::~InputReader() @@ -21,9 +20,9 @@ void InputReader::Error(const char *msg) } void InputReader::Error(const string &msg) - { +{ input_mgr->Error(this, msg.c_str()); - } +} void InputReader::Put(const LogVal* const *val) { @@ -54,16 +53,19 @@ bool InputReader::Init(string arg_source, int arg_num_fields, int arg_idx_fields return !disabled; } -void InputReader::Finish() { +void InputReader::Finish() +{ DoFinish(); disabled = true; } -bool InputReader::Update() { +bool InputReader::Update() +{ return DoUpdate(); } -void InputReader::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) { +void InputReader::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) +{ input_mgr->SendEvent(name, num_vals, vals); } @@ -93,10 +95,12 @@ const char* InputReader::Fmt(const char* format, ...) } -void InputReader::SendEntry(const LogVal* const *vals) { +void InputReader::SendEntry(const LogVal* const *vals) +{ input_mgr->SendEntry(this, vals); } -void InputReader::EndCurrentSend() { +void InputReader::EndCurrentSend() +{ input_mgr->EndCurrentSend(this); } diff --git a/src/input.bif b/src/input.bif index 4e2cbf07b5..90dd2386b1 100644 --- a/src/input.bif +++ b/src/input.bif @@ -21,6 +21,17 @@ function Input::__force_update%(id: ID%) : bool return new Val( res, TYPE_BOOL ); %} +function Input::__add_event%(id: ID, name: string%) : bool + %{ + bool res = input_mgr->RegisterEvent(id->AsEnumVal(), name->AsString()->CheckString()); + return new Val( res, TYPE_BOOL ); + %} + +function Input::__remove_event%(id: ID, name: string%) : bool + %{ + bool res = input_mgr->UnregisterEvent(id->AsEnumVal(), name->AsString()->CheckString()); + return new Val( res, TYPE_BOOL ); + %} function Input::__remove_reader%(id: ID%) : bool %{ diff --git a/src/types.bif b/src/types.bif index 7b81a7f631..f90a954224 100644 --- a/src/types.bif +++ b/src/types.bif @@ -174,6 +174,13 @@ enum Reader %{ READER_ASCII, %} +enum Event %{ + EVENT_NEW, + EVENT_CHANGED, + EVENT_REMOVED, +%} + + enum ID %{ Unknown, %} From 2e3874331d0540724a47e91c8b0146b7b3fa6846 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 4 Nov 2011 12:41:10 -0700 Subject: [PATCH 028/651] support for filters and little event fix --- scripts/base/frameworks/input/main.bro | 10 ++ src/InputMgr.cc | 204 +++++++++++++++++++++---- src/InputMgr.h | 5 + src/input.bif | 22 ++- 4 files changed, 210 insertions(+), 31 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 4bb7129d03..35ca6dfa4e 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -11,6 +11,16 @@ export { destination: any; reader: Reader &default=default_reader; }; + + type Filter: record { + name: string; + ## descriptive name. 
for later removal + + + pred: function(typ: Input::Event, left: any, right: any): bool &optional; + ## decision function, that decides if an inserton, update or removal should really be executed + }; + } @load base/input.bif diff --git a/src/InputMgr.cc b/src/InputMgr.cc index e987dda81e..0b87a561ad 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -24,6 +24,11 @@ public: declare(PDict, InputHash); +struct InputMgr::Filter { + EnumVal* id; + string name; + Func* pred; +}; struct InputMgr::ReaderInfo { EnumVal* id; @@ -39,10 +44,17 @@ struct InputMgr::ReaderInfo { PDict(InputHash)* currDict; PDict(InputHash)* lastDict; - list* events; // events we fire when "something" happens + list events; // events we fire when "something" happens + list filters; // events we fire when "something" happens + +// ~ReaderInfo(); }; +//void InputMgr::~ReaderInfo() { +// +//} + struct InputReaderDefinition { bro_int_t type; // the type const char *name; // descriptive name for error messages @@ -167,8 +179,6 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) info->currDict = new PDict(InputHash); info->lastDict = new PDict(InputHash); - info->events = new list(); - int success = reader_obj->Init(source, fieldsV.size(), idxfields, fields); if ( success == false ) { RemoveReader(id); @@ -264,7 +274,7 @@ bool InputMgr::RegisterEvent(const EnumVal* id, string eventName) { return false; } - i->events->push_back(eventName); + i->events.push_back(eventName); return true; } @@ -276,21 +286,23 @@ bool InputMgr::UnregisterEvent(const EnumVal* id, string eventName) { return false; } - bool erased = false; + //bool erased = false; - std::list::iterator it = i->events->begin(); - while ( it != i->events->end() ) + std::list::iterator it = i->events.begin(); + while ( it != i->events.end() ) { if ( *it == eventName ) { - it = i->events->erase(it); - erased = true; + it = i->events.erase(it); + return true; + // erased = true; } else ++it; } - return erased; + return false; + //return erased; } @@ -337,6 +349,59 @@ bool InputMgr::ForceUpdate(const EnumVal* id) return i->reader->Update(); } +bool InputMgr::AddFilter(EnumVal *id, RecordVal* fval) { + ReaderInfo *i = FindReader(id); + if ( i == 0 ) { + reporter->InternalError("Reader not found"); + return false; + } + + RecordType* rtype = fval->Type()->AsRecordType(); + if ( ! same_type(rtype, BifType::Record::Input::Filter, 0) ) + { + reporter->Error("filter argument not of right type"); + return false; + } + + + Val* name = fval->Lookup(rtype->FieldOffset("name")); + Val* pred = fval->Lookup(rtype->FieldOffset("pred")); + + Filter filter; + filter.name = name->AsString()->CheckString(); + filter.id = id->Ref()->AsEnumVal(); + filter.pred = pred ? 
pred->AsFunc() : 0; + + i->filters.push_back(filter); + + return true; +} + +bool InputMgr::RemoveFilter(EnumVal* id, const string &name) { + ReaderInfo *i = FindReader(id); + if ( i == 0 ) { + reporter->InternalError("Reader not found"); + return false; + } + + + std::list::iterator it = i->filters.begin(); + while ( it != i->filters.end() ) + { + if ( (*it).name == name ) { + it = i->filters.erase(it); + return true; + break; + } + else + ++it; + } + + return false;; +} + + + Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const LogVal* const *vals) { Val* idxval; int position = 0; @@ -398,6 +463,7 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { i->lastDict->Remove(idxhash); delete(h); updated = true; + } } @@ -437,6 +503,48 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { valval = r; } + + Val* oldval = 0; + if ( updated == true ) { + // in that case, we need the old value to send the event (if we send an event). + oldval = i->tab->Lookup(idxval); + } + + + // call filters first do determine if we really add / change the entry + std::list::iterator it = i->filters.begin(); + while ( it != i->filters.end() ) { + if (! (*it).pred ) { + continue; + } + + EnumVal* ev; + Ref(idxval); + Ref(valval); + + if ( updated ) { + ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); + } else { + ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); + } + + val_list vl(3); + vl.append(ev); + vl.append(idxval); + vl.append(valval); + Val* v = (*it).pred->Call(&vl); + bool result = v->AsBool(); + Unref(v); + + if ( result == false ) { + // throw away. Hence - we quit. + return; + } + + ++it; + } + + //i->tab->Assign(idxval, valval); HashKey* k = i->tab->ComputeHash(idxval); if ( !k ) { @@ -454,21 +562,28 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { i->currDict->Insert(idxhash, ih); - std::list::iterator it = i->events->begin(); - while ( it != i->events->end() ) { + // send events now that we are kind of finished. + std::list::iterator filter_iterator = i->events.begin(); + while ( filter_iterator != i->events.end() ) { EnumVal* ev; - if ( updated ) { + Ref(idxval); + + if ( updated ) { // in case of update send back the old value. ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); + assert ( oldval != 0 ); + Ref(oldval); + SendEvent(*filter_iterator, ev, idxval, oldval); } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); + Ref(valval); + SendEvent(*filter_iterator, ev, idxval, valval); } - Ref(idxval); - Ref(valval); - SendEvent(*it, ev, idxval, valval); - ++it; + ++filter_iterator; } + + } @@ -483,24 +598,61 @@ void InputMgr::EndCurrentSend(const InputReader* reader) { IterCookie *c = i->lastDict->InitForIteration(); InputHash* ih; while ( ( ih = i->lastDict->NextEntry(c) ) ) { + + if ( i->events.size() != 0 || i->filters.size() != 0 ) // we have a filter or an event + { - if ( i->events->size() > 0 ) { ListVal *idx = i->tab->RecoverIndex(ih->idxkey); assert(idx != 0); Val *val = i->tab->Lookup(idx); assert(val != 0); - std::list::iterator it = i->events->begin(); - while ( it != i->events->end() ) { - Ref(idx); - Ref(val); - EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - SendEvent(*it, ev, idx, val); - ++it; + + { + // ask filter, if we want to expire this element... 
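The block being wired in here is the predicate hook: before an insert, update or expiration is applied to the destination table, every filter that has a pred function gets to veto it. A compact stand-alone version of that check, with Filter and PassesFilters as illustrative stand-ins for the real types:

#include <functional>
#include <string>
#include <vector>

enum class Event { NEW, CHANGED, REMOVED };

struct Filter
{
    std::string name;
    // pred(event, index, value) -> keep the operation?
    std::function<bool(Event, const std::string&, const std::string&)> pred;
};

bool PassesFilters(const std::vector<Filter>& filters, Event ev,
                   const std::string& idx, const std::string& val)
{
    for ( const auto& f : filters )
    {
        if ( ! f.pred )            // no predicate set: this filter never vetoes
            continue;
        if ( ! f.pred(ev, idx, val) )
            return false;          // first veto wins, operation is skipped
    }
    return true;
}

A range-based loop also sidesteps the iterator pitfall where a continue that skips ++it never advances past a filter without a predicate.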
+ std::list::iterator it = i->filters.begin(); + while ( it != i->filters.end() ) { + if (! (*it).pred ) { + continue; + } + + EnumVal* ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); + Ref(idx); + Ref(val); + + val_list vl(3); + vl.append(ev); + vl.append(idx); + vl.append(val); + Val* v = (*it).pred->Call(&vl); + bool result = v->AsBool(); + Unref(v); + + if ( result == false ) { + // throw away. Hence - we quit and simply go to the next entry of lastDict + continue; + } + + ++it; + } } + + // + + { + std::list::iterator it = i->events.begin(); + while ( it != i->events.end() ) { + Ref(idx); + Ref(val); + EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); + SendEvent(*it, ev, idx, val); + ++it; + } + } + } - reporter->Error("Expiring element"); + //reporter->Error("Expiring element"); i->tab->Delete(ih->idxkey); } diff --git a/src/InputMgr.h b/src/InputMgr.h index 378838c2cf..74914e65ad 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -26,6 +26,9 @@ public: bool RegisterEvent(const EnumVal* id, string eventName); bool UnregisterEvent(const EnumVal* id, string eventName); + bool AddFilter(EnumVal *id, RecordVal* filter); + bool RemoveFilter(EnumVal* id, const string &name); + protected: friend class InputReader; @@ -62,6 +65,8 @@ private: string Hash(const string &input); + struct Filter; + }; extern InputMgr* input_mgr; diff --git a/src/input.bif b/src/input.bif index 90dd2386b1..2301482506 100644 --- a/src/input.bif +++ b/src/input.bif @@ -8,34 +8,46 @@ module Input; %%} type ReaderDescription: record; +type Filter: record; -function Input::__create_reader%(id: ID, description: Input::ReaderDescription%) : bool +function Input::__create_reader%(id: Log::ID, description: Input::ReaderDescription%) : bool %{ InputReader *the_reader = input_mgr->CreateReader(id->AsEnumVal(), description->AsRecordVal()); return new Val( the_reader != 0, TYPE_BOOL ); %} -function Input::__force_update%(id: ID%) : bool +function Input::__force_update%(id: Log::ID%) : bool %{ bool res = input_mgr->ForceUpdate(id->AsEnumVal()); return new Val( res, TYPE_BOOL ); %} -function Input::__add_event%(id: ID, name: string%) : bool +function Input::__add_event%(id: Log::ID, name: string%) : bool %{ bool res = input_mgr->RegisterEvent(id->AsEnumVal(), name->AsString()->CheckString()); return new Val( res, TYPE_BOOL ); %} -function Input::__remove_event%(id: ID, name: string%) : bool +function Input::__remove_event%(id: Log::ID, name: string%) : bool %{ bool res = input_mgr->UnregisterEvent(id->AsEnumVal(), name->AsString()->CheckString()); return new Val( res, TYPE_BOOL ); %} -function Input::__remove_reader%(id: ID%) : bool +function Input::__remove_reader%(id: Log::ID%) : bool %{ bool res = input_mgr->RemoveReader(id->AsEnumVal()); return new Val( res, TYPE_BOOL ); %} +function Input::__add_filter%(id: Log::ID, filter: Input::Filter%) : bool + %{ + bool res = input_mgr->AddFilter(id->AsEnumVal(), filter->AsRecordVal()); + return new Val( res, TYPE_BOOL ); + %} + +function Input::__remove_filter%(id: Log::ID, name: string%) : bool + %{ + bool res = input_mgr->RemoveFilter(id->AsEnumVal(), name->AsString()->CheckString()); + return new Val( res, TYPE_BOOL); + %} From 5f37040c962a9f3d524ddcccfaeba982acb0d500 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 4 Nov 2011 13:59:43 -0700 Subject: [PATCH 029/651] filters really working as intented (though probably still memleaky) --- src/InputMgr.cc | 30 +++++++++++++++++++++++++----- 1 file changed, 25 
insertions(+), 5 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 0b87a561ad..7ca1e1b485 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -537,8 +537,14 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { Unref(v); if ( result == false ) { - // throw away. Hence - we quit. - return; + if ( !updated ) { + // throw away. Hence - we quit. + return; + } else { + // keep old one + i->currDict->Insert(idxhash, h); + return; + } } ++it; @@ -596,8 +602,11 @@ void InputMgr::EndCurrentSend(const InputReader* reader) { } // lastdict contains all deleted entries and should be empty apart from that IterCookie *c = i->lastDict->InitForIteration(); + i->lastDict->MakeRobustCookie(c); InputHash* ih; - while ( ( ih = i->lastDict->NextEntry(c) ) ) { + HashKey *lastDictIdxKey; + //while ( ( ih = i->lastDict->NextEntry(c) ) ) { + while ( ( ih = i->lastDict->NextEntry(lastDictIdxKey, c) ) ) { if ( i->events.size() != 0 || i->filters.size() != 0 ) // we have a filter or an event { @@ -609,6 +618,7 @@ void InputMgr::EndCurrentSend(const InputReader* reader) { { + bool doBreak = false; // ask filter, if we want to expire this element... std::list::iterator it = i->filters.begin(); while ( it != i->filters.end() ) { @@ -627,13 +637,21 @@ void InputMgr::EndCurrentSend(const InputReader* reader) { Val* v = (*it).pred->Call(&vl); bool result = v->AsBool(); Unref(v); + + ++it; if ( result == false ) { - // throw away. Hence - we quit and simply go to the next entry of lastDict + // Keep it. Hence - we quit and simply go to the next entry of lastDict + // ah well - and we have to add the entry to currDict... + i->currDict->Insert(lastDictIdxKey, i->lastDict->RemoveEntry(lastDictIdxKey)); + doBreak = true; continue; } - ++it; + } + + if ( doBreak ) { + continue; } } @@ -654,6 +672,8 @@ void InputMgr::EndCurrentSend(const InputReader* reader) { //reporter->Error("Expiring element"); i->tab->Delete(ih->idxkey); + i->lastDict->Remove(lastDictIdxKey); + delete(ih); } i->lastDict->Clear(); From 2aa0f6da5795aa90c83c9fa1872776737f1e7416 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 4 Nov 2011 14:33:34 -0700 Subject: [PATCH 030/651] beautify script calls, track filters --- scripts/base/frameworks/input/main.bro | 63 +++++++++++++++++++++++++- src/input.bif | 12 ++--- 2 files changed, 68 insertions(+), 7 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 35ca6dfa4e..e0c41f19be 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -20,7 +20,68 @@ export { pred: function(typ: Input::Event, left: any, right: any): bool &optional; ## decision function, that decides if an inserton, update or removal should really be executed }; - + + const no_filter: Filter = [$name=""]; # Sentinel. 
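no_filter is the classic sentinel-return pattern: get_filter() can always hand back a value, and callers compare against the sentinel instead of handling a missing-key error. The same idea in a small C++ sketch (Registry and FilterInfo are illustrative names, not part of the code base):

#include <map>
#include <string>
#include <utility>

struct FilterInfo
{
    std::string name;              // empty name marks the sentinel
};

class Registry
{
public:
    void Add(const std::string& stream, FilterInfo f)
        { filters_[{stream, f.name}] = std::move(f); }

    void Remove(const std::string& stream, const std::string& name)
        { filters_.erase({stream, name}); }

    // Returns the stored filter, or the no_filter sentinel on a miss.
    const FilterInfo& Get(const std::string& stream, const std::string& name) const
    {
        auto it = filters_.find({stream, name});
        return it != filters_.end() ? it->second : no_filter_;
    }

private:
    std::map<std::pair<std::string, std::string>, FilterInfo> filters_;
    FilterInfo no_filter_{};       // name == "" means "not found"
};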
+ + global create_reader: function(id: Log::ID, description: Input::ReaderDescription) : bool; + global remove_reader: function(id: Log::ID) : bool; + global force_update: function(id: Log::ID) : bool; + global add_event: function(id: Log::ID, name: string) : bool; + global remove_event: function(id: Log::ID, name: string) : bool; + global add_filter: function(id: Log::ID, filter: Input::Filter) : bool; + global remove_filter: function(id: Log::ID, name: string) : bool; + global get_filter: function(id: ID, name: string) : Filter; + } @load base/input.bif + + +module Input; + +global filters: table[ID, string] of Filter; + +function create_reader(id: Log::ID, description: Input::ReaderDescription) : bool + { + return __create_reader(id, description); + } + +function remove_reader(id: Log::ID) : bool + { + return __remove_reader(id); + } + +function force_update(id: Log::ID) : bool + { + return __force_update(id); + } + +function add_event(id: Log::ID, name: string) : bool + { + return __add_event(id, name); + } + +function remove_event(id: Log::ID, name: string) : bool + { + return __remove_event(id, name); + } + +function add_filter(id: Log::ID, filter: Input::Filter) : bool + { + filters[id, filter$name] = filter; + return __add_filter(id, filter); + } + +function remove_filter(id: Log::ID, name: string) : bool + { + delete filters[id, name]; + return __remove_filter(id, name); + } + +function get_filter(id: ID, name: string) : Filter + { + if ( [id, name] in filters ) + return filters[id, name]; + + return no_filter; + } diff --git a/src/input.bif b/src/input.bif index 2301482506..c4bf14e3ed 100644 --- a/src/input.bif +++ b/src/input.bif @@ -16,6 +16,12 @@ function Input::__create_reader%(id: Log::ID, description: Input::ReaderDescript return new Val( the_reader != 0, TYPE_BOOL ); %} +function Input::__remove_reader%(id: Log::ID%) : bool + %{ + bool res = input_mgr->RemoveReader(id->AsEnumVal()); + return new Val( res, TYPE_BOOL ); + %} + function Input::__force_update%(id: Log::ID%) : bool %{ bool res = input_mgr->ForceUpdate(id->AsEnumVal()); @@ -34,12 +40,6 @@ function Input::__remove_event%(id: Log::ID, name: string%) : bool return new Val( res, TYPE_BOOL ); %} -function Input::__remove_reader%(id: Log::ID%) : bool - %{ - bool res = input_mgr->RemoveReader(id->AsEnumVal()); - return new Val( res, TYPE_BOOL ); - %} - function Input::__add_filter%(id: Log::ID, filter: Input::Filter%) : bool %{ bool res = input_mgr->AddFilter(id->AsEnumVal(), filter->AsRecordVal()); From 1d39eaf32dff51ef779188f3123e3d63a28f71f8 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 4 Nov 2011 15:03:40 -0700 Subject: [PATCH 031/651] small fixes, less leakiness --- scripts/base/frameworks/input/main.bro | 1 - src/InputMgr.cc | 98 +++++++++++--------------- 2 files changed, 41 insertions(+), 58 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index e0c41f19be..6e19452f7a 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -16,7 +16,6 @@ export { name: string; ## descriptive name. 
for later removal - pred: function(typ: Input::Event, left: any, right: any): bool &optional; ## decision function, that decides if an inserton, update or removal should really be executed }; diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 7ca1e1b485..18b17c7576 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -28,8 +28,14 @@ struct InputMgr::Filter { EnumVal* id; string name; Func* pred; + + ~Filter(); }; +InputMgr::Filter::~Filter() { + Unref(id); +} + struct InputMgr::ReaderInfo { EnumVal* id; EnumVal* type; @@ -45,15 +51,20 @@ struct InputMgr::ReaderInfo { PDict(InputHash)* lastDict; list events; // events we fire when "something" happens - list filters; // events we fire when "something" happens - -// ~ReaderInfo(); + list filters; // filters that can prevent our actions + ~ReaderInfo(); }; -//void InputMgr::~ReaderInfo() { -// -//} +InputMgr::ReaderInfo::~ReaderInfo() { + Unref(type); + Unref(tab); + Unref(itype); + Unref(rtype); + Unref(id); + + delete(reader); +} struct InputReaderDefinition { bro_int_t type; // the type @@ -71,10 +82,8 @@ InputReaderDefinition input_readers[] = { InputMgr::InputMgr() { - //DBG_LOG(DBG_LOGGING, "this has to happen"); } - // create a new input reader object to be used at whomevers leisure lateron. InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) { @@ -163,30 +172,26 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) ReaderInfo* info = new ReaderInfo; info->reader = reader_obj; - info->type = reader; - Ref(reader); + info->type = reader->Ref()->AsEnumVal(); info->num_idx_fields = idxfields; info->num_val_fields = valfields; - info->tab = dst; - Ref(dst); - info->rtype = val; - Ref(val); // we save a pointer of it... I really hope that this wasn't already done anywhere. - info->id = id; - Ref(id); // ditto... - info->itype = idx; - Ref(idx); - readers.push_back(info); + info->tab = dst->Ref()->AsTableVal(); + info->rtype = val->Ref()->AsRecordType(); + info->id = id->Ref()->AsEnumVal(); + info->itype = idx->Ref()->AsRecordType(); info->currDict = new PDict(InputHash); info->lastDict = new PDict(InputHash); + readers.push_back(info); + int success = reader_obj->Init(source, fieldsV.size(), idxfields, fields); if ( success == false ) { - RemoveReader(id); + assert( RemoveReader(id) ); return 0; } success = reader_obj->Update(); if ( success == false ) { - RemoveReader(id); + assert ( RemoveReader(id) ); return 0; } @@ -240,12 +245,12 @@ bool InputMgr::RemoveReader(const EnumVal* id) { ReaderInfo *i = 0; for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) { - if ( (*s)->id == id ) - { - i = (*s); - readers.erase(s); // remove from vector - break; - } + if ( (*s)->id == id ) + { + i = (*s); + readers.erase(s); // remove from vector + break; + } } if ( i == 0 ) { @@ -254,14 +259,6 @@ bool InputMgr::RemoveReader(const EnumVal* id) { i->reader->Finish(); - - Unref(i->type); - Unref(i->tab); - Unref(i->itype); - Unref(i->rtype); - Unref(i->id); - - delete(i->reader); delete(i); return true; @@ -279,6 +276,8 @@ bool InputMgr::RegisterEvent(const EnumVal* id, string eventName) { return true; } +// remove first event with name eventName +// (though there shouldn't really be several events with the same name... 
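UnregisterEvent() below, like RemoveFilter(), is just "erase the first element of a std::list whose name matches". A stand-alone version of that loop using std::find (RemoveFirstByName is an illustrative name); for the first match the behaviour is the same as the manual while/erase variant:

#include <algorithm>
#include <list>
#include <string>

bool RemoveFirstByName(std::list<std::string>& items, const std::string& name)
{
    auto it = std::find(items.begin(), items.end(), name);
    if ( it == items.end() )
        return false;              // nothing matched

    items.erase(it);               // list::erase invalidates only this iterator
    return true;
}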
bool InputMgr::UnregisterEvent(const EnumVal* id, string eventName) { ReaderInfo *i = FindReader(id); if ( i == 0 ) { @@ -286,23 +285,18 @@ bool InputMgr::UnregisterEvent(const EnumVal* id, string eventName) { return false; } - //bool erased = false; - std::list::iterator it = i->events.begin(); while ( it != i->events.end() ) { if ( *it == eventName ) { it = i->events.erase(it); return true; - // erased = true; } else ++it; } - return false; - //return erased; } @@ -335,14 +329,13 @@ bool InputMgr::UnrollRecordType(vector *fields, const RecordType *rec } return true; - } bool InputMgr::ForceUpdate(const EnumVal* id) { ReaderInfo *i = FindReader(id); if ( i == 0 ) { - reporter->InternalError("Reader not found"); + reporter->Error("Reader not found"); return false; } @@ -352,7 +345,7 @@ bool InputMgr::ForceUpdate(const EnumVal* id) bool InputMgr::AddFilter(EnumVal *id, RecordVal* fval) { ReaderInfo *i = FindReader(id); if ( i == 0 ) { - reporter->InternalError("Reader not found"); + reporter->Error("Reader not found"); return false; } @@ -380,7 +373,7 @@ bool InputMgr::AddFilter(EnumVal *id, RecordVal* fval) { bool InputMgr::RemoveFilter(EnumVal* id, const string &name) { ReaderInfo *i = FindReader(id); if ( i == 0 ) { - reporter->InternalError("Reader not found"); + reporter->Error("Reader not found"); return false; } @@ -427,7 +420,6 @@ Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const Lo assert ( position == num_fields ); return idxval; - } @@ -538,7 +530,8 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { if ( result == false ) { if ( !updated ) { - // throw away. Hence - we quit. + // throw away. Hence - we quit. And remove the entry from the current dictionary... + delete(i->currDict->RemoveEntry(idxhash)); return; } else { // keep old one @@ -588,9 +581,6 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { ++filter_iterator; } - - - } @@ -670,13 +660,12 @@ void InputMgr::EndCurrentSend(const InputReader* reader) { } - //reporter->Error("Expiring element"); i->tab->Delete(ih->idxkey); - i->lastDict->Remove(lastDictIdxKey); + i->lastDict->Remove(lastDictIdxKey); // deletex in next line delete(ih); } - i->lastDict->Clear(); + i->lastDict->Clear(); // should be empty... but... well... who knows... delete(i->lastDict); i->lastDict = i->currDict; @@ -699,11 +688,6 @@ void InputMgr::Put(const InputReader* reader, const LogVal* const *vals) { } else { RecordVal * r = new RecordVal(i->rtype); - /* if ( i->rtype->NumFields() != (int) i->num_val_fields ) { - reporter->InternalError("Type mismatch"); - return; - } */ - for ( int j = 0; j < i->rtype->NumFields(); j++) { Val* val = 0; From 5983d44d950d2b93d91c36846668b35fb24d437a Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 8 Nov 2011 15:33:32 -0800 Subject: [PATCH 032/651] read header line in bro logfile format --- src/InputReaderAscii.cc | 19 +++++++++++++++++-- src/InputReaderAscii.h | 3 ++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 39635de407..e4770421c6 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -62,7 +62,7 @@ bool InputReaderAscii::DoInit(string path, int num_fields, int idx_fields, const bool InputReaderAscii::ReadHeader() { // try to read the header line... 
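The new GetLine() helper is what makes Bro-format log files readable as input: comment lines are skipped, except that a "#fields" line is stripped of its prefix and handed to the header parser. A stand-alone sketch of that behaviour (NextDataLine is an illustrative name; skipping empty lines is an extra assumption, the patch itself does not check for them):

#include <istream>
#include <string>

bool NextDataLine(std::istream& in, std::string& line)
{
    while ( std::getline(in, line) )
    {
        if ( line.empty() )
            continue;                  // assumption: blank lines are skipped too

        if ( line[0] != '#' )
            return true;               // ordinary data line

        const std::string prefix = "#fields\t";
        if ( line.compare(0, prefix.size(), prefix) == 0 )
        {
            line = line.substr(prefix.size());  // hand back just the column names
            return true;
        }
        // any other "#..." header line is ignored
    }
    return false;                      // end of file
}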
string line; - if ( !getline(*file, line) ) { + if ( !GetLine(line) ) { Error("could not read first line"); return false; } @@ -111,6 +111,21 @@ bool InputReaderAscii::ReadHeader() { return true; } +bool InputReaderAscii::GetLine(string& str) { + while ( getline(*file, str) ) { + if ( str[0] != '#' ) { + return true; + } + + if ( str.compare(0,8, "#fields\t") == 0 ) { + str = str.substr(8); + return true; + } + } + + return false; +} + // read the entire file and send appropriate thingies back to InputMgr bool InputReaderAscii::DoUpdate() { @@ -143,7 +158,7 @@ bool InputReaderAscii::DoUpdate() { //map *newKeyMap = new map(); string line; - while ( getline(*file, line ) ) { + while ( GetLine(line ) ) { // split on tabs istringstream splitstream(line); diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index 673a2cdae2..f86cfd0062 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -37,8 +37,9 @@ protected: virtual bool DoUpdate(); private: - bool ReadHeader(); + + bool GetLine(string& str); ifstream* file; string fname; From 1a642f3568c3c8c445f76a5b7438f385b880bbe7 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 14 Nov 2011 17:18:28 -0800 Subject: [PATCH 033/651] tried enum support - doesn't yet work due to internal bro interface problems... --- src/InputMgr.cc | 6 ++++++ src/InputReaderAscii.cc | 2 +- src/input.bif | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 18b17c7576..8db5a70c66 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -828,6 +828,7 @@ HashKey* InputMgr::HashLogVals(const int num_elements, const LogVal* const *vals break; case TYPE_STRING: + case TYPE_ENUM: { length += val->val.string_val->size(); break; @@ -883,6 +884,7 @@ HashKey* InputMgr::HashLogVals(const int num_elements, const LogVal* const *vals break; case TYPE_STRING: + case TYPE_ENUM: { memcpy(data+position, val->val.string_val->c_str(), val->val.string_val->length()); position += val->val.string_val->size(); @@ -958,6 +960,10 @@ Val* InputMgr::LogValToVal(const LogVal* val, TypeTag request_type) { return new SubNetVal(val->val.subnet_val.net, val->val.subnet_val.width); break; + case TYPE_ENUM: + reporter->InternalError("Sorry, Enums reading does not yet work, missing internal inferface"); + + default: reporter->InternalError("unsupported type for input_read"); } diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index e4770421c6..f91c55b666 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -128,7 +128,6 @@ bool InputReaderAscii::GetLine(string& str) { // read the entire file and send appropriate thingies back to InputMgr bool InputReaderAscii::DoUpdate() { - // dirty, fix me. 
(well, apparently after trying seeking, etc - this is not that bad) if ( file && file->is_open() ) { @@ -198,6 +197,7 @@ bool InputReaderAscii::DoUpdate() { //bzero(val, sizeof(LogVal)); switch ( currMapping.type ) { + case TYPE_ENUM: case TYPE_STRING: val->val.string_val = new string(s); break; diff --git a/src/input.bif b/src/input.bif index c4bf14e3ed..7b051fba16 100644 --- a/src/input.bif +++ b/src/input.bif @@ -51,3 +51,4 @@ function Input::__remove_filter%(id: Log::ID, name: string%) : bool bool res = input_mgr->RemoveFilter(id->AsEnumVal(), name->AsString()->CheckString()); return new Val( res, TYPE_BOOL); %} + From cde8153c1868f6bd3984a2c35aeb645b1023968d Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 15 Nov 2011 08:36:03 -0800 Subject: [PATCH 034/651] switch to set if record or simple value is desired. --- scripts/base/frameworks/input/main.bro | 1 + src/InputMgr.cc | 14 ++++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 6e19452f7a..2b87ac980c 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -9,6 +9,7 @@ export { idx: any; val: any; destination: any; + want_record: bool &default=T; reader: Reader &default=default_reader; }; diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 8db5a70c66..5f99583c10 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -42,6 +42,7 @@ struct InputMgr::ReaderInfo { InputReader* reader; unsigned int num_idx_fields; unsigned int num_val_fields; + bool want_record; TableVal* tab; RecordType* rtype; @@ -147,6 +148,7 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) RecordType *idx = description->Lookup(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); RecordType *val = description->Lookup(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); TableVal *dst = description->Lookup(rtype->FieldOffset("destination"))->AsTableVal(); + Val *want_record = description->Lookup(rtype->FieldOffset("want_record")); vector fieldsV; // vector, because we don't know the length beforehands @@ -181,6 +183,11 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) info->itype = idx->Ref()->AsRecordType(); info->currDict = new PDict(InputHash); info->lastDict = new PDict(InputHash); + info->want_record = ( want_record->InternalInt() == 1 ); + + if ( valfields > 1 ) { + assert(info->want_record); + } readers.push_back(info); @@ -311,7 +318,6 @@ bool InputMgr::UnrollRecordType(vector *fields, const RecordType *rec if ( rec->FieldType(i)->Tag() == TYPE_RECORD ) { - string prep = nameprepend + rec->FieldName(i) + "."; if ( !UnrollRecordType(fields, rec->FieldType(i)->AsRecordType(), prep) ) @@ -464,7 +470,7 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { Val* valval; int position = i->num_idx_fields; - if ( i->num_val_fields == 1 ) { + if ( i->num_val_fields == 1 && !i->want_record ) { valval = LogValToVal(vals[i->num_idx_fields]); } else { RecordVal * r = new RecordVal(i->rtype); @@ -683,7 +689,7 @@ void InputMgr::Put(const InputReader* reader, const LogVal* const *vals) { Val* valval; int position = i->num_idx_fields; - if ( i->num_val_fields == 1 ) { + if ( i->num_val_fields == 1 && !i->want_record ) { valval = LogValToVal(vals[i->num_idx_fields]); } else { RecordVal * r = new RecordVal(i->rtype); @@ -961,7 +967,7 @@ Val* InputMgr::LogValToVal(const LogVal* val, TypeTag 
request_type) { break; case TYPE_ENUM: - reporter->InternalError("Sorry, Enums reading does not yet work, missing internal inferface"); + reporter->InternalError("Sorry, Enum reading does not yet work, missing internal inferface"); default: From 4a3c9923253c8c26973a24c537a3ca231961db5e Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 15 Nov 2011 10:57:45 -0800 Subject: [PATCH 035/651] InputReader can read Sets. --- src/InputMgr.cc | 244 +++++++++++++++++++++++++--------------- src/InputMgr.h | 2 + src/InputReaderAscii.cc | 211 ++++++++++++++++++++++------------ src/InputReaderAscii.h | 5 + src/LogMgr.cc | 6 +- src/LogMgr.h | 4 +- 6 files changed, 310 insertions(+), 162 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 5f99583c10..ba17d8448f 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -234,11 +234,14 @@ bool InputMgr::IsCompatibleType(BroType* t) case TYPE_TABLE: - return false; + { + return IsCompatibleType(t->AsSetType()->Indices()->PureType()); + } case TYPE_VECTOR: { - return IsCompatibleType(t->AsVectorType()->YieldType()); + return false; // do me... + //return IsCompatibleType(t->AsVectorType()->YieldType()); } default: @@ -329,6 +332,9 @@ bool InputMgr::UnrollRecordType(vector *fields, const RecordType *rec LogField* field = new LogField(); field->name = nameprepend + rec->FieldName(i); field->type = rec->FieldType(i)->Tag(); + if ( field->type == TYPE_TABLE ) { + field->set_type = rec->FieldType(i)->AsSetType()->Indices()->PureType()->Tag(); + } fields->push_back(field); } @@ -810,49 +816,133 @@ Val* InputMgr::LogValToRecordVal(const LogVal* const *vals, RecordType *request_ } + +int InputMgr::GetLogValLength(const LogVal* val) { + int length = 0; + + switch (val->type) { + case TYPE_BOOL: + case TYPE_INT: + length += sizeof(val->val.int_val); + break; + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + length += sizeof(val->val.uint_val); + break; + + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + length += sizeof(val->val.double_val); + break; + + case TYPE_STRING: + case TYPE_ENUM: + { + length += val->val.string_val->size(); + break; + } + + case TYPE_ADDR: + length += NUM_ADDR_WORDS*sizeof(uint32_t); + break; + + case TYPE_SUBNET: + length += sizeof(val->val.subnet_val.width); + length += sizeof(val->val.subnet_val.net); + break; + + case TYPE_TABLE: { + for ( int i = 0; i < val->val.set_val.size; i++ ) { + length += GetLogValLength(val->val.set_val.vals[i]); + } + break; + } + + default: + reporter->InternalError("unsupported type %d for GetLogValLength", val->type); + } + + return length; + +} + +int InputMgr::CopyLogVal(char *data, const int startpos, const LogVal* val) { + switch ( val->type ) { + case TYPE_BOOL: + case TYPE_INT: + //reporter->Error("Adding field content to pos %d: %lld", val->val.int_val, startpos); + memcpy(data+startpos, (const void*) &(val->val.int_val), sizeof(val->val.int_val)); + //*(data+startpos) = val->val.int_val; + return sizeof(val->val.int_val); + break; + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + //*(data+startpos) = val->val.uint_val; + memcpy(data+startpos, (const void*) &(val->val.uint_val), sizeof(val->val.uint_val)); + return sizeof(val->val.uint_val); + break; + + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + //*(data+startpos) = val->val.double_val; + memcpy(data+startpos, (const void*) &(val->val.double_val), sizeof(val->val.double_val)); + return sizeof(val->val.double_val); + break; + + case TYPE_STRING: + case TYPE_ENUM: + { + 
memcpy(data+startpos, val->val.string_val->c_str(), val->val.string_val->length()); + return val->val.string_val->size(); + break; + } + + case TYPE_ADDR: + memcpy(data+startpos, val->val.addr_val, NUM_ADDR_WORDS*sizeof(uint32_t)); + return NUM_ADDR_WORDS*sizeof(uint32_t); + break; + + case TYPE_SUBNET: { + int length = 0; + memcpy(data+startpos,(const char*) &(val->val.subnet_val.width), sizeof(val->val.subnet_val.width) ); + length += sizeof(val->val.subnet_val.width); + memcpy(data+startpos, (const char*) &(val->val.subnet_val.net), sizeof(val->val.subnet_val.net) ); + length += sizeof(val->val.subnet_val.net); + return length; + break; + } + + case TYPE_TABLE: { + int length = 0; + for ( int i = 0; i < val->val.set_val.size; i++ ) { + length += CopyLogVal(data, startpos+length, val->val.set_val.vals[i]); + } + return length; + break; + } + + default: + reporter->InternalError("unsupported type %d for CopyLogVal", val->type); + return 0; + } + + reporter->InternalError("internal error"); + return 0; + +} + HashKey* InputMgr::HashLogVals(const int num_elements, const LogVal* const *vals) { int length = 0; for ( int i = 0; i < num_elements; i++ ) { const LogVal* val = vals[i]; - switch (val->type) { - case TYPE_BOOL: - case TYPE_INT: - length += sizeof(val->val.int_val); - break; - - case TYPE_COUNT: - case TYPE_COUNTER: - case TYPE_PORT: - length += sizeof(val->val.uint_val); - break; - - case TYPE_DOUBLE: - case TYPE_TIME: - case TYPE_INTERVAL: - length += sizeof(val->val.double_val); - break; - - case TYPE_STRING: - case TYPE_ENUM: - { - length += val->val.string_val->size(); - break; - } - - case TYPE_ADDR: - length += NUM_ADDR_WORDS*sizeof(uint32_t); - break; - - case TYPE_SUBNET: - length += sizeof(val->val.subnet_val.width); - length += sizeof(val->val.subnet_val.net); - break; - - default: - reporter->InternalError("unsupported type for hashlogvals"); - } - + length += GetLogValLength(val); } //reporter->Error("Length: %d", length); @@ -864,56 +954,7 @@ HashKey* InputMgr::HashLogVals(const int num_elements, const LogVal* const *vals } for ( int i = 0; i < num_elements; i++ ) { const LogVal* val = vals[i]; - switch ( val->type ) { - case TYPE_BOOL: - case TYPE_INT: - //reporter->Error("Adding field content to pos %d: %lld", val->val.int_val, position); - memcpy(data+position, (const void*) &(val->val.int_val), sizeof(val->val.int_val)); - //*(data+position) = val->val.int_val; - position += sizeof(val->val.int_val); - break; - - case TYPE_COUNT: - case TYPE_COUNTER: - case TYPE_PORT: - //*(data+position) = val->val.uint_val; - memcpy(data+position, (const void*) &(val->val.uint_val), sizeof(val->val.uint_val)); - position += sizeof(val->val.uint_val); - break; - - case TYPE_DOUBLE: - case TYPE_TIME: - case TYPE_INTERVAL: - //*(data+position) = val->val.double_val; - memcpy(data+position, (const void*) &(val->val.double_val), sizeof(val->val.double_val)); - position += sizeof(val->val.double_val); - break; - - case TYPE_STRING: - case TYPE_ENUM: - { - memcpy(data+position, val->val.string_val->c_str(), val->val.string_val->length()); - position += val->val.string_val->size(); - break; - } - - case TYPE_ADDR: - memcpy(data+position, val->val.addr_val, NUM_ADDR_WORDS*sizeof(uint32_t)); - position += NUM_ADDR_WORDS*sizeof(uint32_t); - break; - - case TYPE_SUBNET: - memcpy(data+position,(const char*) &(val->val.subnet_val.width), sizeof(val->val.subnet_val.width) ); - position += sizeof(val->val.subnet_val.width); - memcpy(data+position, (const char*) &(val->val.subnet_val.net), 
sizeof(val->val.subnet_val.net) ); - position += sizeof(val->val.subnet_val.net); - break; - - default: - reporter->InternalError("unsupported type for hashlogvals2"); - } - - + position += CopyLogVal(data, position, val); } assert(position == length); @@ -966,6 +1007,29 @@ Val* InputMgr::LogValToVal(const LogVal* val, TypeTag request_type) { return new SubNetVal(val->val.subnet_val.net, val->val.subnet_val.width); break; + case TYPE_TABLE: { + if ( val->val.set_val.size == 0 ) { + // empty table + TypeList* set_index = new TypeList(base_type(TYPE_ANY)); + // iim quite sure this does not work... we probably need the internal set type for this... + reporter->InternalError("Implement me."); + return new TableVal(new SetType(set_index, 0)); + } else { + // all entries have to have the same type... + TypeTag type = val->val.set_val.vals[0]->type; + TypeList* set_index = new TypeList(base_type(type)); + set_index->Append(base_type(type)); + SetType* s = new SetType(set_index, 0); + TableVal* t = new TableVal(s); + for ( int i = 0; i < val->val.set_val.size; i++ ) { + assert( val->val.set_val.vals[i]->type == type); + t->Assign(LogValToVal( val->val.set_val.vals[i], type ), 0); + } + return t; + } + break; + } + case TYPE_ENUM: reporter->InternalError("Sorry, Enum reading does not yet work, missing internal inferface"); diff --git a/src/InputMgr.h b/src/InputMgr.h index 74914e65ad..d4bfa5c355 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -52,6 +52,8 @@ private: void SendEvent(const string& name, EnumVal* event, Val* left, Val* right); HashKey* HashLogVals(const int num_elements, const LogVal* const *vals); + int GetLogValLength(const LogVal* val); + int CopyLogVal(char *data, const int startpos, const LogVal* val); Val* LogValToVal(const LogVal* val, TypeTag request_type = TYPE_ANY); Val* LogValToIndexVal(int num_fields, const RecordType* type, const LogVal* const *vals); diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index f91c55b666..60a8c5685a 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -11,12 +11,22 @@ FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int position = arg_position; } +FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_set_type, int arg_position) + : name(arg_name), type(arg_type), set_type(arg_set_type) +{ + position = arg_position; +} + FieldMapping::FieldMapping(const FieldMapping& arg) - : name(arg.name), type(arg.type) + : name(arg.name), type(arg.type), set_type(arg.set_type) { position = arg.position; } +FieldMapping FieldMapping::setType() { + return FieldMapping(name, set_type, position); +} + InputReaderAscii::InputReaderAscii() { //DBG_LOG(DBG_LOGGING, "input reader initialized"); @@ -81,7 +91,7 @@ bool InputReaderAscii::ReadHeader() { const LogField* field = fields[i]; if ( field->name == s ) { // cool, found field. 
note position - FieldMapping f(field->name, field->type, i); + FieldMapping f(field->name, field->type, field->set_type, i); columnMap.push_back(f); wantFields++; break; // done with searching @@ -126,6 +136,132 @@ bool InputReaderAscii::GetLine(string& str) { return false; } + +LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { + + LogVal* val = new LogVal(field.type, true); + //bzero(val, sizeof(LogVal)); + + switch ( field.type ) { + case TYPE_ENUM: + case TYPE_STRING: + val->val.string_val = new string(s); + break; + + case TYPE_BOOL: + if ( s == "T" ) { + val->val.int_val = 1; + } else if ( s == "F" ) { + val->val.int_val = 0; + } else { + Error(Fmt("Invalid value for boolean: %s", s.c_str())); + return false; + } + break; + + case TYPE_INT: + val->val.int_val = atoi(s.c_str()); + break; + + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + val->val.double_val = atof(s.c_str()); + break; + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + val->val.uint_val = atoi(s.c_str()); + break; + + case TYPE_SUBNET: { + int pos = s.find("/"); + string width = s.substr(pos+1); + val->val.subnet_val.width = atoi(width.c_str()); + string addr = s.substr(0, pos); + s = addr; + // NOTE: dottet_to_addr BREAKS THREAD SAFETY! it uses reporter. + // Solve this some other time.... + val->val.subnet_val.net = dotted_to_addr(s.c_str()); + break; + + } + case TYPE_ADDR: { + // NOTE: dottet_to_addr BREAKS THREAD SAFETY! it uses reporter. + // Solve this some other time.... + addr_type t = dotted_to_addr(s.c_str()); +#ifdef BROv6 + copy_addr(t, val->val.addr_val); +#else + copy_addr(&t, val->val.addr_val); +#endif + break; + } + + case TYPE_TABLE: { + // construct a table from entry... + // for the moment assume, that entries are split by ",". + + if ( s == "-" ) { + // empty + val->val.set_val.size = 0; + break; + } + + // how many entries do we have... + unsigned int length = 1; + for ( unsigned int i = 0; i < s.size(); i++ ) + if ( s[i] == ',') length++; + + unsigned int pos = 0; + LogVal** lvals = new LogVal* [length]; + val->val.set_val.vals = lvals; + val->val.set_val.size = length; + + istringstream splitstream(s); + while ( splitstream ) { + string element; + + if ( pos >= length ) { + Error(Fmt("Internal error while parsing set. 
pos %d > length %d", pos, length)); + break; + } + + if ( !getline(splitstream, element, ',') ) + break; + + + LogVal* newval = EntryToVal(element, field.setType()); + if ( newval == 0 ) { + Error("Error while reading set"); + return 0; + } + lvals[pos] = newval; + + pos++; + + } + + if ( pos != length ) { + Error("Internal error while parsing set: did not find all elements"); + return 0; + } + + break; + } + + + default: + Error(Fmt("unsupported field format %d for %s", field.type, + field.name.c_str())); + return 0; + } + + return val; + +} + // read the entire file and send appropriate thingies back to InputMgr bool InputReaderAscii::DoUpdate() { @@ -161,7 +297,6 @@ bool InputReaderAscii::DoUpdate() { // split on tabs istringstream splitstream(line); - string s; LogVal** fields = new LogVal*[num_fields]; //string string_fields[num_fields]; @@ -170,6 +305,7 @@ bool InputReaderAscii::DoUpdate() { unsigned int currField = 0; while ( splitstream ) { + string s; if ( !getline(splitstream, s, '\t') ) break; @@ -193,73 +329,10 @@ bool InputReaderAscii::DoUpdate() { return false; } - LogVal* val = new LogVal(currMapping.type, true); - //bzero(val, sizeof(LogVal)); - - switch ( currMapping.type ) { - case TYPE_ENUM: - case TYPE_STRING: - val->val.string_val = new string(s); - break; - - case TYPE_BOOL: - if ( s == "T" ) { - val->val.int_val = 1; - } else if ( s == "F" ) { - val->val.int_val = 0; - } else { - Error(Fmt("Invalid value for boolean: %s", s.c_str())); - return false; - } - break; - - case TYPE_INT: - val->val.int_val = atoi(s.c_str()); - break; - - case TYPE_DOUBLE: - case TYPE_TIME: - case TYPE_INTERVAL: - val->val.double_val = atof(s.c_str()); - break; - - case TYPE_COUNT: - case TYPE_COUNTER: - case TYPE_PORT: - val->val.uint_val = atoi(s.c_str()); - break; - - case TYPE_SUBNET: { - int pos = s.find("/"); - string width = s.substr(pos+1); - val->val.subnet_val.width = atoi(width.c_str()); - string addr = s.substr(0, pos); - s = addr; - // NOTE: dottet_to_addr BREAKS THREAD SAFETY! it uses reporter. - // Solve this some other time.... - val->val.subnet_val.net = dotted_to_addr(s.c_str()); - break; - - } - case TYPE_ADDR: { - // NOTE: dottet_to_addr BREAKS THREAD SAFETY! it uses reporter. - // Solve this some other time.... 
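The subnet handling here is plain string surgery: everything after the '/' is the prefix width, everything before it goes to the address parser (the substr(pos+1) fix earlier in this series matters exactly here). A stand-alone sketch using the thread-safe POSIX inet_pton() instead of dotted_to_addr(), since the comment above flags the latter as not thread safe; ParseSubnet and Subnet are illustrative names and the sketch is IPv4-only:

#include <arpa/inet.h>
#include <cstdint>
#include <string>

struct Subnet
{
    uint32_t net   = 0;    // network address, network byte order
    int      width = 0;    // prefix length
};

bool ParseSubnet(const std::string& s, Subnet& out)
{
    std::string::size_type pos = s.find('/');
    if ( pos == std::string::npos )
        return false;                          // no prefix length present

    out.width = std::stoi(s.substr(pos + 1));  // text after the '/'
    std::string addr = s.substr(0, pos);       // text before the '/'

    struct in_addr a;
    if ( inet_pton(AF_INET, addr.c_str(), &a) != 1 )
        return false;                          // not a valid dotted quad

    out.net = a.s_addr;
    return true;
}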
- addr_type t = dotted_to_addr(s.c_str()); -#ifdef BROv6 - copy_addr(t, val->val.addr_val); -#else - copy_addr(&t, val->val.addr_val); -#endif - break; - } - - - default: - Error(Fmt("unsupported field format %d for %s", currMapping.type, - currMapping.name.c_str())); + LogVal* val = EntryToVal(s, currMapping); + if ( val == 0 ) { return false; - } - + } fields[currMapping.position] = val; //string_fields[currMapping.position] = s; diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index f86cfd0062..ab2b89339c 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -12,11 +12,15 @@ struct FieldMapping { string name; TypeTag type; + TypeTag set_type; int position; FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position); + FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_set_type, int arg_position); FieldMapping(const FieldMapping& arg); FieldMapping() { position = -1; } + + FieldMapping setType(); bool IsEmpty() { return position == -1; } }; @@ -38,6 +42,7 @@ protected: private: bool ReadHeader(); + LogVal* EntryToVal(string s, FieldMapping type); bool GetLine(string& str); diff --git a/src/LogMgr.cc b/src/LogMgr.cc index f78a2a19e0..9818d9cdfb 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -81,16 +81,18 @@ struct LogMgr::Stream { bool LogField::Read(SerializationFormat* fmt) { int t; + int it; - bool success = (fmt->Read(&name, "name") && fmt->Read(&t, "type")); + bool success = (fmt->Read(&name, "name") && fmt->Read(&t, "type") && fmt->Read(&it, "set_type") ); type = (TypeTag) t; + set_type = (TypeTag) it; return success; } bool LogField::Write(SerializationFormat* fmt) const { - return (fmt->Write(name, "name") && fmt->Write((int)type, "type")); + return (fmt->Write(name, "name") && fmt->Write((int)type, "type") && fmt->Write((int)set_type, "set_type")); } LogVal::~LogVal() diff --git a/src/LogMgr.h b/src/LogMgr.h index 10530960cb..40dab8677b 100644 --- a/src/LogMgr.h +++ b/src/LogMgr.h @@ -15,10 +15,12 @@ class SerializationFormat; struct LogField { string name; TypeTag type; + // needed by input framework. otherwise it cannot determine the inner type of a set. + TypeTag set_type; LogField() { } LogField(const LogField& other) - : name(other.name), type(other.type) { } + : name(other.name), type(other.type), set_type(other.set_type) { } // (Un-)serialize. bool Read(SerializationFormat* fmt); From 3d0162bcdc2e2a3e4923ac22ec5597cb4d167304 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 15 Nov 2011 11:18:48 -0800 Subject: [PATCH 036/651] isCompatibleType works correctly for tables. --- src/InputMgr.cc | 27 +++++++++++++++++---------- src/InputMgr.h | 2 +- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index ba17d8448f..4506501c94 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -205,7 +205,8 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) return reader_obj; } -bool InputMgr::IsCompatibleType(BroType* t) + +bool InputMgr::IsCompatibleType(BroType* t, bool atomic_only) { if ( ! t ) return false; @@ -223,24 +224,29 @@ bool InputMgr::IsCompatibleType(BroType* t) case TYPE_INTERVAL: case TYPE_ENUM: case TYPE_STRING: - case TYPE_RECORD: - // for record: check, if all elements are compatible? But... LogMgr also doesn't do this. - // ^ recursive checking is done in UnrollRecordType. return true; - - case TYPE_FILE: - case TYPE_FUNC: - return false; + case TYPE_RECORD: + return ! 
atomic_only; case TYPE_TABLE: { - return IsCompatibleType(t->AsSetType()->Indices()->PureType()); + if ( atomic_only ) + return false; + + if ( ! t->IsSet() ) + return false; + + return IsCompatibleType(t->AsSetType()->Indices()->PureType(), true); } case TYPE_VECTOR: { return false; // do me... + + //if ( atomic_only ) + // return false; + // //return IsCompatibleType(t->AsVectorType()->YieldType()); } @@ -249,7 +255,8 @@ bool InputMgr::IsCompatibleType(BroType* t) } return false; - } +} + bool InputMgr::RemoveReader(const EnumVal* id) { ReaderInfo *i = 0; diff --git a/src/InputMgr.h b/src/InputMgr.h index d4bfa5c355..93c6447467 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -46,7 +46,7 @@ protected: private: struct ReaderInfo; - bool IsCompatibleType(BroType* t); + bool IsCompatibleType(BroType* t, bool atomic_only=false); bool UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend); void SendEvent(const string& name, EnumVal* event, Val* left, Val* right); From fb5f26e7fcf5d3bda5c25472e2933e7ed975ddb1 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 15 Nov 2011 15:23:46 -0800 Subject: [PATCH 037/651] make default values work (thanks to robin) --- src/InputMgr.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 4506501c94..a0a9d6a35c 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -97,7 +97,7 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) return 0; } - EnumVal* reader = description->Lookup(rtype->FieldOffset("reader"))->AsEnumVal(); + EnumVal* reader = description->LookupWithDefault(rtype->FieldOffset("reader"))->AsEnumVal(); while ( true ) { if ( ir->type == BifEnum::Input::READER_DEFAULT ) @@ -148,7 +148,6 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) RecordType *idx = description->Lookup(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); RecordType *val = description->Lookup(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); TableVal *dst = description->Lookup(rtype->FieldOffset("destination"))->AsTableVal(); - Val *want_record = description->Lookup(rtype->FieldOffset("want_record")); vector fieldsV; // vector, because we don't know the length beforehands @@ -163,9 +162,11 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) if ( status ) { reporter->Error("Problem unrolling"); + Unref(reader); return 0; } + Val *want_record = description->LookupWithDefault(rtype->FieldOffset("want_record")); LogField** fields = new LogField*[fieldsV.size()]; for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { @@ -174,7 +175,7 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) ReaderInfo* info = new ReaderInfo; info->reader = reader_obj; - info->type = reader->Ref()->AsEnumVal(); + info->type = reader->AsEnumVal(); // ref'd by lookupwithdefault info->num_idx_fields = idxfields; info->num_val_fields = valfields; info->tab = dst->Ref()->AsTableVal(); @@ -184,6 +185,7 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) info->currDict = new PDict(InputHash); info->lastDict = new PDict(InputHash); info->want_record = ( want_record->InternalInt() == 1 ); + Unref(want_record); // ref'd by lookupwithdefault if ( valfields > 1 ) { assert(info->want_record); From 821878835a800edb794c25c32a59ef67958a87da Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 15 Nov 2011 16:32:35 -0800 Subject: [PATCH 038/651] read vector. 
still missing: enums, empty fields for optional parameters. --- src/InputMgr.cc | 45 +++++++++++++++++++++++++++------ src/InputReaderAscii.cc | 56 ++++++++++++++++++++++++++--------------- src/InputReaderAscii.h | 6 ++--- src/LogMgr.cc | 6 ++--- src/LogMgr.h | 6 ++--- 5 files changed, 83 insertions(+), 36 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index a0a9d6a35c..4c21268c84 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -244,12 +244,10 @@ bool InputMgr::IsCompatibleType(BroType* t, bool atomic_only) case TYPE_VECTOR: { - return false; // do me... - - //if ( atomic_only ) - // return false; - // - //return IsCompatibleType(t->AsVectorType()->YieldType()); + if ( atomic_only ) + return false; + + return IsCompatibleType(t->AsVectorType()->YieldType(), true); } default: @@ -342,7 +340,9 @@ bool InputMgr::UnrollRecordType(vector *fields, const RecordType *rec field->name = nameprepend + rec->FieldName(i); field->type = rec->FieldType(i)->Tag(); if ( field->type == TYPE_TABLE ) { - field->set_type = rec->FieldType(i)->AsSetType()->Indices()->PureType()->Tag(); + field->subtype = rec->FieldType(i)->AsSetType()->Indices()->PureType()->Tag(); + } else if ( field->type == TYPE_VECTOR ) { + field->subtype = rec->FieldType(i)->AsVectorType()->YieldType()->Tag(); } fields->push_back(field); @@ -870,6 +870,13 @@ int InputMgr::GetLogValLength(const LogVal* val) { break; } + case TYPE_VECTOR: { + for ( int i = 0; i < val->val.vector_val.size; i++ ) { + length += GetLogValLength(val->val.vector_val.vals[i]); + } + break; + } + default: reporter->InternalError("unsupported type %d for GetLogValLength", val->type); } @@ -936,6 +943,15 @@ int InputMgr::CopyLogVal(char *data, const int startpos, const LogVal* val) { break; } + case TYPE_VECTOR: { + int length = 0; + for ( int i = 0; i < val->val.vector_val.size; i++ ) { + length += CopyLogVal(data, startpos+length, val->val.vector_val.vals[i]); + } + return length; + break; + } + default: reporter->InternalError("unsupported type %d for CopyLogVal", val->type); return 0; @@ -1039,6 +1055,21 @@ Val* InputMgr::LogValToVal(const LogVal* val, TypeTag request_type) { break; } + case TYPE_VECTOR: { + assert ( val->val.vector_val.size > 1 ); // implement empty vector... + + // all entries have to have the same type... 
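The TYPE_VECTOR cases added to GetLogValLength() and CopyLogVal() above follow a measure-then-copy pattern: recursively compute the flattened size of a value, then serialize it into one contiguous buffer that can be hashed. A minimal stand-alone sketch of that pattern, using only the standard library and a hypothetical Node type in place of LogVal:

#include <cstring>
#include <string>
#include <vector>

struct Node {                                  // hypothetical stand-in for LogVal
	std::string data;                      // payload of an atomic value
	std::vector<Node> children;            // elements of a set/vector, empty otherwise
};

static int Length(const Node& n)               // analogue of GetLogValLength()
	{
	int len = n.data.size();
	for ( size_t i = 0; i < n.children.size(); i++ )
		len += Length(n.children[i]);
	return len;
	}

static int Copy(char* buf, int startpos, const Node& n)   // analogue of CopyLogVal()
	{
	std::memcpy(buf + startpos, n.data.data(), n.data.size());
	int len = n.data.size();
	for ( size_t i = 0; i < n.children.size(); i++ )
		len += Copy(buf, startpos + len, n.children[i]);
	return len;
	}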
+ TypeTag type = val->val.vector_val.vals[0]->type; + VectorType* vt = new VectorType(base_type(type)); + VectorVal* v = new VectorVal(vt); + for ( int i = 0; i < val->val.vector_val.size; i++ ) { + assert( val->val.vector_val.vals[i]->type == type); + v->Assign(i, LogValToVal( val->val.set_val.vals[i], type ), 0); + } + return v; + + } + case TYPE_ENUM: reporter->InternalError("Sorry, Enum reading does not yet work, missing internal inferface"); diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 60a8c5685a..3b4409e652 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -11,20 +11,20 @@ FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int position = arg_position; } -FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_set_type, int arg_position) - : name(arg_name), type(arg_type), set_type(arg_set_type) +FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_subtype, int arg_position) + : name(arg_name), type(arg_type), subtype(arg_subtype) { position = arg_position; } FieldMapping::FieldMapping(const FieldMapping& arg) - : name(arg.name), type(arg.type), set_type(arg.set_type) + : name(arg.name), type(arg.type), subtype(arg.subtype) { position = arg.position; } -FieldMapping FieldMapping::setType() { - return FieldMapping(name, set_type, position); +FieldMapping FieldMapping::subType() { + return FieldMapping(name, subtype, position); } InputReaderAscii::InputReaderAscii() @@ -91,7 +91,7 @@ bool InputReaderAscii::ReadHeader() { const LogField* field = fields[i]; if ( field->name == s ) { // cool, found field. note position - FieldMapping f(field->name, field->type, field->set_type, i); + FieldMapping f(field->name, field->type, field->subtype, i); columnMap.push_back(f); wantFields++; break; // done with searching @@ -112,7 +112,7 @@ bool InputReaderAscii::ReadHeader() { if ( wantFields != (int) num_fields ) { // we did not find all fields? // :( - Error("One of the requested fields could not be found in the input data file"); + Error(Fmt("One of the requested fields could not be found in the input data file. Found %d fields, wanted %d", wantFields, num_fields)); return false; } @@ -199,25 +199,40 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { break; } - case TYPE_TABLE: { - // construct a table from entry... - // for the moment assume, that entries are split by ",". - - if ( s == "-" ) { - // empty - val->val.set_val.size = 0; - break; - } - + case TYPE_TABLE: + case TYPE_VECTOR: + // First - common initialization + // Then - initialization for table. + // Then - initialization for vector. + // Then - common stuff + { // how many entries do we have... unsigned int length = 1; for ( unsigned int i = 0; i < s.size(); i++ ) if ( s[i] == ',') length++; unsigned int pos = 0; + LogVal** lvals = new LogVal* [length]; - val->val.set_val.vals = lvals; - val->val.set_val.size = length; + + if ( field.type == TYPE_TABLE ) { + // construct a table from entry... + // for the moment assume, that entries are split by ",". 
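As the comment above says, container entries are split on "," for now (the separator is made configurable in a later patch below). A stand-alone sketch of just that splitting step, assuming a single-character separator; SplitContainerField() is a hypothetical helper that only mirrors the count-then-getline pattern of EntryToVal(), and each resulting element would then be fed back through EntryToVal() with the field's subType():

#include <sstream>
#include <string>
#include <vector>

static std::vector<std::string> SplitContainerField(const std::string& s, char sep)
	{
	unsigned int length = 1;                       // number of separators + 1
	for ( unsigned int i = 0; i < s.size(); i++ )
		if ( s[i] == sep )
			length++;

	std::vector<std::string> elements;
	elements.reserve(length);

	std::istringstream splitstream(s);
	std::string element;
	while ( std::getline(splitstream, element, sep) )
		elements.push_back(element);

	return elements;
	}

// e.g. SplitContainerField("80,443,8080", ',') yields "80", "443", "8080"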
+ + /* Fix support for emtyp tables if ( s == "-" ) { + // empty + val->val.set_val.size = 0; + break; + } */ + + val->val.set_val.vals = lvals; + val->val.set_val.size = length; + } else if ( field.type == TYPE_VECTOR ) { + val->val.vector_val.vals = lvals; + val->val.vector_val.size = length; + } else { + assert(false); + } istringstream splitstream(s); while ( splitstream ) { @@ -232,7 +247,7 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { break; - LogVal* newval = EntryToVal(element, field.setType()); + LogVal* newval = EntryToVal(element, field.subType()); if ( newval == 0 ) { Error("Error while reading set"); return 0; @@ -243,6 +258,7 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { } + if ( pos != length ) { Error("Internal error while parsing set: did not find all elements"); return 0; diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index ab2b89339c..56c1001acb 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -12,15 +12,15 @@ struct FieldMapping { string name; TypeTag type; - TypeTag set_type; + TypeTag subtype; int position; FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position); - FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_set_type, int arg_position); + FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_subtype, int arg_position); FieldMapping(const FieldMapping& arg); FieldMapping() { position = -1; } - FieldMapping setType(); + FieldMapping subType(); bool IsEmpty() { return position == -1; } }; diff --git a/src/LogMgr.cc b/src/LogMgr.cc index 9818d9cdfb..6eaace3893 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -83,16 +83,16 @@ bool LogField::Read(SerializationFormat* fmt) int t; int it; - bool success = (fmt->Read(&name, "name") && fmt->Read(&t, "type") && fmt->Read(&it, "set_type") ); + bool success = (fmt->Read(&name, "name") && fmt->Read(&t, "type") && fmt->Read(&it, "subtype") ); type = (TypeTag) t; - set_type = (TypeTag) it; + subtype = (TypeTag) it; return success; } bool LogField::Write(SerializationFormat* fmt) const { - return (fmt->Write(name, "name") && fmt->Write((int)type, "type") && fmt->Write((int)set_type, "set_type")); + return (fmt->Write(name, "name") && fmt->Write((int)type, "type") && fmt->Write((int)subtype, "subtype")); } LogVal::~LogVal() diff --git a/src/LogMgr.h b/src/LogMgr.h index 40dab8677b..b8530d29ab 100644 --- a/src/LogMgr.h +++ b/src/LogMgr.h @@ -15,12 +15,12 @@ class SerializationFormat; struct LogField { string name; TypeTag type; - // needed by input framework. otherwise it cannot determine the inner type of a set. - TypeTag set_type; + // needed by input framework. otherwise it cannot determine the inner type of a set or vector. + TypeTag subtype; LogField() { } LogField(const LogField& other) - : name(other.name), type(other.type), set_type(other.set_type) { } + : name(other.name), type(other.type), subtype(other.subtype) { } // (Un-)serialize. 
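The reason LogField and FieldMapping grow a second tag here is that a single type tag cannot describe a container column such as set[count]; the descriptor needs both the container type and the element type, and Read()/Write() above have to move that extra tag symmetrically. A rough sketch of the idea; FieldDesc and the TAG_* constants are illustrative only and not part of the actual code:

#include <string>

enum Tag { TAG_COUNT, TAG_TABLE, TAG_VECTOR };   // illustrative tags only

struct FieldDesc {                // hypothetical analogue of LogField/FieldMapping
	std::string name;
	Tag type;                 // column type
	Tag subtype;              // element type, used for TAG_TABLE/TAG_VECTOR columns

	FieldDesc ElementDesc() const     // roughly what FieldMapping::subType() does
		{
		FieldDesc e;
		e.name = name;
		e.type = subtype;
		e.subtype = subtype;
		return e;
		}
};

// a set[count] column "ports" would be described as { "ports", TAG_TABLE, TAG_COUNT }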
bool Read(SerializationFormat* fmt); From ab68d8400789156a23b1e6bbaf57828e7eac7c83 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 16 Nov 2011 22:13:36 -0800 Subject: [PATCH 039/651] reading of enum types (thanks, Seth) --- src/InputMgr.cc | 50 +++++++++++++++++++++++++++------------------- src/InputMgr.h | 4 ++-- src/InputReader.cc | 3 ++- src/InputReader.h | 2 +- 4 files changed, 34 insertions(+), 25 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 4c21268c84..64f3e2eb9b 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -422,7 +422,7 @@ Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const Lo if ( num_fields == 1 && type->FieldType(0)->Tag() != TYPE_RECORD ) { - idxval = LogValToVal(vals[0]); + idxval = LogValToVal(vals[0], type->FieldType(0)); position = 1; } else { ListVal *l = new ListVal(TYPE_ANY); @@ -430,7 +430,7 @@ Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const Lo if ( type->FieldType(j)->Tag() == TYPE_RECORD ) { l->Append(LogValToRecordVal(vals, type->FieldType(j)->AsRecordType(), &position)); } else { - l->Append(LogValToVal(vals[position], type->FieldType(j)->Tag())); + l->Append(LogValToVal(vals[position], type->FieldType(j))); position++; } } @@ -486,7 +486,7 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { int position = i->num_idx_fields; if ( i->num_val_fields == 1 && !i->want_record ) { - valval = LogValToVal(vals[i->num_idx_fields]); + valval = LogValToVal(vals[i->num_idx_fields], i->rtype->FieldType(i->num_idx_fields)); } else { RecordVal * r = new RecordVal(i->rtype); @@ -501,7 +501,7 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { if ( i->rtype->FieldType(j)->Tag() == TYPE_RECORD ) { val = LogValToRecordVal(vals, i->rtype->FieldType(j)->AsRecordType(), &position); } else { - val = LogValToVal(vals[position], i->rtype->FieldType(j)->Tag()); + val = LogValToVal(vals[position], i->rtype->FieldType(j)); position++; } @@ -705,7 +705,7 @@ void InputMgr::Put(const InputReader* reader, const LogVal* const *vals) { int position = i->num_idx_fields; if ( i->num_val_fields == 1 && !i->want_record ) { - valval = LogValToVal(vals[i->num_idx_fields]); + valval = LogValToVal(vals[i->num_idx_fields], i->rtype->FieldType(i->num_idx_fields)); } else { RecordVal * r = new RecordVal(i->rtype); @@ -715,7 +715,7 @@ void InputMgr::Put(const InputReader* reader, const LogVal* const *vals) { if ( i->rtype->FieldType(j)->Tag() == TYPE_RECORD ) { val = LogValToRecordVal(vals, i->rtype->FieldType(j)->AsRecordType(), &position); } else { - val = LogValToVal(vals[position], i->rtype->FieldType(j)->Tag()); + val = LogValToVal(vals[position], i->rtype->FieldType(j)); position++; } @@ -760,7 +760,7 @@ void InputMgr::Error(InputReader* reader, const char* msg) reporter->Error("error with input reader for %s: %s", reader->Source().c_str(), msg); } - +/* Does not work atm, because LogValToVal needs BroType void InputMgr::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) { EventHandler* handler = event_registry->Lookup(name.c_str()); @@ -775,7 +775,7 @@ void InputMgr::SendEvent(const string& name, const int num_vals, const LogVal* c } mgr.Dispatch(new Event(handler, vl)); -} +} */ void InputMgr::SendEvent(const string& name, EnumVal* event, Val* left, Val* right) { @@ -814,7 +814,7 @@ Val* InputMgr::LogValToRecordVal(const LogVal* const *vals, RecordType *request_ if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) { 
fieldVal = LogValToRecordVal(vals, request_type->FieldType(i)->AsRecordType(), position); } else { - fieldVal = LogValToVal(vals[*position], request_type->FieldType(i)->Tag()); + fieldVal = LogValToVal(vals[*position], request_type->FieldType(i)); (*position)++; } @@ -988,10 +988,10 @@ HashKey* InputMgr::HashLogVals(const int num_elements, const LogVal* const *vals } -Val* InputMgr::LogValToVal(const LogVal* val, TypeTag request_type) { +Val* InputMgr::LogValToVal(const LogVal* val, BroType* request_type) { - if ( request_type != TYPE_ANY && request_type != val->type ) { - reporter->InternalError("Typetags don't match: %d vs %d", request_type, val->type); + if ( request_type->Tag() != TYPE_ANY && request_type->Tag() != val->type ) { + reporter->InternalError("Typetags don't match: %d vs %d", request_type->Tag(), val->type); return 0; } @@ -1041,13 +1041,12 @@ Val* InputMgr::LogValToVal(const LogVal* val, TypeTag request_type) { return new TableVal(new SetType(set_index, 0)); } else { // all entries have to have the same type... - TypeTag type = val->val.set_val.vals[0]->type; - TypeList* set_index = new TypeList(base_type(type)); - set_index->Append(base_type(type)); + BroType* type = request_type->AsTableType()->Indices()->PureType(); + TypeList* set_index = new TypeList(type->Ref()); + set_index->Append(type->Ref()); SetType* s = new SetType(set_index, 0); TableVal* t = new TableVal(s); for ( int i = 0; i < val->val.set_val.size; i++ ) { - assert( val->val.set_val.vals[i]->type == type); t->Assign(LogValToVal( val->val.set_val.vals[i], type ), 0); } return t; @@ -1059,19 +1058,28 @@ Val* InputMgr::LogValToVal(const LogVal* val, TypeTag request_type) { assert ( val->val.vector_val.size > 1 ); // implement empty vector... // all entries have to have the same type... - TypeTag type = val->val.vector_val.vals[0]->type; - VectorType* vt = new VectorType(base_type(type)); + BroType* type = request_type->AsVectorType()->YieldType(); + VectorType* vt = new VectorType(type->Ref()); VectorVal* v = new VectorVal(vt); for ( int i = 0; i < val->val.vector_val.size; i++ ) { - assert( val->val.vector_val.vals[i]->type == type); v->Assign(i, LogValToVal( val->val.set_val.vals[i], type ), 0); } return v; } - case TYPE_ENUM: - reporter->InternalError("Sorry, Enum reading does not yet work, missing internal inferface"); + case TYPE_ENUM: { + // well, this is kind of stupid, because EnumType just mangles the module name and the var name together again... + // but well + string module = extract_module_name(val->val.string_val->c_str()); + string var = extract_var_name(val->val.string_val->c_str()); + bro_int_t index = request_type->AsEnumType()->Lookup(module, var.c_str()); + if ( index == -1 ) { + reporter->InternalError("Value not found in enum mappimg. 
Module: %s, var: %s", module.c_str(), var.c_str()); + } + return new EnumVal(index, request_type->Ref()->AsEnumType() ); + break; + } default: diff --git a/src/InputMgr.h b/src/InputMgr.h index 93c6447467..17b7e2e804 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -55,11 +55,11 @@ private: int GetLogValLength(const LogVal* val); int CopyLogVal(char *data, const int startpos, const LogVal* val); - Val* LogValToVal(const LogVal* val, TypeTag request_type = TYPE_ANY); + Val* LogValToVal(const LogVal* val, BroType* request_type); Val* LogValToIndexVal(int num_fields, const RecordType* type, const LogVal* const *vals); Val* LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position); - void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); + //void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); ReaderInfo* FindReader(const InputReader* reader); ReaderInfo* FindReader(const EnumVal* id); diff --git a/src/InputReader.cc b/src/InputReader.cc index 994f8b9b97..7403d1f989 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -64,10 +64,11 @@ bool InputReader::Update() return DoUpdate(); } +/* void InputReader::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) { input_mgr->SendEvent(name, num_vals, vals); -} +} */ // stolen from logwriter const char* InputReader::Fmt(const char* format, ...) diff --git a/src/InputReader.h b/src/InputReader.h index b547d29506..41000e4c0c 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -40,7 +40,7 @@ protected: // A thread-safe version of fmt(). (stolen from logwriter) const char* Fmt(const char* format, ...); - void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); + //void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); void Put(const LogVal* const *val); void Clear(); From 4fef1e3f8c5fe7005f23f91d6001cd1d79ffcdd2 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 16 Nov 2011 22:47:28 -0800 Subject: [PATCH 040/651] set & entry separator configuration (with the restriction that they have to be exactly one character long) --- .../base/frameworks/input/readers/ascii.bro | 19 +++++++++ src/InputReaderAscii.cc | 40 +++++++++++++++++-- src/InputReaderAscii.h | 15 ++++++- src/input.bif | 9 +++++ 4 files changed, 78 insertions(+), 5 deletions(-) create mode 100644 scripts/base/frameworks/input/readers/ascii.bro diff --git a/scripts/base/frameworks/input/readers/ascii.bro b/scripts/base/frameworks/input/readers/ascii.bro new file mode 100644 index 0000000000..9f630975a2 --- /dev/null +++ b/scripts/base/frameworks/input/readers/ascii.bro @@ -0,0 +1,19 @@ +##! Interface for the ascii input reader. + +module InputAscii; + +export { + ## Separator between fields. + ## Please note that the separator has to be exactly one character long + const separator = "\t" &redef; + + ## Separator between set elements. + ## Please note that the separator has to be exactly one character long + const set_separator = "," &redef; + + ## String to use for empty fields. + const empty_field = "-" &redef; + + ## String to use for an unset &optional field. 
+ const unset_field = "-" &redef; +} diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 3b4409e652..22cdcfdcf0 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -2,6 +2,7 @@ #include "InputReaderAscii.h" #include "DebugLogger.h" +#include "NetVar.h" #include @@ -29,15 +30,46 @@ FieldMapping FieldMapping::subType() { InputReaderAscii::InputReaderAscii() { - //DBG_LOG(DBG_LOGGING, "input reader initialized"); file = 0; //keyMap = new map(); + + separator_len = BifConst::LogAscii::separator->Len(); + separator = new char[separator_len]; + memcpy(separator, BifConst::LogAscii::separator->Bytes(), + separator_len); + if ( separator_len != 1 ) { + Error("separator length has to be 1. Separator will be truncated."); + } + + set_separator_len = BifConst::LogAscii::set_separator->Len(); + set_separator = new char[set_separator_len]; + memcpy(set_separator, BifConst::LogAscii::set_separator->Bytes(), + set_separator_len); + if ( set_separator_len != 1 ) { + Error("set_separator length has to be 1. Separator will be truncated."); + } + + empty_field_len = BifConst::LogAscii::empty_field->Len(); + empty_field = new char[empty_field_len]; + memcpy(empty_field, BifConst::LogAscii::empty_field->Bytes(), + empty_field_len); + + unset_field_len = BifConst::LogAscii::unset_field->Len(); + unset_field = new char[unset_field_len]; + memcpy(unset_field, BifConst::LogAscii::unset_field->Bytes(), + unset_field_len); + } InputReaderAscii::~InputReaderAscii() { DoFinish(); + + delete [] separator; + delete [] set_separator; + delete [] empty_field; + delete [] unset_field; } void InputReaderAscii::DoFinish() @@ -83,7 +115,7 @@ bool InputReaderAscii::ReadHeader() { int wantFields = 0; while ( splitstream ) { string s; - if ( !getline(splitstream, s, '\t')) + if ( !getline(splitstream, s, separator[0])) break; // current found heading in s... compare if we want it @@ -243,7 +275,7 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { break; } - if ( !getline(splitstream, element, ',') ) + if ( !getline(splitstream, element, set_separator[0]) ) break; @@ -322,7 +354,7 @@ bool InputReaderAscii::DoUpdate() { while ( splitstream ) { string s; - if ( !getline(splitstream, s, '\t') ) + if ( !getline(splitstream, s, separator[0]) ) break; diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index 56c1001acb..d69b8c04bc 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -57,7 +57,20 @@ private: const LogField* const * fields; // raw mapping //map *keyMap; - + // + // Options set from the script-level. + char* separator; + int separator_len; + + char* set_separator; + int set_separator_len; + + char* empty_field; + int empty_field_len; + + char* unset_field; + int unset_field_len; + }; diff --git a/src/input.bif b/src/input.bif index 7b051fba16..aaef25dcc3 100644 --- a/src/input.bif +++ b/src/input.bif @@ -52,3 +52,12 @@ function Input::__remove_filter%(id: Log::ID, name: string%) : bool return new Val( res, TYPE_BOOL); %} +# Options for Ascii Reader + +module InputAscii; + +const separator: string; +const set_separator: string; +const empty_field: string; +const unset_field: string; + From 4dd95fcf3cf1efc83ec4d2c53fee765d2a800674 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 16 Nov 2011 23:51:51 -0800 Subject: [PATCH 041/651] support for uninitialized fields & empty sets and tables. The only snag is... 
with the default output format of the log-file writer, the input reader cannot tell if a table or set is empty or uninitialized (both cases use the same character by default). In this case, by default it is assumed that the field/vector is uninitalized. --- scripts/base/frameworks/input/__load__.bro | 2 + src/InputMgr.cc | 61 ++++++++++------------ src/InputReaderAscii.cc | 61 ++++++++-------------- src/InputReaderAscii.h | 12 ++--- 4 files changed, 57 insertions(+), 79 deletions(-) diff --git a/scripts/base/frameworks/input/__load__.bro b/scripts/base/frameworks/input/__load__.bro index a10fe855df..a3315186d5 100644 --- a/scripts/base/frameworks/input/__load__.bro +++ b/scripts/base/frameworks/input/__load__.bro @@ -1 +1,3 @@ @load ./main +@load ./readers/ascii + diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 64f3e2eb9b..9c0e9c12b5 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -505,10 +505,10 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { position++; } - if ( val == 0 ) { + /* if ( val == 0 ) { reporter->InternalError("conversion error"); return; - } + } */ r->Assign(j,val); @@ -871,7 +871,9 @@ int InputMgr::GetLogValLength(const LogVal* val) { } case TYPE_VECTOR: { - for ( int i = 0; i < val->val.vector_val.size; i++ ) { + int j = val->val.vector_val.size; + for ( int i = 0; i < j; i++ ) { + reporter->Error("size is %d", val->val.vector_val.size); length += GetLogValLength(val->val.vector_val.vals[i]); } break; @@ -945,7 +947,8 @@ int InputMgr::CopyLogVal(char *data, const int startpos, const LogVal* val) { case TYPE_VECTOR: { int length = 0; - for ( int i = 0; i < val->val.vector_val.size; i++ ) { + int j = val->val.vector_val.size; + for ( int i = 0; i < j; i++ ) { length += CopyLogVal(data, startpos+length, val->val.vector_val.vals[i]); } return length; @@ -994,6 +997,10 @@ Val* InputMgr::LogValToVal(const LogVal* val, BroType* request_type) { reporter->InternalError("Typetags don't match: %d vs %d", request_type->Tag(), val->type); return 0; } + + if ( !val->present ) { + return 0; // unset field + } switch ( val->type ) { @@ -1033,38 +1040,28 @@ Val* InputMgr::LogValToVal(const LogVal* val, BroType* request_type) { break; case TYPE_TABLE: { - if ( val->val.set_val.size == 0 ) { - // empty table - TypeList* set_index = new TypeList(base_type(TYPE_ANY)); - // iim quite sure this does not work... we probably need the internal set type for this... - reporter->InternalError("Implement me."); - return new TableVal(new SetType(set_index, 0)); - } else { - // all entries have to have the same type... - BroType* type = request_type->AsTableType()->Indices()->PureType(); - TypeList* set_index = new TypeList(type->Ref()); - set_index->Append(type->Ref()); - SetType* s = new SetType(set_index, 0); - TableVal* t = new TableVal(s); - for ( int i = 0; i < val->val.set_val.size; i++ ) { - t->Assign(LogValToVal( val->val.set_val.vals[i], type ), 0); - } - return t; - } + // all entries have to have the same type... + BroType* type = request_type->AsTableType()->Indices()->PureType(); + TypeList* set_index = new TypeList(type->Ref()); + set_index->Append(type->Ref()); + SetType* s = new SetType(set_index, 0); + TableVal* t = new TableVal(s); + for ( int i = 0; i < val->val.set_val.size; i++ ) { + t->Assign(LogValToVal( val->val.set_val.vals[i], type ), 0); + } + return t; break; } case TYPE_VECTOR: { - assert ( val->val.vector_val.size > 1 ); // implement empty vector... - - // all entries have to have the same type... 
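The present flag is what makes unset input fields workable: LogValToVal() above now returns a null pointer when it is false, and the caller skips the assignment (note the disabled "conversion error" check), so an &optional destination field simply stays unset. A stand-alone sketch of that convention, with ParsedField as a hypothetical stand-in for LogVal:

#include <string>

struct ParsedField {                 // hypothetical stand-in for LogVal
	bool present;                // false if the input held the unset marker
	std::string text;
};

// Returns 0 for an unset field; the caller then leaves the record field alone.
static std::string* ToValue(const ParsedField& f)
	{
	if ( ! f.present )
		return 0;

	return new std::string(f.text);
	}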
- BroType* type = request_type->AsVectorType()->YieldType(); - VectorType* vt = new VectorType(type->Ref()); - VectorVal* v = new VectorVal(vt); - for ( int i = 0; i < val->val.vector_val.size; i++ ) { - v->Assign(i, LogValToVal( val->val.set_val.vals[i], type ), 0); - } - return v; + // all entries have to have the same type... + BroType* type = request_type->AsVectorType()->YieldType(); + VectorType* vt = new VectorType(type->Ref()); + VectorVal* v = new VectorVal(vt); + for ( int i = 0; i < val->val.vector_val.size; i++ ) { + v->Assign(i, LogValToVal( val->val.set_val.vals[i], type ), 0); + } + return v; } diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 22cdcfdcf0..4a0d4157bc 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -34,31 +34,19 @@ InputReaderAscii::InputReaderAscii() //keyMap = new map(); - separator_len = BifConst::LogAscii::separator->Len(); - separator = new char[separator_len]; - memcpy(separator, BifConst::LogAscii::separator->Bytes(), - separator_len); - if ( separator_len != 1 ) { + separator.assign( (const char*) BifConst::InputAscii::separator->Bytes(), BifConst::InputAscii::separator->Len()); + if ( separator.size() != 1 ) { Error("separator length has to be 1. Separator will be truncated."); } - set_separator_len = BifConst::LogAscii::set_separator->Len(); - set_separator = new char[set_separator_len]; - memcpy(set_separator, BifConst::LogAscii::set_separator->Bytes(), - set_separator_len); - if ( set_separator_len != 1 ) { + set_separator.assign( (const char*) BifConst::InputAscii::set_separator->Bytes(), BifConst::InputAscii::set_separator->Len()); + if ( set_separator.size() != 1 ) { Error("set_separator length has to be 1. Separator will be truncated."); } - empty_field_len = BifConst::LogAscii::empty_field->Len(); - empty_field = new char[empty_field_len]; - memcpy(empty_field, BifConst::LogAscii::empty_field->Bytes(), - empty_field_len); - - unset_field_len = BifConst::LogAscii::unset_field->Len(); - unset_field = new char[unset_field_len]; - memcpy(unset_field, BifConst::LogAscii::unset_field->Bytes(), - unset_field_len); + empty_field.assign( (const char*) BifConst::InputAscii::empty_field->Bytes(), BifConst::InputAscii::empty_field->Len()); + + unset_field.assign( (const char*) BifConst::InputAscii::unset_field->Bytes(), BifConst::InputAscii::unset_field->Len()); } @@ -66,10 +54,6 @@ InputReaderAscii::~InputReaderAscii() { DoFinish(); - delete [] separator; - delete [] set_separator; - delete [] empty_field; - delete [] unset_field; } void InputReaderAscii::DoFinish() @@ -172,7 +156,10 @@ bool InputReaderAscii::GetLine(string& str) { LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { LogVal* val = new LogVal(field.type, true); - //bzero(val, sizeof(LogVal)); + + if ( s.compare(unset_field) == 0 ) { // field is not set... + return new LogVal(field.type, false); + } switch ( field.type ) { case TYPE_ENUM: @@ -244,19 +231,13 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { if ( s[i] == ',') length++; unsigned int pos = 0; + + if ( s.compare(empty_field) == 0 ) + length = 0; LogVal** lvals = new LogVal* [length]; if ( field.type == TYPE_TABLE ) { - // construct a table from entry... - // for the moment assume, that entries are split by ",". 
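As the commit message notes, with the default conventions an unset field and an empty set both arrive as "-", and EntryToVal() resolves the ambiguity in favour of "unset" by checking unset_field before the container branch ever looks at the string. A stand-alone sketch of that precedence, assuming both markers keep their default value of "-":

#include <string>

enum FieldState { FIELD_UNSET, FIELD_EMPTY, FIELD_VALUE };

static FieldState Classify(const std::string& s,
			   const std::string& unset_field = "-",
			   const std::string& empty_field = "-")
	{
	if ( s == unset_field )        // tested first, as in EntryToVal()
		return FIELD_UNSET;    // so a lone "-" counts as unset, never as empty

	if ( s == empty_field )        // only reachable if the markers are redef'd apart
		return FIELD_EMPTY;

	return FIELD_VALUE;
	}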
- - /* Fix support for emtyp tables if ( s == "-" ) { - // empty - val->val.set_val.size = 0; - break; - } */ - val->val.set_val.vals = lvals; val->val.set_val.size = length; } else if ( field.type == TYPE_VECTOR ) { @@ -266,18 +247,20 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { assert(false); } + if ( length == 0 ) + break; //empty + istringstream splitstream(s); while ( splitstream ) { string element; - if ( pos >= length ) { - Error(Fmt("Internal error while parsing set. pos %d > length %d", pos, length)); - break; - } - if ( !getline(splitstream, element, set_separator[0]) ) break; - + + if ( pos >= length ) { + Error(Fmt("Internal error while parsing set. pos %d >= length %d. Element: %s", pos, length, element.c_str())); + break; + } LogVal* newval = EntryToVal(element, field.subType()); if ( newval == 0 ) { diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index d69b8c04bc..c848c17110 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -59,17 +59,13 @@ private: //map *keyMap; // // Options set from the script-level. - char* separator; - int separator_len; + string separator; - char* set_separator; - int set_separator_len; + string set_separator; - char* empty_field; - int empty_field_len; + string empty_field; - char* unset_field; - int unset_field_len; + string unset_field; }; From e2c521fc4e6ef9eb66eb6cd431f9186468b14b70 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 18 Nov 2011 10:49:20 -0800 Subject: [PATCH 042/651] start reworking input framework... does not compile at the moment, but there are a few uncommitted changes that will be reverted in the next commit. --- scripts/base/frameworks/input/main.bro | 46 ++--- src/InputMgr.cc | 263 ++++++++++++------------- src/InputMgr.h | 18 +- src/InputReader.cc | 39 ++-- src/InputReader.h | 37 ++-- src/input.bif | 22 +-- 6 files changed, 208 insertions(+), 217 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 2b87ac980c..d9c0812498 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -4,30 +4,36 @@ module Input; export { const default_reader = READER_ASCII &redef; - type ReaderDescription: record { + type StreamDescription: record { source: string; - idx: any; - val: any; - destination: any; - want_record: bool &default=T; reader: Reader &default=default_reader; }; type Filter: record { - name: string; ## descriptive name. for later removal + name: string; + ## for tables + idx: any &optional; + val: any &optional; + destination: any &optional; + want_record: bool &default=T; + table_ev: any &optional; # event containing idx, val as values. + + ## decision function, that decides if an insertion, update or removal should really be executed. + ## or events should be thought pred: function(typ: Input::Event, left: any, right: any): bool &optional; - ## decision function, that decides if an inserton, update or removal should really be executed + + ## for "normalized" events + ev: any &optional; + ev_description: any &optional; }; const no_filter: Filter = [$name=""]; # Sentinel. 
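This is the heart of the rework: a stream is reduced to a source plus a reader, and everything table-specific (index and value types, destination, predicate) moves into one of possibly several named filters attached to the stream. A stand-alone sketch of that shape; FilterCfg and StreamCfg are hypothetical analogues of the Filter record above and of the per-reader filter map that appears in InputMgr.cc further below:

#include <map>
#include <string>

struct FilterCfg {                   // per-filter state: destination, types, predicate, ...
	std::string name;
	bool want_record;
};

struct StreamCfg {                   // per-stream state: just the reader and its source
	std::string source;
	std::map<int, FilterCfg> filters;

	bool HasFilter(int id) const
		{ return filters.find(id) != filters.end(); }
};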
- global create_reader: function(id: Log::ID, description: Input::ReaderDescription) : bool; - global remove_reader: function(id: Log::ID) : bool; + global create_stream: function(id: Log::ID, description: Input::ReaderDescription) : bool; + global remove_stream: function(id: Log::ID) : bool; global force_update: function(id: Log::ID) : bool; - global add_event: function(id: Log::ID, name: string) : bool; - global remove_event: function(id: Log::ID, name: string) : bool; global add_filter: function(id: Log::ID, filter: Input::Filter) : bool; global remove_filter: function(id: Log::ID, name: string) : bool; global get_filter: function(id: ID, name: string) : Filter; @@ -41,14 +47,14 @@ module Input; global filters: table[ID, string] of Filter; -function create_reader(id: Log::ID, description: Input::ReaderDescription) : bool +function create_stream(id: Log::ID, description: Input::ReaderDescription) : bool { - return __create_reader(id, description); + return __create_stream(id, description); } -function remove_reader(id: Log::ID) : bool +function remove_stream(id: Log::ID) : bool { - return __remove_reader(id); + return __remove_stream(id); } function force_update(id: Log::ID) : bool @@ -56,16 +62,6 @@ function force_update(id: Log::ID) : bool return __force_update(id); } -function add_event(id: Log::ID, name: string) : bool - { - return __add_event(id, name); - } - -function remove_event(id: Log::ID, name: string) : bool - { - return __remove_event(id, name); - } - function add_filter(id: Log::ID, filter: Input::Filter) : bool { filters[id, filter$name] = filter; diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 9c0e9c12b5..f9250f6f0f 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -27,22 +27,11 @@ declare(PDict, InputHash); struct InputMgr::Filter { EnumVal* id; string name; - Func* pred; - ~Filter(); -}; - -InputMgr::Filter::~Filter() { - Unref(id); -} - -struct InputMgr::ReaderInfo { - EnumVal* id; - EnumVal* type; - InputReader* reader; unsigned int num_idx_fields; unsigned int num_val_fields; bool want_record; + EventHandlerPtr table_event; TableVal* tab; RecordType* rtype; @@ -50,18 +39,42 @@ struct InputMgr::ReaderInfo { PDict(InputHash)* currDict; PDict(InputHash)* lastDict; - - list events; // events we fire when "something" happens - list filters; // filters that can prevent our actions + + Func* pred; + + EventHandlerPtr event; + RecordType* event_type; + + ~Filter(); +}; + +InputMgr::Filter::~Filter() { + Unref(id); + if ( tab ) + Unref(tab); + if ( itype ) + Unref(itype); + if ( rtype ) + Unref(rtype); + if ( event_type) + Unref(event_type); +} + +struct InputMgr::ReaderInfo { + EnumVal* id; + EnumVal* type; + InputReader* reader; + + //list events; // events we fire when "something" happens + map filters; // filters that can prevent our actions ~ReaderInfo(); }; InputMgr::ReaderInfo::~ReaderInfo() { + // all the contents of filters should delete themselves automatically... + Unref(type); - Unref(tab); - Unref(itype); - Unref(rtype); Unref(id); delete(reader); @@ -86,14 +99,14 @@ InputMgr::InputMgr() } // create a new input reader object to be used at whomevers leisure lateron. -InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) +InputReader* InputMgr::CreateStream(EnumVal* id, RecordVal* description) { InputReaderDefinition* ir = input_readers; RecordType* rtype = description->Type()->AsRecordType(); - if ( ! same_type(rtype, BifType::Record::Input::ReaderDescription, 0) ) + if ( ! 
same_type(rtype, BifType::Record::Input::StreamDescription, 0) ) { - reporter->Error("readerDescription argument not of right type"); + reporter->Error("Streamdescription argument not of right type"); return 0; } @@ -145,55 +158,15 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) const BroString* bsource = description->Lookup(rtype->FieldOffset("source"))->AsString(); string source((const char*) bsource->Bytes(), bsource->Len()); - RecordType *idx = description->Lookup(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); - RecordType *val = description->Lookup(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); - TableVal *dst = description->Lookup(rtype->FieldOffset("destination"))->AsTableVal(); - - - vector fieldsV; // vector, because we don't know the length beforehands - - - bool status = !UnrollRecordType(&fieldsV, idx, ""); - - int idxfields = fieldsV.size(); - - status = status || !UnrollRecordType(&fieldsV, val, ""); - int valfields = fieldsV.size() - idxfields; - - if ( status ) { - reporter->Error("Problem unrolling"); - Unref(reader); - return 0; - } - - Val *want_record = description->LookupWithDefault(rtype->FieldOffset("want_record")); - - LogField** fields = new LogField*[fieldsV.size()]; - for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { - fields[i] = fieldsV[i]; - } ReaderInfo* info = new ReaderInfo; info->reader = reader_obj; info->type = reader->AsEnumVal(); // ref'd by lookupwithdefault - info->num_idx_fields = idxfields; - info->num_val_fields = valfields; - info->tab = dst->Ref()->AsTableVal(); - info->rtype = val->Ref()->AsRecordType(); info->id = id->Ref()->AsEnumVal(); - info->itype = idx->Ref()->AsRecordType(); - info->currDict = new PDict(InputHash); - info->lastDict = new PDict(InputHash); - info->want_record = ( want_record->InternalInt() == 1 ); - Unref(want_record); // ref'd by lookupwithdefault - - if ( valfields > 1 ) { - assert(info->want_record); - } readers.push_back(info); - int success = reader_obj->Init(source, fieldsV.size(), idxfields, fields); + int success = reader_obj->Init(source); if ( success == false ) { assert( RemoveReader(id) ); return 0; @@ -208,6 +181,86 @@ InputReader* InputMgr::CreateReader(EnumVal* id, RecordVal* description) } +bool InputMgr::AddFilter(EnumVal *id, RecordVal* fval) { + ReaderInfo *i = FindReader(id); + if ( i == 0 ) { + reporter->Error("Stream not found"); + return false; + } + + RecordType* rtype = fval->Type()->AsRecordType(); + if ( ! 
same_type(rtype, BifType::Record::Input::Filter, 0) ) + { + reporter->Error("filter argument not of right type"); + return false; + } + + + Val* name = fval->Lookup(rtype->FieldOffset("name")); + Val* pred = fval->Lookup(rtype->FieldOffset("pred")); + + RecordType *idx = fval->Lookup(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); + RecordType *val = fval->Lookup(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); + TableVal *dst = fval->Lookup(rtype->FieldOffset("destination"))->AsTableVal(); + + vector fieldsV; // vector, because we don't know the length beforehands + + bool status = !UnrollRecordType(&fieldsV, idx, ""); + + int idxfields = fieldsV.size(); + + status = status || !UnrollRecordType(&fieldsV, val, ""); + int valfields = fieldsV.size() - idxfields; + + if ( status ) { + reporter->Error("Problem unrolling"); + return false; + } + + Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); + + LogField** fields = new LogField*[fieldsV.size()]; + for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { + fields[i] = fieldsV[i]; + } + + Filter filter; + filter.name = name->AsString()->CheckString(); + filter.id = id->Ref()->AsEnumVal(); + filter.pred = pred ? pred->AsFunc() : 0; + filter.num_idx_fields = idxfields; + filter.num_val_fields = valfields; + filter.tab = dst ? dst->Ref()->AsTableVal() : 0; + filter.rtype = rtype ? val->Ref()->AsRecordType() : 0; + filter.itype = itype ? idx->Ref()->AsRecordType() : 0; + // ya - well - we actually don't need them in every case... well, a few bytes of memory wasted + filter.currDict = new PDict(InputHash); + filter.lastDict = new PDict(InputHash); + filter.want_record = ( want_record->InternalInt() == 1 ); + Unref(want_record); // ref'd by lookupwithdefault + + if ( valfields > 1 ) { + assert(info->want_record); + } + + i->filters[id->InternalInt()] = filter; + + // ok, now we have to alert the reader of our new filter with our funky new fields + // the id is handled in a ... well, to be honest, a little bit sneaky manner. + // the "problem" is, that we can have several filters in the reader for one filter in the log manager. + // this is due to the fact, that a filter can either output it's result as a table, as an event... + // ...or as an table _and_ an event. And... if we have a table and an event, we actually need two different sets + // of filters in the reader, because the fields for the table and the event may differ and I absolutely do not want + // to build a union of these values and figure it out later. + // hence -> filter id is multiplicated with 2. + // filterId*2 -> results for table + // filterId*2+1 -> results for event + i->AddFilter( id->InternalInt() * 2, fieldsV.size(), idxfields, fields ); + + return true; +} + + bool InputMgr::IsCompatibleType(BroType* t, bool atomic_only) { if ( ! 
t ) @@ -258,7 +311,7 @@ bool InputMgr::IsCompatibleType(BroType* t, bool atomic_only) } -bool InputMgr::RemoveReader(const EnumVal* id) { +bool InputMgr::RemoveStream(const EnumVal* id) { ReaderInfo *i = 0; for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) { @@ -281,42 +334,6 @@ bool InputMgr::RemoveReader(const EnumVal* id) { return true; } -bool InputMgr::RegisterEvent(const EnumVal* id, string eventName) { - ReaderInfo *i = FindReader(id); - if ( i == 0 ) { - reporter->InternalError("Reader not found"); - return false; - } - - i->events.push_back(eventName); - - return true; -} - -// remove first event with name eventName -// (though there shouldn't really be several events with the same name... -bool InputMgr::UnregisterEvent(const EnumVal* id, string eventName) { - ReaderInfo *i = FindReader(id); - if ( i == 0 ) { - reporter->InternalError("Reader not found"); - return false; - } - - std::list::iterator it = i->events.begin(); - while ( it != i->events.end() ) - { - if ( *it == eventName ) { - it = i->events.erase(it); - return true; - } - else - ++it; - } - - return false; -} - - bool InputMgr::UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend) { for ( int i = 0; i < rec->NumFields(); i++ ) { @@ -363,34 +380,6 @@ bool InputMgr::ForceUpdate(const EnumVal* id) return i->reader->Update(); } -bool InputMgr::AddFilter(EnumVal *id, RecordVal* fval) { - ReaderInfo *i = FindReader(id); - if ( i == 0 ) { - reporter->Error("Reader not found"); - return false; - } - - RecordType* rtype = fval->Type()->AsRecordType(); - if ( ! same_type(rtype, BifType::Record::Input::Filter, 0) ) - { - reporter->Error("filter argument not of right type"); - return false; - } - - - Val* name = fval->Lookup(rtype->FieldOffset("name")); - Val* pred = fval->Lookup(rtype->FieldOffset("pred")); - - Filter filter; - filter.name = name->AsString()->CheckString(); - filter.id = id->Ref()->AsEnumVal(); - filter.pred = pred ? 
pred->AsFunc() : 0; - - i->filters.push_back(filter); - - return true; -} - bool InputMgr::RemoveFilter(EnumVal* id, const string &name) { ReaderInfo *i = FindReader(id); if ( i == 0 ) { @@ -398,7 +387,7 @@ bool InputMgr::RemoveFilter(EnumVal* id, const string &name) { return false; } - +/* std::list::iterator it = i->filters.begin(); while ( it != i->filters.end() ) { @@ -410,8 +399,15 @@ bool InputMgr::RemoveFilter(EnumVal* id, const string &name) { else ++it; } + */ - return false;; + map::iterator it = i->filters.find(id->InternalInt()); + if ( it == i->filters.end() ) { + return false; + } + + it->filters.erase(it); + return true; } @@ -444,7 +440,7 @@ Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const Lo } -void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { +void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -605,7 +601,7 @@ void InputMgr::SendEntry(const InputReader* reader, const LogVal* const *vals) { } -void InputMgr::EndCurrentSend(const InputReader* reader) { +void InputMgr::EndCurrentSend(const InputReader* reader, int id) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -693,7 +689,7 @@ void InputMgr::EndCurrentSend(const InputReader* reader) { i->currDict = new PDict(InputHash); } -void InputMgr::Put(const InputReader* reader, const LogVal* const *vals) { +void InputMgr::Put(const InputReader* reader, int id, const LogVal* const *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -733,7 +729,7 @@ void InputMgr::Put(const InputReader* reader, const LogVal* const *vals) { i->tab->Assign(idxval, valval); } -void InputMgr::Clear(const InputReader* reader) { +void InputMgr::Clear(const InputReader* reader, int id) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -873,7 +869,6 @@ int InputMgr::GetLogValLength(const LogVal* val) { case TYPE_VECTOR: { int j = val->val.vector_val.size; for ( int i = 0; i < j; i++ ) { - reporter->Error("size is %d", val->val.vector_val.size); length += GetLogValLength(val->val.vector_val.vals[i]); } break; diff --git a/src/InputMgr.h b/src/InputMgr.h index 17b7e2e804..1cacf89143 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -20,11 +20,9 @@ class InputMgr { public: InputMgr(); - InputReader* CreateReader(EnumVal* id, RecordVal* description); + InputReader* CreateStream(EnumVal* id, RecordVal* description); bool ForceUpdate(const EnumVal* id); - bool RemoveReader(const EnumVal* id); - bool RegisterEvent(const EnumVal* id, string eventName); - bool UnregisterEvent(const EnumVal* id, string eventName); + bool RemoveStream(const EnumVal* id); bool AddFilter(EnumVal *id, RecordVal* filter); bool RemoveFilter(EnumVal* id, const string &name); @@ -36,12 +34,14 @@ protected: // Reports an error for the given reader. void Error(InputReader* reader, const char* msg); - void Put(const InputReader* reader, const LogVal* const *vals); - void Clear(const InputReader* reader); - bool Delete(const InputReader* reader, const LogVal* const *vals); + // for readers to write to input stream in direct mode (reporting new/deleted values directly) + void Put(const InputReader* reader, int id. 
const LogVal* const *vals); + void Clear(const InputReader* reader, int id); + bool Delete(const InputReader* reader, int id, const LogVal* const *vals); - void SendEntry(const InputReader* reader, const LogVal* const *vals); - void EndCurrentSend(const InputReader* reader); + // for readers to write to input stream in indirect mode (manager is monitoring new/deleted values) + void SendEntry(const InputReader* reader, int id, const LogVal* const *vals); + void EndCurrentSend(const InputReader* reader, int id); private: struct ReaderInfo; diff --git a/src/InputReader.cc b/src/InputReader.cc index 7403d1f989..1008cf1b67 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -24,35 +24,42 @@ void InputReader::Error(const string &msg) input_mgr->Error(this, msg.c_str()); } -void InputReader::Put(const LogVal* const *val) +void InputReader::Put(int id, const LogVal* const *val) { - input_mgr->Put(this, val); + input_mgr->Put(this, int id, val); } -void InputReader::Clear() +void InputReader::Clear(int id) { - input_mgr->Clear(this); + input_mgr->Clear(this, int id); } -void InputReader::Delete(const LogVal* const *val) +void InputReader::Delete(int id, const LogVal* const *val) { - input_mgr->Delete(this, val); + input_mgr->Delete(this, int id, val); } -bool InputReader::Init(string arg_source, int arg_num_fields, int arg_idx_fields, - const LogField* const * arg_fields) +bool InputReader::Init(string arg_source) { source = arg_source; - num_fields = arg_num_fields; - index_fields = arg_idx_fields; - fields = arg_fields; // disable if DoInit returns error. - disabled = !DoInit(arg_source, arg_num_fields, arg_idx_fields, arg_fields); + disabled = !DoInit(arg_source); return !disabled; } +bool InputReader::AddFilter(int id, int arg_num_fields, + const LogField* const * arg_fields) +{ + return DoAddFilter(int id, arg_num_fields, arg_fields); +} + +bool InputReader::RemoveFilter(int id) +{ + return DoRemoveFilter(int id); +} + void InputReader::Finish() { DoFinish(); @@ -96,12 +103,12 @@ const char* InputReader::Fmt(const char* format, ...) 
} -void InputReader::SendEntry(const LogVal* const *vals) +void InputReader::SendEntry(int id, const LogVal* const *vals) { - input_mgr->SendEntry(this, vals); + input_mgr->SendEntry(this, int id, vals); } -void InputReader::EndCurrentSend() +void InputReader::EndCurrentSend(int id) { - input_mgr->EndCurrentSend(this); + input_mgr->EndCurrentSend(this, int id); } diff --git a/src/InputReader.h b/src/InputReader.h index 41000e4c0c..12f0bc9db4 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -15,7 +15,11 @@ public: InputReader(); virtual ~InputReader(); - bool Init(string arg_source, int arg_num_fields, int arg_idx_fields, const LogField* const* fields); + bool Init(string arg_source); + + bool AddFilter( int id, int arg_num_fields, const LogField* const* fields ); + + bool RemoveFilter ( int id ); void Finish(); @@ -23,8 +27,11 @@ public: protected: // Methods that have to be overwritten by the individual readers - virtual bool DoInit(string arg_source, int arg_num_fields, int arg_idx_fields, const LogField* const * fields) = 0; - + virtual bool DoInit(string arg_sources) = 0; + + virtual bool DoAddFilter( int id, int arg_num_fields, const LogField* const* fields ) = 0; + virtual bool DoRemoveFilter( int id ); + virtual void DoFinish() = 0; // update file contents to logmgr @@ -42,28 +49,26 @@ protected: //void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); - void Put(const LogVal* const *val); - void Clear(); - void Delete(const LogVal* const *val); + // Content-sendinf-functions (simple mode). Including table-specific stuff that simply is not used if we have no table + void Put(int id, const LogVal* const *val); + void Delete(int id, const LogVal* const *val); + void Clear(int id); - void SendEntry(const LogVal* const *vals); - void EndCurrentSend(); + // Table-functions (tracking mode): Only changed lines are propagated. + void SendEntry(int id, const LogVal* const *vals); + void EndCurrentSend(int id); private: friend class InputMgr; string source; - int num_fields; - int index_fields; - const LogField* const * fields; - // When an error occurs, this method is called to set a flag marking the - // writer as disabled. + // When an error occurs, this method is called to set a flag marking the + // writer as disabled. - bool disabled; - - bool Disabled() { return disabled; } + bool disabled; + bool Disabled() { return disabled; } // For implementing Fmt(). 
char* buf; diff --git a/src/input.bif b/src/input.bif index aaef25dcc3..ef069316ab 100644 --- a/src/input.bif +++ b/src/input.bif @@ -7,18 +7,18 @@ module Input; #include "NetVar.h" %%} -type ReaderDescription: record; +type StreamDescription: record; type Filter: record; -function Input::__create_reader%(id: Log::ID, description: Input::ReaderDescription%) : bool +function Input::__create_stream%(id: Log::ID, description: Input::StreamDescription%) : bool %{ - InputReader *the_reader = input_mgr->CreateReader(id->AsEnumVal(), description->AsRecordVal()); + InputReader *the_reader = input_mgr->CreateStream(id->AsEnumVal(), description->AsRecordVal()); return new Val( the_reader != 0, TYPE_BOOL ); %} -function Input::__remove_reader%(id: Log::ID%) : bool +function Input::__remove_stream%(id: Log::ID%) : bool %{ - bool res = input_mgr->RemoveReader(id->AsEnumVal()); + bool res = input_mgr->RemoveStream(id->AsEnumVal()); return new Val( res, TYPE_BOOL ); %} @@ -28,18 +28,6 @@ function Input::__force_update%(id: Log::ID%) : bool return new Val( res, TYPE_BOOL ); %} -function Input::__add_event%(id: Log::ID, name: string%) : bool - %{ - bool res = input_mgr->RegisterEvent(id->AsEnumVal(), name->AsString()->CheckString()); - return new Val( res, TYPE_BOOL ); - %} - -function Input::__remove_event%(id: Log::ID, name: string%) : bool - %{ - bool res = input_mgr->UnregisterEvent(id->AsEnumVal(), name->AsString()->CheckString()); - return new Val( res, TYPE_BOOL ); - %} - function Input::__add_filter%(id: Log::ID, filter: Input::Filter%) : bool %{ bool res = input_mgr->AddFilter(id->AsEnumVal(), filter->AsRecordVal()); From b3f01915fbca99ddda434ae791b143150d27fcf7 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 20 Nov 2011 12:07:50 -0800 Subject: [PATCH 043/651] compiles with basic new filter framework - but crashes on use. --- scripts/base/frameworks/input/main.bro | 16 +- src/InputMgr.cc | 255 +++++++++++------------- src/InputMgr.h | 2 +- src/InputReader.cc | 14 +- src/InputReader.h | 3 +- src/InputReaderAscii.cc | 265 ++++++++++++------------- src/InputReaderAscii.h | 30 ++- 7 files changed, 283 insertions(+), 302 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index d9c0812498..9d83d73ec6 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -14,9 +14,9 @@ export { name: string; ## for tables - idx: any &optional; - val: any &optional; - destination: any &optional; + idx: any; + val: any; + destination: any; want_record: bool &default=T; table_ev: any &optional; # event containing idx, val as values. @@ -25,13 +25,13 @@ export { pred: function(typ: Input::Event, left: any, right: any): bool &optional; ## for "normalized" events - ev: any &optional; - ev_description: any &optional; + # ev: any &optional; + # ev_description: any &optional; }; - const no_filter: Filter = [$name=""]; # Sentinel. + const no_filter: Filter = [$name="", $idx="", $val="", $destination=""]; # Sentinel. 
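The reworked InputReader/InputMgr interface above distinguishes two ways a reader can hand data to the manager: a direct ("simple") mode where the reader itself reports inserts and deletes, and a tracking mode where it re-sends the complete current contents and the manager figures out what changed. A stand-alone sketch of that split; DeliverySink and Row are hypothetical stand-ins for the manager and a parsed input line:

struct Row { };                          // hypothetical: one parsed input line

class DeliverySink {                     // hypothetical stand-in for the input manager
public:
	virtual ~DeliverySink() { }

	// Direct mode: the reader already knows what changed.
	virtual void Put(int filter_id, const Row& row) = 0;
	virtual void Delete(int filter_id, const Row& row) = 0;
	virtual void Clear(int filter_id) = 0;

	// Tracking mode: the reader re-sends everything, the manager diffs it.
	virtual void SendEntry(int filter_id, const Row& row) = 0;
	virtual void EndCurrentSend(int filter_id) = 0;
};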
- global create_stream: function(id: Log::ID, description: Input::ReaderDescription) : bool; + global create_stream: function(id: Log::ID, description: Input::StreamDescription) : bool; global remove_stream: function(id: Log::ID) : bool; global force_update: function(id: Log::ID) : bool; global add_filter: function(id: Log::ID, filter: Input::Filter) : bool; @@ -47,7 +47,7 @@ module Input; global filters: table[ID, string] of Filter; -function create_stream(id: Log::ID, description: Input::ReaderDescription) : bool +function create_stream(id: Log::ID, description: Input::StreamDescription) : bool { return __create_stream(id, description); } diff --git a/src/InputMgr.cc b/src/InputMgr.cc index f9250f6f0f..403d656140 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -16,18 +16,20 @@ #include "CompHash.h" -class InputHash { -public: +struct InputHash { HashKey* valhash; HashKey* idxkey; // does not need ref or whatever - if it is present here, it is also still present in the TableVal. }; declare(PDict, InputHash); -struct InputMgr::Filter { +class InputMgr::Filter { +public: EnumVal* id; string name; + //int filter_type; // to distinguish between event and table filters + unsigned int num_idx_fields; unsigned int num_val_fields; bool want_record; @@ -68,6 +70,8 @@ struct InputMgr::ReaderInfo { //list events; // events we fire when "something" happens map filters; // filters that can prevent our actions + bool HasFilter(int id); + ~ReaderInfo(); }; @@ -80,6 +84,15 @@ InputMgr::ReaderInfo::~ReaderInfo() { delete(reader); } +bool InputMgr::ReaderInfo::HasFilter(int id) { + map::iterator it = filters.find(id); + if ( it == filters.end() ) { + return false; + } + return true; +} + + struct InputReaderDefinition { bro_int_t type; // the type const char *name; // descriptive name for error messages @@ -168,12 +181,12 @@ InputReader* InputMgr::CreateStream(EnumVal* id, RecordVal* description) int success = reader_obj->Init(source); if ( success == false ) { - assert( RemoveReader(id) ); + assert( RemoveStream(id) ); return 0; } success = reader_obj->Update(); if ( success == false ) { - assert ( RemoveReader(id) ); + assert ( RemoveStream(id) ); return 0; } @@ -224,6 +237,7 @@ bool InputMgr::AddFilter(EnumVal *id, RecordVal* fval) { fields[i] = fieldsV[i]; } + // FIXME: remove those funky 0-tests again as the idea was changed. Filter filter; filter.name = name->AsString()->CheckString(); filter.id = id->Ref()->AsEnumVal(); @@ -231,8 +245,8 @@ bool InputMgr::AddFilter(EnumVal *id, RecordVal* fval) { filter.num_idx_fields = idxfields; filter.num_val_fields = valfields; filter.tab = dst ? dst->Ref()->AsTableVal() : 0; - filter.rtype = rtype ? val->Ref()->AsRecordType() : 0; - filter.itype = itype ? idx->Ref()->AsRecordType() : 0; + filter.rtype = val ? val->Ref()->AsRecordType() : 0; + filter.itype = idx ? idx->Ref()->AsRecordType() : 0; // ya - well - we actually don't need them in every case... well, a few bytes of memory wasted filter.currDict = new PDict(InputHash); filter.lastDict = new PDict(InputHash); @@ -240,22 +254,11 @@ bool InputMgr::AddFilter(EnumVal *id, RecordVal* fval) { Unref(want_record); // ref'd by lookupwithdefault if ( valfields > 1 ) { - assert(info->want_record); + assert(filter.want_record); } i->filters[id->InternalInt()] = filter; - - // ok, now we have to alert the reader of our new filter with our funky new fields - // the id is handled in a ... well, to be honest, a little bit sneaky manner. 
- // the "problem" is, that we can have several filters in the reader for one filter in the log manager. - // this is due to the fact, that a filter can either output it's result as a table, as an event... - // ...or as an table _and_ an event. And... if we have a table and an event, we actually need two different sets - // of filters in the reader, because the fields for the table and the event may differ and I absolutely do not want - // to build a union of these values and figure it out later. - // hence -> filter id is multiplicated with 2. - // filterId*2 -> results for table - // filterId*2+1 -> results for event - i->AddFilter( id->InternalInt() * 2, fieldsV.size(), idxfields, fields ); + i->reader->AddFilter( id->InternalInt(), fieldsV.size(), fields ); return true; } @@ -387,31 +390,15 @@ bool InputMgr::RemoveFilter(EnumVal* id, const string &name) { return false; } -/* - std::list::iterator it = i->filters.begin(); - while ( it != i->filters.end() ) - { - if ( (*it).name == name ) { - it = i->filters.erase(it); - return true; - break; - } - else - ++it; - } - */ - map::iterator it = i->filters.find(id->InternalInt()); if ( it == i->filters.end() ) { return false; } - it->filters.erase(it); + i->filters.erase(it); return true; } - - Val* InputMgr::LogValToIndexVal(int num_fields, const RecordType *type, const LogVal* const *vals) { Val* idxval; int position = 0; @@ -449,27 +436,28 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const bool updated = false; + assert(i->HasFilter(id)); //reporter->Error("Hashing %d index fields", i->num_idx_fields); - HashKey* idxhash = HashLogVals(i->num_idx_fields, vals); + HashKey* idxhash = HashLogVals(i->filters[id].num_idx_fields, vals); //reporter->Error("Result: %d", (uint64_t) idxhash->Hash()); //reporter->Error("Hashing %d val fields", i->num_val_fields); - HashKey* valhash = HashLogVals(i->num_val_fields, vals+i->num_idx_fields); + HashKey* valhash = HashLogVals(i->filters[id].num_val_fields, vals+i->filters[id].num_idx_fields); //reporter->Error("Result: %d", (uint64_t) valhash->Hash()); //reporter->Error("received entry with idxhash %d and valhash %d", (uint64_t) idxhash->Hash(), (uint64_t) valhash->Hash()); - InputHash *h = i->lastDict->Lookup(idxhash); + InputHash *h = i->filters[id].lastDict->Lookup(idxhash); if ( h != 0 ) { // seen before if ( h->valhash->Hash() == valhash->Hash() ) { // ok, double. 
- i->lastDict->Remove(idxhash); - i->currDict->Insert(idxhash, h); + i->filters[id].lastDict->Remove(idxhash); + i->filters[id].currDict->Insert(idxhash, h); return; } else { // updated - i->lastDict->Remove(idxhash); + i->filters[id].lastDict->Remove(idxhash); delete(h); updated = true; @@ -477,27 +465,22 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const } - Val* idxval = LogValToIndexVal(i->num_idx_fields, i->itype, vals); + Val* idxval = LogValToIndexVal(i->filters[id].num_idx_fields, i->filters[id].itype, vals); Val* valval; - int position = i->num_idx_fields; - if ( i->num_val_fields == 1 && !i->want_record ) { - valval = LogValToVal(vals[i->num_idx_fields], i->rtype->FieldType(i->num_idx_fields)); + int position = i->filters[id].num_idx_fields; + if ( i->filters[id].num_val_fields == 1 && !i->filters[id].want_record ) { + valval = LogValToVal(vals[i->filters[id].num_idx_fields], i->filters[id].rtype->FieldType(i->filters[id].num_idx_fields)); } else { - RecordVal * r = new RecordVal(i->rtype); + RecordVal * r = new RecordVal(i->filters[id].rtype); - /* if ( i->rtype->NumFields() != (int) i->num_val_fields ) { - reporter->InternalError("Type mismatch"); - return; - } */ - - for ( int j = 0; j < i->rtype->NumFields(); j++) { + for ( int j = 0; j < i->filters[id].rtype->NumFields(); j++) { Val* val = 0; - if ( i->rtype->FieldType(j)->Tag() == TYPE_RECORD ) { - val = LogValToRecordVal(vals, i->rtype->FieldType(j)->AsRecordType(), &position); + if ( i->filters[id].rtype->FieldType(j)->Tag() == TYPE_RECORD ) { + val = LogValToRecordVal(vals, i->filters[id].rtype->FieldType(j)->AsRecordType(), &position); } else { - val = LogValToVal(vals[position], i->rtype->FieldType(j)); + val = LogValToVal(vals[position], i->filters[id].rtype->FieldType(j)); position++; } @@ -516,17 +499,12 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const Val* oldval = 0; if ( updated == true ) { // in that case, we need the old value to send the event (if we send an event). - oldval = i->tab->Lookup(idxval); + oldval = i->filters[id].tab->Lookup(idxval); } - // call filters first do determine if we really add / change the entry - std::list::iterator it = i->filters.begin(); - while ( it != i->filters.end() ) { - if (! (*it).pred ) { - continue; - } - + // call filter first to determine if we really add / change the entry + if ( i->filters[id].pred ) { EnumVal* ev; Ref(idxval); Ref(valval); @@ -541,44 +519,45 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const vl.append(ev); vl.append(idxval); vl.append(valval); - Val* v = (*it).pred->Call(&vl); + Val* v = i->filters[id].pred->Call(&vl); bool result = v->AsBool(); Unref(v); if ( result == false ) { if ( !updated ) { // throw away. Hence - we quit. And remove the entry from the current dictionary... 
- delete(i->currDict->RemoveEntry(idxhash)); + delete(i->filters[id].currDict->RemoveEntry(idxhash)); return; } else { // keep old one - i->currDict->Insert(idxhash, h); + i->filters[id].currDict->Insert(idxhash, h); return; } } - ++it; } //i->tab->Assign(idxval, valval); - HashKey* k = i->tab->ComputeHash(idxval); + HashKey* k = i->filters[id].tab->ComputeHash(idxval); if ( !k ) { reporter->InternalError("could not hash"); return; } - i->tab->Assign(idxval, k, valval); + i->filters[id].tab->Assign(idxval, k, valval); InputHash* ih = new InputHash(); - k = i->tab->ComputeHash(idxval); + k = i->filters[id].tab->ComputeHash(idxval); ih->idxkey = k; ih->valhash = valhash; //i->tab->Delete(k); - i->currDict->Insert(idxhash, ih); + i->filters[id].currDict->Insert(idxhash, ih); // send events now that we are kind of finished. + + /* FIXME: fix me. std::list::iterator filter_iterator = i->events.begin(); while ( filter_iterator != i->events.end() ) { EnumVal* ev; @@ -597,7 +576,7 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const ++filter_iterator; - } + } */ } @@ -607,86 +586,74 @@ void InputMgr::EndCurrentSend(const InputReader* reader, int id) { reporter->InternalError("Unknown reader"); return; } + + assert(i->HasFilter(id)); + // lastdict contains all deleted entries and should be empty apart from that - IterCookie *c = i->lastDict->InitForIteration(); - i->lastDict->MakeRobustCookie(c); + IterCookie *c = i->filters[id].lastDict->InitForIteration(); + i->filters[id].lastDict->MakeRobustCookie(c); InputHash* ih; HashKey *lastDictIdxKey; //while ( ( ih = i->lastDict->NextEntry(c) ) ) { - while ( ( ih = i->lastDict->NextEntry(lastDictIdxKey, c) ) ) { - - if ( i->events.size() != 0 || i->filters.size() != 0 ) // we have a filter or an event - { + while ( ( ih = i->filters[id].lastDict->NextEntry(lastDictIdxKey, c) ) ) { - ListVal *idx = i->tab->RecoverIndex(ih->idxkey); + if ( i->filters[id].pred ) { + ListVal *idx = i->filters[id].tab->RecoverIndex(ih->idxkey); assert(idx != 0); - Val *val = i->tab->Lookup(idx); + Val *val = i->filters[id].tab->Lookup(idx); assert(val != 0); - { - bool doBreak = false; - // ask filter, if we want to expire this element... - std::list::iterator it = i->filters.begin(); - while ( it != i->filters.end() ) { - if (! (*it).pred ) { - continue; - } + bool doBreak = false; + // ask predicate, if we want to expire this element... - EnumVal* ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - Ref(idx); - Ref(val); + EnumVal* ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); + Ref(idx); + Ref(val); - val_list vl(3); - vl.append(ev); - vl.append(idx); - vl.append(val); - Val* v = (*it).pred->Call(&vl); - bool result = v->AsBool(); - Unref(v); - - ++it; - - if ( result == false ) { - // Keep it. Hence - we quit and simply go to the next entry of lastDict - // ah well - and we have to add the entry to currDict... - i->currDict->Insert(lastDictIdxKey, i->lastDict->RemoveEntry(lastDictIdxKey)); - doBreak = true; - continue; - } - - } - - if ( doBreak ) { - continue; - } + val_list vl(3); + vl.append(ev); + vl.append(idx); + vl.append(val); + Val* v = i->filters[id].pred->Call(&vl); + bool result = v->AsBool(); + Unref(v); + + if ( result == false ) { + // Keep it. Hence - we quit and simply go to the next entry of lastDict + // ah well - and we have to add the entry to currDict... 
+ i->filters[id].currDict->Insert(lastDictIdxKey, i->filters[id].lastDict->RemoveEntry(lastDictIdxKey)); + continue; } - + + // { - std::list::iterator it = i->events.begin(); - while ( it != i->events.end() ) { + /* FIXME: events + std::list::iterator it = i->filters[id].events.begin(); + while ( it != i->filters[id].events.end() ) { Ref(idx); Ref(val); EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); SendEvent(*it, ev, idx, val); ++it; } + */ } } - i->tab->Delete(ih->idxkey); - i->lastDict->Remove(lastDictIdxKey); // deletex in next line + i->filters[id].tab->Delete(ih->idxkey); + i->filters[id].lastDict->Remove(lastDictIdxKey); // deletex in next line delete(ih); } - i->lastDict->Clear(); // should be empty... but... well... who knows... - delete(i->lastDict); + i->filters[id].lastDict->Clear(); // should be empty... but... well... who knows... + delete(i->filters[id].lastDict); - i->lastDict = i->currDict; - i->currDict = new PDict(InputHash); + i->filters[id].lastDict = i->filters[id].currDict; + i->filters[id].currDict = new PDict(InputHash); } void InputMgr::Put(const InputReader* reader, int id, const LogVal* const *vals) { @@ -696,22 +663,24 @@ void InputMgr::Put(const InputReader* reader, int id, const LogVal* const *vals) return; } - Val* idxval = LogValToIndexVal(i->num_idx_fields, i->itype, vals); + assert(i->HasFilter(id)); + + Val* idxval = LogValToIndexVal(i->filters[id].num_idx_fields, i->filters[id].itype, vals); Val* valval; - int position = i->num_idx_fields; - if ( i->num_val_fields == 1 && !i->want_record ) { - valval = LogValToVal(vals[i->num_idx_fields], i->rtype->FieldType(i->num_idx_fields)); + int position = i->filters[id].num_idx_fields; + if ( i->filters[id].num_val_fields == 1 && !i->filters[id].want_record ) { + valval = LogValToVal(vals[i->filters[id].num_idx_fields], i->filters[id].rtype->FieldType(i->filters[id].num_idx_fields)); } else { - RecordVal * r = new RecordVal(i->rtype); + RecordVal * r = new RecordVal(i->filters[id].rtype); - for ( int j = 0; j < i->rtype->NumFields(); j++) { + for ( int j = 0; j < i->filters[id].rtype->NumFields(); j++) { Val* val = 0; - if ( i->rtype->FieldType(j)->Tag() == TYPE_RECORD ) { - val = LogValToRecordVal(vals, i->rtype->FieldType(j)->AsRecordType(), &position); + if ( i->filters[id].rtype->FieldType(j)->Tag() == TYPE_RECORD ) { + val = LogValToRecordVal(vals, i->filters[id].rtype->FieldType(j)->AsRecordType(), &position); } else { - val = LogValToVal(vals[position], i->rtype->FieldType(j)); + val = LogValToVal(vals[position], i->filters[id].rtype->FieldType(j)); position++; } @@ -726,7 +695,7 @@ void InputMgr::Put(const InputReader* reader, int id, const LogVal* const *vals) valval = r; } - i->tab->Assign(idxval, valval); + i->filters[id].tab->Assign(idxval, valval); } void InputMgr::Clear(const InputReader* reader, int id) { @@ -735,20 +704,24 @@ void InputMgr::Clear(const InputReader* reader, int id) { reporter->InternalError("Unknown reader"); return; } - - i->tab->RemoveAll(); + + assert(i->HasFilter(id)); + + i->filters[id].tab->RemoveAll(); } -bool InputMgr::Delete(const InputReader* reader, const LogVal* const *vals) { +bool InputMgr::Delete(const InputReader* reader, int id, const LogVal* const *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); return false; } - - Val* idxval = LogValToIndexVal(i->num_idx_fields, i->itype, vals); - return ( i->tab->Delete(idxval) != 0 ); + assert(i->HasFilter(id)); + + Val* idxval = 
LogValToIndexVal(i->filters[id].num_idx_fields, i->filters[id].itype, vals); + + return ( i->filters[id].tab->Delete(idxval) != 0 ); } void InputMgr::Error(InputReader* reader, const char* msg) diff --git a/src/InputMgr.h b/src/InputMgr.h index 1cacf89143..5d531cd6fc 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -35,7 +35,7 @@ protected: void Error(InputReader* reader, const char* msg); // for readers to write to input stream in direct mode (reporting new/deleted values directly) - void Put(const InputReader* reader, int id. const LogVal* const *vals); + void Put(const InputReader* reader, int id, const LogVal* const *vals); void Clear(const InputReader* reader, int id); bool Delete(const InputReader* reader, int id, const LogVal* const *vals); diff --git a/src/InputReader.cc b/src/InputReader.cc index 1008cf1b67..1c65985fd6 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -26,17 +26,17 @@ void InputReader::Error(const string &msg) void InputReader::Put(int id, const LogVal* const *val) { - input_mgr->Put(this, int id, val); + input_mgr->Put(this, id, val); } void InputReader::Clear(int id) { - input_mgr->Clear(this, int id); + input_mgr->Clear(this, id); } void InputReader::Delete(int id, const LogVal* const *val) { - input_mgr->Delete(this, int id, val); + input_mgr->Delete(this, id, val); } @@ -52,12 +52,12 @@ bool InputReader::Init(string arg_source) bool InputReader::AddFilter(int id, int arg_num_fields, const LogField* const * arg_fields) { - return DoAddFilter(int id, arg_num_fields, arg_fields); + return DoAddFilter(id, arg_num_fields, arg_fields); } bool InputReader::RemoveFilter(int id) { - return DoRemoveFilter(int id); + return DoRemoveFilter(id); } void InputReader::Finish() @@ -105,10 +105,10 @@ const char* InputReader::Fmt(const char* format, ...) 
void InputReader::SendEntry(int id, const LogVal* const *vals) { - input_mgr->SendEntry(this, int id, vals); + input_mgr->SendEntry(this, id, vals); } void InputReader::EndCurrentSend(int id) { - input_mgr->EndCurrentSend(this, int id); + input_mgr->EndCurrentSend(this, id); } diff --git a/src/InputReader.h b/src/InputReader.h index 12f0bc9db4..6e3d689750 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -30,7 +30,8 @@ protected: virtual bool DoInit(string arg_sources) = 0; virtual bool DoAddFilter( int id, int arg_num_fields, const LogField* const* fields ) = 0; - virtual bool DoRemoveFilter( int id ); + + virtual bool DoRemoveFilter( int id ) = 0; virtual void DoFinish() = 0; diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 4a0d4157bc..84feb74e61 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -28,6 +28,7 @@ FieldMapping FieldMapping::subType() { return FieldMapping(name, subtype, position); } + InputReaderAscii::InputReaderAscii() { file = 0; @@ -58,7 +59,7 @@ InputReaderAscii::~InputReaderAscii() void InputReaderAscii::DoFinish() { - columnMap.empty(); + filters.empty(); if ( file != 0 ) { file->close(); delete(file); @@ -66,7 +67,7 @@ void InputReaderAscii::DoFinish() } } -bool InputReaderAscii::DoInit(string path, int num_fields, int idx_fields, const LogField* const * fields) +bool InputReaderAscii::DoInit(string path) { fname = path; @@ -76,11 +77,39 @@ bool InputReaderAscii::DoInit(string path, int num_fields, int idx_fields, const return false; } + return true; +} - this->num_fields = num_fields; - this->idx_fields = idx_fields; - this->fields = fields; +bool InputReaderAscii::DoAddFilter( int id, int arg_num_fields, const LogField* const* fields ) { + if ( HasFilter(id) ) { + return false; // no, we don't want to add this a second time + } + Filter f; + f.num_fields = arg_num_fields; + f.fields = fields; + + filters[id] = f; + + return true; +} + +bool InputReaderAscii::DoRemoveFilter ( int id ) { + if (!HasFilter(id) ) { + return false; + } + + assert ( filters.erase(id) == 1 ); + + return true; +} + + +bool InputReaderAscii::HasFilter(int id) { + map::iterator it = filters.find(id); + if ( it == filters.end() ) { + return false; + } return true; } @@ -93,46 +122,47 @@ bool InputReaderAscii::ReadHeader() { return false; } - // split on tabs... - istringstream splitstream(line); - unsigned int currTab = 0; - int wantFields = 0; - while ( splitstream ) { - string s; - if ( !getline(splitstream, s, separator[0])) - break; - - // current found heading in s... compare if we want it - for ( unsigned int i = 0; i < num_fields; i++ ) { - const LogField* field = fields[i]; - if ( field->name == s ) { - // cool, found field. note position - FieldMapping f(field->name, field->type, field->subtype, i); - columnMap.push_back(f); - wantFields++; - break; // done with searching + for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { + // split on tabs... + istringstream splitstream(line); + unsigned int currTab = 0; + int wantFields = 0; + while ( splitstream ) { + string s; + if ( !getline(splitstream, s, separator[0])) + break; + + // current found heading in s... compare if we want it + for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { + const LogField* field = (*it).second.fields[i]; + if ( field->name == s ) { + // cool, found field. 
note position + FieldMapping f(field->name, field->type, field->subtype, i); + (*it).second.columnMap.push_back(f); + wantFields++; + break; // done with searching + } } + + // look if we did push something... + if ( (*it).second.columnMap.size() == currTab ) { + // no, we didn't. note that... + FieldMapping empty; + (*it).second.columnMap.push_back(empty); + } + + // done + currTab++; + } + + if ( wantFields != (int) (*it).second.num_fields ) { + // we did not find all fields? + // :( + Error(Fmt("One of the requested fields could not be found in the input data file. Found %d fields, wanted %d. Filternum: %d", wantFields, (*it).second.num_fields, (*it).first)); + return false; } - - // look if we did push something... - if ( columnMap.size() == currTab ) { - // no, we didn't. note that... - FieldMapping empty; - columnMap.push_back(empty); - } - - // done - currTab++; - } - - if ( wantFields != (int) num_fields ) { - // we did not find all fields? - // :( - Error(Fmt("One of the requested fields could not be found in the input data file. Found %d fields, wanted %d", wantFields, num_fields)); - return false; } - // well, that seems to have worked... return true; } @@ -314,110 +344,77 @@ bool InputReaderAscii::DoUpdate() { return false; } - // TODO: all the stuff we need for a second reading. - // *cough* - // - // - - - // new keymap - //map *newKeyMap = new map(); - string line; while ( GetLine(line ) ) { - // split on tabs + + for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { - istringstream splitstream(line); - - LogVal** fields = new LogVal*[num_fields]; - //string string_fields[num_fields]; - - unsigned int currTab = 0; - unsigned int currField = 0; - while ( splitstream ) { - - string s; - if ( !getline(splitstream, s, separator[0]) ) - break; - + // split on tabs - if ( currTab >= columnMap.size() ) { - Error("Tabs in heading do not match tabs in data?"); - //disabled = true; - return false; - } - - FieldMapping currMapping = columnMap[currTab]; - currTab++; - - if ( currMapping.IsEmpty() ) { - // well, that was easy - continue; - } - - if ( currField >= num_fields ) { - Error("internal error - fieldnum greater as possible"); - return false; - } - - LogVal* val = EntryToVal(s, currMapping); - if ( val == 0 ) { - return false; - } - fields[currMapping.position] = val; - //string_fields[currMapping.position] = s; - - currField++; - } - - if ( currField != num_fields ) { - Error("curr_field != num_fields in DoUpdate. 
Columns in file do not match column definition."); - return false; - } - - - SendEntry(fields); - - /* - string indexstring = ""; - string valstring = ""; - for ( unsigned int i = 0; i < idx_fields; i++ ) { - indexstring.append(string_fields[i]); - } - - for ( unsigned int i = idx_fields; i < num_fields; i++ ) { - valstring.append(string_fields[i]); - } - - string valhash = Hash(valstring); - string indexhash = Hash(indexstring); - - if ( keyMap->find(indexhash) == keyMap->end() ) { - // new key - Put(fields); - } else if ( (*keyMap)[indexhash] != valhash ) { - // changed key - Put(fields); - keyMap->erase(indexhash); - } else { - // field not changed - keyMap->erase(indexhash); - } - - - (*newKeyMap)[indexhash] = valhash; - */ + istringstream splitstream(line); - for ( unsigned int i = 0; i < num_fields; i++ ) { - delete fields[i]; + LogVal** fields = new LogVal*[(*it).second.num_fields]; + //string string_fields[num_fields]; + + unsigned int currTab = 0; + unsigned int currField = 0; + while ( splitstream ) { + + string s; + if ( !getline(splitstream, s, separator[0]) ) + break; + + + if ( currTab >= (*it).second.columnMap.size() ) { + Error("Tabs in heading do not match tabs in data?"); + //disabled = true; + return false; + } + + FieldMapping currMapping = (*it).second.columnMap[currTab]; + currTab++; + + if ( currMapping.IsEmpty() ) { + // well, that was easy + continue; + } + + if ( currField >= (*it).second.num_fields ) { + Error("internal error - fieldnum greater as possible"); + return false; + } + + LogVal* val = EntryToVal(s, currMapping); + if ( val == 0 ) { + return false; + } + fields[currMapping.position] = val; + //string_fields[currMapping.position] = s; + + currField++; + } + + if ( currField != (*it).second.num_fields ) { + Error("curr_field != num_fields in DoUpdate. Columns in file do not match column definition."); + return false; + } + + + SendEntry((*it).first, fields); + + for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { + delete fields[i]; + } + delete [] fields; } - delete [] fields; } //file->clear(); // remove end of file evil bits //file->seekg(0, ios::beg); // and seek to start. - EndCurrentSend(); + for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { + EndCurrentSend((*it).first); + } return true; } diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index c848c17110..01169a3cfc 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -34,13 +34,30 @@ public: protected: - virtual bool DoInit(string path, int arg_num_fields, int arg_idx_fields, - const LogField* const * fields); + virtual bool DoInit(string path); + + virtual bool DoAddFilter( int id, int arg_num_fields, const LogField* const* fields ); + + virtual bool DoRemoveFilter ( int id ); + virtual void DoFinish(); virtual bool DoUpdate(); private: + + struct Filter { + unsigned int num_fields; + + const LogField* const * fields; // raw mapping + + // map columns in the file to columns to send back to the manager + vector columnMap; + + }; + + bool HasFilter(int id); + bool ReadHeader(); LogVal* EntryToVal(string s, FieldMapping type); @@ -49,15 +66,8 @@ private: ifstream* file; string fname; - unsigned int num_fields; - unsigned int idx_fields; + map filters; - // map columns in the file to columns to send back to the manager - vector columnMap; - const LogField* const * fields; // raw mapping - - //map *keyMap; - // // Options set from the script-level. 
string separator; From 7eb4d9934168fa0d37e01e3dbdd6feb60a88c485 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 20 Nov 2011 12:27:34 -0800 Subject: [PATCH 044/651] very basic functionality kind of works again --- src/InputMgr.cc | 52 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 403d656140..b51cd6fd27 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -47,10 +47,50 @@ public: EventHandlerPtr event; RecordType* event_type; - ~Filter(); + // ~Filter(); + // Filter(); + // Filter(const InputMgr::Filter& filter); + + void DoCleanup(); }; -InputMgr::Filter::~Filter() { +/* +InputMgr::Filter::Filter() { + tab = 0; + itype = 0; + rtype = 0; + event_type = 0; +} + +InputMgr::Filter::Filter(const InputMgr::Filter& f) { + id = f.id; + id->Ref(); + + tab = f.tab; + if ( tab ) + tab->Ref(); + + itype = f.itype; + if ( itype ) + itype->Ref(); + + rtype = f.rtype; + if ( rtype ) + Ref(rtype); + + event_type = f.event_type; + if ( event_type ) + Ref(event_type); + + name = f.name; + num_idx_fields = f.num_idx_fields; + num_val_fields = f.num_val_fields; + want_record = f.want_record; + + +} */ + +void InputMgr::Filter::DoCleanup() { Unref(id); if ( tab ) Unref(tab); @@ -60,7 +100,10 @@ InputMgr::Filter::~Filter() { Unref(rtype); if ( event_type) Unref(event_type); -} + + delete currDict; + delete lastDict; +} struct InputMgr::ReaderInfo { EnumVal* id; @@ -395,6 +438,8 @@ bool InputMgr::RemoveFilter(EnumVal* id, const string &name) { return false; } + i->filters[id->InternalInt()].DoCleanup(); + i->filters.erase(it); return true; } @@ -545,6 +590,7 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const return; } + reporter->Error("assigning"); i->filters[id].tab->Assign(idxval, k, valval); InputHash* ih = new InputHash(); From 029871e48c4c63add07d63a52127bb7b50f47189 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 20 Nov 2011 13:42:02 -0800 Subject: [PATCH 045/651] first test. 
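
[Editor's note: the following usage sketch is added for clarity and is not
part of the original commit message. The names idx, val, servers and
input.log are taken from the test itself.]

The test added below is the first end-to-end check of the new input
framework. The pattern it exercises is:

    Input::create_stream(A::LOG, [$source="input.log"]);
    Input::add_filter(A::LOG, [$name="ssh", $idx=idx, $val=val,
                               $destination=servers]);
    Input::force_update(A::LOG);
    print servers;   # baseline shows the table filled from input.log

That is: create a stream bound to an ASCII source, attach a filter that maps
the file's columns onto an index record and a value record, force a read,
and inspect the destination table.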
--- .../scripts.base.frameworks.input.basic/out | 14 +++++ .../scripts/base/frameworks/input/basic.bro | 52 +++++++++++++++++++ 2 files changed, 66 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.basic/out create mode 100644 testing/btest/scripts/base/frameworks/input/basic.bro diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.basic/out b/testing/btest/Baseline/scripts.base.frameworks.input.basic/out new file mode 100644 index 0000000000..ebac1866b6 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.basic/out @@ -0,0 +1,14 @@ +{ +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} diff --git a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro new file mode 100644 index 0000000000..5e0c7be12e --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/basic.bro @@ -0,0 +1,52 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Log::ID += { LOG }; +} + +type idx: record { + i: int; +}; + +type val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of val = table(); + +event bro_init() +{ + # first read in the old stuff into the table... + Input::create_stream(A::LOG, [$source="input.log"]); + Input::add_filter(A::LOG, [$name="ssh", $idx=idx, $val=val, $destination=servers]); + Input::force_update(A::LOG); + print servers; +} From f0e5303330d9842a7454001276ef5857c252bba9 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 21 Nov 2011 15:09:00 -0800 Subject: [PATCH 046/651] make want_record field for tablefilter work... 
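
[Editor's note: illustration added for clarity; not part of the original
commit message. The names idx, val and servers are the tests' own.]

With a single value column, $want_record now selects between storing the
bare value and storing a one-field record, which is exactly what the two new
tests below pin down:

    # $want_record=F: destination ends up as  { [-42] = T }
    Input::add_filter(A::LOG, [$name="input", $idx=idx, $val=val,
                               $destination=servers, $want_record=F]);

    # default ($want_record=T): destination ends up as  { [-42] = [b=T] }
    Input::add_filter(A::LOG, [$name="input", $idx=idx, $val=val,
                               $destination=servers]);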
--- src/InputMgr.cc | 2 +- .../out | 3 ++ .../out | 3 ++ .../frameworks/input/onecolumn-norecord.bro | 38 +++++++++++++++++++ .../frameworks/input/onecolumn-record.bro | 38 +++++++++++++++++++ 5 files changed, 83 insertions(+), 1 deletion(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.onecolumn-norecord/out create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.onecolumn-record/out create mode 100644 testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro create mode 100644 testing/btest/scripts/base/frameworks/input/onecolumn-record.bro diff --git a/src/InputMgr.cc b/src/InputMgr.cc index b51cd6fd27..c4180f4f8d 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -515,7 +515,7 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const int position = i->filters[id].num_idx_fields; if ( i->filters[id].num_val_fields == 1 && !i->filters[id].want_record ) { - valval = LogValToVal(vals[i->filters[id].num_idx_fields], i->filters[id].rtype->FieldType(i->filters[id].num_idx_fields)); + valval = LogValToVal(vals[position], i->filters[id].rtype->FieldType(0)); } else { RecordVal * r = new RecordVal(i->filters[id].rtype); diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.onecolumn-norecord/out b/testing/btest/Baseline/scripts.base.frameworks.input.onecolumn-norecord/out new file mode 100644 index 0000000000..bbce48f4f6 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.onecolumn-norecord/out @@ -0,0 +1,3 @@ +{ +[-42] = T +} diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.onecolumn-record/out b/testing/btest/Baseline/scripts.base.frameworks.input.onecolumn-record/out new file mode 100644 index 0000000000..3f9af35c59 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.onecolumn-record/out @@ -0,0 +1,3 @@ +{ +[-42] = [b=T] +} diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro new file mode 100644 index 0000000000..74fd477e28 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro @@ -0,0 +1,38 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i +#types bool int +T -42 +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Log::ID += { LOG }; +} + +type idx: record { + i: int; +}; + +type val: record { + b: bool; +}; + +global servers: table[int] of val = table(); + +event bro_init() +{ + # first read in the old stuff into the table... 
+ Input::create_stream(A::LOG, [$source="input.log"]); + Input::add_filter(A::LOG, [$name="input", $idx=idx, $val=val, $destination=servers, $want_record=F]); + Input::force_update(A::LOG); + print servers; +} diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro new file mode 100644 index 0000000000..3cc7090462 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro @@ -0,0 +1,38 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i +#types bool int +T -42 +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Log::ID += { LOG }; +} + +type idx: record { + i: int; +}; + +type val: record { + b: bool; +}; + +global servers: table[int] of val = table(); + +event bro_init() +{ + # first read in the old stuff into the table... + Input::create_stream(A::LOG, [$source="input.log"]); + Input::add_filter(A::LOG, [$name="input", $idx=idx, $val=val, $destination=servers]); + Input::force_update(A::LOG); + print servers; +} From 18591b53d422e5b97dc8595a5171e50a64feec78 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 21 Nov 2011 15:20:52 -0800 Subject: [PATCH 047/651] rename filter to tablefilter in preparation of event filters... --- scripts/base/frameworks/input/main.bro | 38 +++++++++---------- src/InputMgr.cc | 6 +-- src/InputMgr.h | 4 +- src/input.bif | 10 ++--- testing/btest/btest.cfg | 2 +- .../scripts/base/frameworks/input/basic.bro | 2 +- .../frameworks/input/onecolumn-norecord.bro | 2 +- .../frameworks/input/onecolumn-record.bro | 2 +- 8 files changed, 33 insertions(+), 33 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 9d83d73ec6..4560421ecc 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -9,7 +9,7 @@ export { reader: Reader &default=default_reader; }; - type Filter: record { + type TableFilter: record { ## descriptive name. for later removal name: string; @@ -29,14 +29,14 @@ export { # ev_description: any &optional; }; - const no_filter: Filter = [$name="", $idx="", $val="", $destination=""]; # Sentinel. + #const no_filter: Filter = [$name="", $idx="", $val="", $destination=""]; # Sentinel. 
global create_stream: function(id: Log::ID, description: Input::StreamDescription) : bool; global remove_stream: function(id: Log::ID) : bool; global force_update: function(id: Log::ID) : bool; - global add_filter: function(id: Log::ID, filter: Input::Filter) : bool; - global remove_filter: function(id: Log::ID, name: string) : bool; - global get_filter: function(id: ID, name: string) : Filter; + global add_tablefilter: function(id: Log::ID, filter: Input::TableFilter) : bool; + global remove_tablefilter: function(id: Log::ID, name: string) : bool; + #global get_filter: function(id: ID, name: string) : Filter; } @@ -45,7 +45,7 @@ export { module Input; -global filters: table[ID, string] of Filter; +#global filters: table[ID, string] of Filter; function create_stream(id: Log::ID, description: Input::StreamDescription) : bool { @@ -62,22 +62,22 @@ function force_update(id: Log::ID) : bool return __force_update(id); } -function add_filter(id: Log::ID, filter: Input::Filter) : bool +function add_tablefilter(id: Log::ID, filter: Input::TableFilter) : bool { - filters[id, filter$name] = filter; - return __add_filter(id, filter); +# filters[id, filter$name] = filter; + return __add_tablefilter(id, filter); } -function remove_filter(id: Log::ID, name: string) : bool +function remove_tablefilter(id: Log::ID, name: string) : bool { - delete filters[id, name]; - return __remove_filter(id, name); +# delete filters[id, name]; + return __remove_tablefilter(id, name); } -function get_filter(id: ID, name: string) : Filter - { - if ( [id, name] in filters ) - return filters[id, name]; - - return no_filter; - } +#function get_filter(id: ID, name: string) : Filter +# { +# if ( [id, name] in filters ) +# return filters[id, name]; +# +# return no_filter; +# } diff --git a/src/InputMgr.cc b/src/InputMgr.cc index c4180f4f8d..8fda5d506f 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -237,7 +237,7 @@ InputReader* InputMgr::CreateStream(EnumVal* id, RecordVal* description) } -bool InputMgr::AddFilter(EnumVal *id, RecordVal* fval) { +bool InputMgr::AddTableFilter(EnumVal *id, RecordVal* fval) { ReaderInfo *i = FindReader(id); if ( i == 0 ) { reporter->Error("Stream not found"); @@ -245,7 +245,7 @@ bool InputMgr::AddFilter(EnumVal *id, RecordVal* fval) { } RecordType* rtype = fval->Type()->AsRecordType(); - if ( ! same_type(rtype, BifType::Record::Input::Filter, 0) ) + if ( ! 
same_type(rtype, BifType::Record::Input::TableFilter, 0) ) { reporter->Error("filter argument not of right type"); return false; @@ -426,7 +426,7 @@ bool InputMgr::ForceUpdate(const EnumVal* id) return i->reader->Update(); } -bool InputMgr::RemoveFilter(EnumVal* id, const string &name) { +bool InputMgr::RemoveTableFilter(EnumVal* id, const string &name) { ReaderInfo *i = FindReader(id); if ( i == 0 ) { reporter->Error("Reader not found"); diff --git a/src/InputMgr.h b/src/InputMgr.h index 5d531cd6fc..4280ba1d81 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -24,8 +24,8 @@ public: bool ForceUpdate(const EnumVal* id); bool RemoveStream(const EnumVal* id); - bool AddFilter(EnumVal *id, RecordVal* filter); - bool RemoveFilter(EnumVal* id, const string &name); + bool AddTableFilter(EnumVal *id, RecordVal* filter); + bool RemoveTableFilter(EnumVal* id, const string &name); protected: diff --git a/src/input.bif b/src/input.bif index ef069316ab..1300f91bea 100644 --- a/src/input.bif +++ b/src/input.bif @@ -8,7 +8,7 @@ module Input; %%} type StreamDescription: record; -type Filter: record; +type TableFilter: record; function Input::__create_stream%(id: Log::ID, description: Input::StreamDescription%) : bool %{ @@ -28,15 +28,15 @@ function Input::__force_update%(id: Log::ID%) : bool return new Val( res, TYPE_BOOL ); %} -function Input::__add_filter%(id: Log::ID, filter: Input::Filter%) : bool +function Input::__add_tablefilter%(id: Log::ID, filter: Input::TableFilter%) : bool %{ - bool res = input_mgr->AddFilter(id->AsEnumVal(), filter->AsRecordVal()); + bool res = input_mgr->AddTableFilter(id->AsEnumVal(), filter->AsRecordVal()); return new Val( res, TYPE_BOOL ); %} -function Input::__remove_filter%(id: Log::ID, name: string%) : bool +function Input::__remove_tablefilter%(id: Log::ID, name: string%) : bool %{ - bool res = input_mgr->RemoveFilter(id->AsEnumVal(), name->AsString()->CheckString()); + bool res = input_mgr->RemoveTableFilter(id->AsEnumVal(), name->AsString()->CheckString()); return new Val( res, TYPE_BOOL); %} diff --git a/testing/btest/btest.cfg b/testing/btest/btest.cfg index 7d8283587c..739d0b2ad4 100644 --- a/testing/btest/btest.cfg +++ b/testing/btest/btest.cfg @@ -3,7 +3,7 @@ TestDirs = doc bifs language core scripts istate coverage TmpDir = %(testbase)s/.tmp BaselineDir = %(testbase)s/Baseline IgnoreDirs = .svn CVS .tmp -IgnoreFiles = *.tmp *.swp #* *.trace +IgnoreFiles = *.tmp *.swp #* *.trace .DS_Store [environment] BROPATH=`bash -c %(testbase)s/../../build/bro-path-dev` diff --git a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro index 5e0c7be12e..139888fa7c 100644 --- a/testing/btest/scripts/base/frameworks/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/input/basic.bro @@ -46,7 +46,7 @@ event bro_init() { # first read in the old stuff into the table... 
Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_filter(A::LOG, [$name="ssh", $idx=idx, $val=val, $destination=servers]); + Input::add_tablefilter(A::LOG, [$name="ssh", $idx=idx, $val=val, $destination=servers]); Input::force_update(A::LOG); print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro index 74fd477e28..12dbdd42aa 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro @@ -32,7 +32,7 @@ event bro_init() { # first read in the old stuff into the table... Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_filter(A::LOG, [$name="input", $idx=idx, $val=val, $destination=servers, $want_record=F]); + Input::add_tablefilter(A::LOG, [$name="input", $idx=idx, $val=val, $destination=servers, $want_record=F]); Input::force_update(A::LOG); print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro index 3cc7090462..4eef12d752 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro @@ -32,7 +32,7 @@ event bro_init() { # first read in the old stuff into the table... Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_filter(A::LOG, [$name="input", $idx=idx, $val=val, $destination=servers]); + Input::add_tablefilter(A::LOG, [$name="input", $idx=idx, $val=val, $destination=servers]); Input::force_update(A::LOG); print servers; } From 92b3723b0947739fb4aff9a3d7a87662f65011c7 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 21 Nov 2011 15:36:03 -0800 Subject: [PATCH 048/651] add very basic predicate test. --- .../out | 7 ++ .../base/frameworks/input/predicate.bro | 66 +++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.predicate/out create mode 100644 testing/btest/scripts/base/frameworks/input/predicate.bro diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.predicate/out b/testing/btest/Baseline/scripts.base.frameworks.input.predicate/out new file mode 100644 index 0000000000..d805f804d8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.predicate/out @@ -0,0 +1,7 @@ +VALID +VALID +VALID +VALID +VALID +VALID +VALID diff --git a/testing/btest/scripts/base/frameworks/input/predicate.bro b/testing/btest/scripts/base/frameworks/input/predicate.bro new file mode 100644 index 0000000000..e82ded6fd0 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/predicate.bro @@ -0,0 +1,66 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Log::ID += { LOG }; +} + +type idx: record { + i: int; +}; + +type val: record { + b: bool; +}; + +global servers: table[int] of val = table(); + +event bro_init() +{ + # first read in the old stuff into the table... 
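	# (Editor's note, added for clarity; not part of the original test.)
	# The $pred callback below gets the change type (Input::Event), the index
	# record, and, because $want_record=F, the bare value column as a bool.
	# Returning F vetoes the entry, so only the rows with b=T (1, 2 and 7)
	# land in servers, which is what the checks that follow assert.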
+ Input::create_stream(A::LOG, [$source="input.log"]); + Input::add_tablefilter(A::LOG, [$name="input", $idx=idx, $val=val, $destination=servers, $want_record=F, + $pred(typ: Input::Event, left: idx, right: bool) = { return right; } + ]); + Input::force_update(A::LOG); + if ( 1 in servers ) { + print "VALID"; + } + if ( 2 in servers ) { + print "VALID"; + } + if ( !(3 in servers) ) { + print "VALID"; + } + if ( !(4 in servers) ) { + print "VALID"; + } + if ( !(5 in servers) ) { + print "VALID"; + } + if ( !(6 in servers) ) { + print "VALID"; + } + if ( 7 in servers ) { + print "VALID"; + } +} From 77a517f2b5e90d94c9de144605c24247c620e4af Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 21 Nov 2011 15:45:27 -0800 Subject: [PATCH 049/651] camel-casing for types --- testing/btest/scripts/base/frameworks/input/basic.bro | 8 ++++---- .../base/frameworks/input/onecolumn-norecord.bro | 8 ++++---- .../scripts/base/frameworks/input/onecolumn-record.bro | 8 ++++---- .../btest/scripts/base/frameworks/input/predicate.bro | 10 +++++----- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro index 139888fa7c..3ad45eac69 100644 --- a/testing/btest/scripts/base/frameworks/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/input/basic.bro @@ -18,11 +18,11 @@ export { redef enum Log::ID += { LOG }; } -type idx: record { +type Idx: record { i: int; }; -type val: record { +type Val: record { b: bool; e: Log::ID; c: count; @@ -40,13 +40,13 @@ type val: record { ve: vector of int; }; -global servers: table[int] of val = table(); +global servers: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_tablefilter(A::LOG, [$name="ssh", $idx=idx, $val=val, $destination=servers]); + Input::add_tablefilter(A::LOG, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); Input::force_update(A::LOG); print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro index 12dbdd42aa..134ceb49e6 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro @@ -18,21 +18,21 @@ export { redef enum Log::ID += { LOG }; } -type idx: record { +type Idx: record { i: int; }; -type val: record { +type Val: record { b: bool; }; -global servers: table[int] of val = table(); +global servers: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... 
Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_tablefilter(A::LOG, [$name="input", $idx=idx, $val=val, $destination=servers, $want_record=F]); + Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); Input::force_update(A::LOG); print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro index 4eef12d752..c07c9c826c 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro @@ -18,21 +18,21 @@ export { redef enum Log::ID += { LOG }; } -type idx: record { +type Idx: record { i: int; }; -type val: record { +type Val: record { b: bool; }; -global servers: table[int] of val = table(); +global servers: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_tablefilter(A::LOG, [$name="input", $idx=idx, $val=val, $destination=servers]); + Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=servers]); Input::force_update(A::LOG); print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/predicate.bro b/testing/btest/scripts/base/frameworks/input/predicate.bro index e82ded6fd0..769536d2a6 100644 --- a/testing/btest/scripts/base/frameworks/input/predicate.bro +++ b/testing/btest/scripts/base/frameworks/input/predicate.bro @@ -24,22 +24,22 @@ export { redef enum Log::ID += { LOG }; } -type idx: record { +type Idx: record { i: int; }; -type val: record { +type Val: record { b: bool; }; -global servers: table[int] of val = table(); +global servers: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_tablefilter(A::LOG, [$name="input", $idx=idx, $val=val, $destination=servers, $want_record=F, - $pred(typ: Input::Event, left: idx, right: bool) = { return right; } + Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, + $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } ]); Input::force_update(A::LOG); if ( 1 in servers ) { From 53af0544ccc9a60254228ab702ec39639b5b577d Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 21 Nov 2011 19:01:07 -0800 Subject: [PATCH 050/651] re-enable table events --- scripts/base/frameworks/input/main.bro | 2 +- src/InputMgr.cc | 120 +++++++++++------- src/InputMgr.h | 2 +- .../scripts.base.frameworks.input.event/out | 21 +++ .../scripts/base/frameworks/input/event.bro | 48 +++++++ 5 files changed, 147 insertions(+), 46 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.event/out create mode 100644 testing/btest/scripts/base/frameworks/input/event.bro diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 4560421ecc..69b4d41ebb 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -18,7 +18,7 @@ export { val: any; destination: any; want_record: bool &default=T; - table_ev: any &optional; # event containing idx, val as values. + ev: any &optional; # event containing idx, val as values. ## decision function, that decides if an insertion, update or removal should really be executed. 
## or events should be thought diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 8fda5d506f..de9ef158b7 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -214,7 +214,6 @@ InputReader* InputMgr::CreateStream(EnumVal* id, RecordVal* description) const BroString* bsource = description->Lookup(rtype->FieldOffset("source"))->AsString(); string source((const char*) bsource->Bytes(), bsource->Len()); - ReaderInfo* info = new ReaderInfo; info->reader = reader_obj; info->type = reader->AsEnumVal(); // ref'd by lookupwithdefault @@ -259,6 +258,50 @@ bool InputMgr::AddTableFilter(EnumVal *id, RecordVal* fval) { RecordType *val = fval->Lookup(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); TableVal *dst = fval->Lookup(rtype->FieldOffset("destination"))->AsTableVal(); + Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); + + Val* event_val = fval->Lookup(rtype->FieldOffset("ev")); + Func* event = event_val ? event_val->AsFunc() : 0; + + if ( event ) { + FuncType* etype = event->FType()->AsFuncType(); + + if ( ! etype->IsEvent() ) { + reporter->Error("stream event is a function, not an event"); + return false; + } + + const type_list* args = etype->ArgTypes()->Types(); + + if ( args->length() != 3 ) + { + reporter->Error("Table event must take 3 arguments"); + return false; + } + + if ( ! same_type((*args)[0], BifType::Enum::Input::Event, 0) ) + { + reporter->Error("table events first attribute must be of type Input::Event"); + return false; + } + + if ( ! same_type((*args)[1], idx) ) + { + reporter->Error("table events index attributes do not match"); + return false; + } + + if ( want_record->InternalInt() == 1 && ! same_type((*args)[2], val) ) + { + reporter->Error("table events value attributes do not match"); + return false; + } else if ( want_record->InternalInt() == 0 && !same_type((*args)[2], val->FieldType(0) ) ) { + reporter->Error("table events value attribute does not match"); + return false; + } + + } + vector fieldsV; // vector, because we don't know the length beforehands bool status = !UnrollRecordType(&fieldsV, idx, ""); @@ -273,24 +316,22 @@ bool InputMgr::AddTableFilter(EnumVal *id, RecordVal* fval) { return false; } - Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); LogField** fields = new LogField*[fieldsV.size()]; for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { fields[i] = fieldsV[i]; } - // FIXME: remove those funky 0-tests again as the idea was changed. Filter filter; filter.name = name->AsString()->CheckString(); filter.id = id->Ref()->AsEnumVal(); filter.pred = pred ? pred->AsFunc() : 0; filter.num_idx_fields = idxfields; filter.num_val_fields = valfields; - filter.tab = dst ? dst->Ref()->AsTableVal() : 0; - filter.rtype = val ? val->Ref()->AsRecordType() : 0; - filter.itype = idx ? idx->Ref()->AsRecordType() : 0; - // ya - well - we actually don't need them in every case... well, a few bytes of memory wasted + filter.tab = dst->Ref()->AsTableVal(); + filter.rtype = val->Ref()->AsRecordType(); + filter.itype = idx->Ref()->AsRecordType(); + filter.event = event ? event_registry->Lookup(event->GetID()->Name()) : 0; filter.currDict = new PDict(InputHash); filter.lastDict = new PDict(InputHash); filter.want_record = ( want_record->InternalInt() == 1 ); @@ -601,11 +642,7 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const i->filters[id].currDict->Insert(idxhash, ih); - // send events now that we are kind of finished. - - /* FIXME: fix me. 
- std::list::iterator filter_iterator = i->events.begin(); - while ( filter_iterator != i->events.end() ) { + if ( i->filters[id].event ) { EnumVal* ev; Ref(idxval); @@ -613,16 +650,13 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); Ref(oldval); - SendEvent(*filter_iterator, ev, idxval, oldval); + SendEvent(i->filters[id].event, ev, idxval, oldval); } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); Ref(valval); - SendEvent(*filter_iterator, ev, idxval, valval); + SendEvent(i->filters[id].event, ev, idxval, valval); } - - - ++filter_iterator; - } */ + } } @@ -643,12 +677,17 @@ void InputMgr::EndCurrentSend(const InputReader* reader, int id) { //while ( ( ih = i->lastDict->NextEntry(c) ) ) { while ( ( ih = i->filters[id].lastDict->NextEntry(lastDictIdxKey, c) ) ) { - if ( i->filters[id].pred ) { - ListVal *idx = i->filters[id].tab->RecoverIndex(ih->idxkey); - assert(idx != 0); - Val *val = i->filters[id].tab->Lookup(idx); - assert(val != 0); + ListVal * idx; + Val *val; + if ( i->filters[id].pred || i->filters[id].event ) { + idx = i->filters[id].tab->RecoverIndex(ih->idxkey); + assert(idx != 0); + val = i->filters[id].tab->Lookup(idx); + assert(val != 0); + } + + if ( i->filters[id].pred ) { bool doBreak = false; // ask predicate, if we want to expire this element... @@ -673,21 +712,13 @@ void InputMgr::EndCurrentSend(const InputReader* reader, int id) { } - // - - { - /* FIXME: events - std::list::iterator it = i->filters[id].events.begin(); - while ( it != i->filters[id].events.end() ) { - Ref(idx); - Ref(val); - EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - SendEvent(*it, ev, idx, val); - ++it; - } - */ - } + } + if ( i->filters[id].event ) { + Ref(idx); + Ref(val); + EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); + SendEvent(i->filters[id].event, ev, idx, val); } i->filters[id].tab->Delete(ih->idxkey); @@ -792,20 +823,21 @@ void InputMgr::SendEvent(const string& name, const int num_vals, const LogVal* c mgr.Dispatch(new Event(handler, vl)); } */ -void InputMgr::SendEvent(const string& name, EnumVal* event, Val* left, Val* right) +void InputMgr::SendEvent(EventHandlerPtr ev, EnumVal* event, Val* left, Val* right) { - EventHandler* handler = event_registry->Lookup(name.c_str()); - if ( handler == 0 ) { - reporter->Error("Event %s not found", name.c_str()); - return; - } + //EventHandler* handler = event_registry->Lookup(name.c_str()); + //if ( handler == 0 ) { + // reporter->Error("Event %s not found", name.c_str()); + // return; + //} val_list* vl = new val_list; vl->append(event); vl->append(left); vl->append(right); - mgr.Dispatch(new Event(handler, vl)); + //mgr.Dispatch(new Event(handler, vl)); + mgr.QueueEvent(ev, vl, SOURCE_LOCAL); } diff --git a/src/InputMgr.h b/src/InputMgr.h index 4280ba1d81..d04b7c9a2c 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -49,7 +49,7 @@ private: bool IsCompatibleType(BroType* t, bool atomic_only=false); bool UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend); - void SendEvent(const string& name, EnumVal* event, Val* left, Val* right); + void SendEvent(EventHandlerPtr ev, EnumVal* event, Val* left, Val* right); HashKey* HashLogVals(const int num_elements, const LogVal* const *vals); int GetLogValLength(const LogVal* val); diff --git 
a/testing/btest/Baseline/scripts.base.frameworks.input.event/out b/testing/btest/Baseline/scripts.base.frameworks.input.event/out new file mode 100644 index 0000000000..e32a2aea00 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.event/out @@ -0,0 +1,21 @@ +Input::EVENT_NEW +1 +T +Input::EVENT_NEW +2 +T +Input::EVENT_NEW +3 +F +Input::EVENT_NEW +4 +F +Input::EVENT_NEW +5 +F +Input::EVENT_NEW +6 +F +Input::EVENT_NEW +7 +T diff --git a/testing/btest/scripts/base/frameworks/input/event.bro b/testing/btest/scripts/base/frameworks/input/event.bro new file mode 100644 index 0000000000..36e8171689 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/event.bro @@ -0,0 +1,48 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Log::ID += { LOG }; +} + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global destination: table[int] of Val = table(); + +event line(tpe: Input::Event, left: Idx, right: bool) { + print tpe; + print left; + print right; +} + +event bro_init() +{ + Input::create_stream(A::LOG, [$source="input.log"]); + Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=destination, $want_record=F,$ev=line]); + Input::force_update(A::LOG); +} From 3035eb2b219946144fe67097781226d77f34176e Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 21 Nov 2011 19:30:16 -0800 Subject: [PATCH 051/651] fix a little bug that prevented several simultaneous filters from working. --- src/InputMgr.cc | 8 +- .../out | 15 +++ .../base/frameworks/input/twofilters.bro | 95 +++++++++++++++++++ 3 files changed, 116 insertions(+), 2 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.twofilters/out create mode 100644 testing/btest/scripts/base/frameworks/input/twofilters.bro diff --git a/src/InputMgr.cc b/src/InputMgr.cc index de9ef158b7..2ec3c649be 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -341,8 +341,12 @@ bool InputMgr::AddTableFilter(EnumVal *id, RecordVal* fval) { assert(filter.want_record); } - i->filters[id->InternalInt()] = filter; - i->reader->AddFilter( id->InternalInt(), fieldsV.size(), fields ); + int filterid = 0; + if ( i->filters.size() > 0 ) { + filterid = i->filters.rbegin()->first + 1; // largest element is at beginning of map. new id = old id + 1. 
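		// (Editor's note, added for clarity; not part of the original patch.)
		// rbegin() points at the largest key in the map, so each new filter now
		// gets "largest existing id + 1" within this reader. Previously the id
		// came from the stream's enum value, so a second filter on the same
		// stream overwrote the first; that is the bug this commit fixes.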
+ } + i->filters[filterid] = filter; + i->reader->AddFilter( filterid, fieldsV.size(), fields ); return true; } diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.twofilters/out b/testing/btest/Baseline/scripts.base.frameworks.input.twofilters/out new file mode 100644 index 0000000000..5b1ee5e983 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.twofilters/out @@ -0,0 +1,15 @@ +VALID +VALID +VALID +VALID +VALID +VALID +VALID +MARK +VALID +VALID +VALID +VALID +VALID +VALID +VALID diff --git a/testing/btest/scripts/base/frameworks/input/twofilters.bro b/testing/btest/scripts/base/frameworks/input/twofilters.bro new file mode 100644 index 0000000000..575665e6e5 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/twofilters.bro @@ -0,0 +1,95 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Log::ID += { LOG }; +} + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global destination1: table[int] of Val = table(); +global destination2: table[int] of Val = table(); + +event bro_init() +{ + # first read in the old stuff into the table... + Input::create_stream(A::LOG, [$source="input.log"]); + Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=destination1, $want_record=F, + $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } + ]); + Input::add_tablefilter(A::LOG, [$name="input2",$idx=Idx, $val=Val, $destination=destination2]); + + Input::force_update(A::LOG); + if ( 1 in destination1 ) { + print "VALID"; + } + if ( 2 in destination1 ) { + print "VALID"; + } + if ( !(3 in destination1) ) { + print "VALID"; + } + if ( !(4 in destination1) ) { + print "VALID"; + } + if ( !(5 in destination1) ) { + print "VALID"; + } + if ( !(6 in destination1) ) { + print "VALID"; + } + if ( 7 in destination1 ) { + print "VALID"; + } + + print "MARK"; + + if ( 2 in destination2 ) { + print "VALID"; + } + if ( 2 in destination2 ) { + print "VALID"; + } + if ( 3 in destination2 ) { + print "VALID"; + } + if ( 4 in destination2 ) { + print "VALID"; + } + if ( 5 in destination2 ) { + print "VALID"; + } + if ( 6 in destination2 ) { + print "VALID"; + } + if ( 7 in destination2 ) { + print "VALID"; + } + + +} From f82bf3f35fb5f1e298366f9af6d9ec0372ac3d58 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 22 Nov 2011 11:09:06 -0800 Subject: [PATCH 052/651] re-enable direct event sending from input readers --- src/InputMgr.cc | 18 +++++++++++++----- src/InputMgr.h | 3 ++- src/InputReader.cc | 7 +++---- src/InputReader.h | 2 +- 4 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 2ec3c649be..e6d739d26f 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -810,22 +810,30 @@ void InputMgr::Error(InputReader* reader, const char* msg) reporter->Error("error with input reader for %s: %s", reader->Source().c_str(), msg); } -/* Does not work atm, because LogValToVal needs BroType -void InputMgr::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) +bool InputMgr::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) { EventHandler* handler = event_registry->Lookup(name.c_str()); if ( handler == 0 ) { reporter->Error("Event %s not found", name.c_str()); - return; + return 
false; + } + + RecordType *type = handler->FType()->Args(); + int num_event_vals = type->NumFields(); + if ( num_vals != num_event_vals ) { + reporter->Error("Wrong number of values for event %s", name.c_str()); + return false; } val_list* vl = new val_list; for ( int i = 0; i < num_vals; i++) { - vl->append(LogValToVal(vals[i])); + vl->append(LogValToVal(vals[i], type->FieldType(i))); } mgr.Dispatch(new Event(handler, vl)); -} */ + + return true; +} void InputMgr::SendEvent(EventHandlerPtr ev, EnumVal* event, Val* left, Val* right) { diff --git a/src/InputMgr.h b/src/InputMgr.h index d04b7c9a2c..ba1ddafc92 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -59,7 +59,8 @@ private: Val* LogValToIndexVal(int num_fields, const RecordType* type, const LogVal* const *vals); Val* LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position); - //void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); + bool SendEvent(const string& name, const int num_vals, const LogVal* const *vals); + ReaderInfo* FindReader(const InputReader* reader); ReaderInfo* FindReader(const EnumVal* id); diff --git a/src/InputReader.cc b/src/InputReader.cc index 1c65985fd6..3f296dc0aa 100644 --- a/src/InputReader.cc +++ b/src/InputReader.cc @@ -71,11 +71,10 @@ bool InputReader::Update() return DoUpdate(); } -/* -void InputReader::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) +bool InputReader::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) { - input_mgr->SendEvent(name, num_vals, vals); -} */ + return input_mgr->SendEvent(name, num_vals, vals); +} // stolen from logwriter const char* InputReader::Fmt(const char* format, ...) diff --git a/src/InputReader.h b/src/InputReader.h index 6e3d689750..34d549308e 100644 --- a/src/InputReader.h +++ b/src/InputReader.h @@ -48,7 +48,7 @@ protected: // A thread-safe version of fmt(). (stolen from logwriter) const char* Fmt(const char* format, ...); - //void SendEvent(const string& name, const int num_vals, const LogVal* const *vals); + bool SendEvent(const string& name, const int num_vals, const LogVal* const *vals); // Content-sendinf-functions (simple mode). Including table-specific stuff that simply is not used if we have no table void Put(int id, const LogVal* const *val); From 3c40f00a539e5d3d2945ffd1df71e19e93c7133d Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 22 Nov 2011 11:39:27 -0800 Subject: [PATCH 053/651] make filters pointers (for inheritance) --- scripts/base/frameworks/input/main.bro | 29 +- src/InputMgr.cc | 519 +++++++++++++----- src/InputMgr.h | 21 +- src/input.bif | 13 + .../out | 21 + .../scripts/base/frameworks/input/event.bro | 16 +- .../base/frameworks/input/tableevent.bro | 48 ++ 7 files changed, 507 insertions(+), 160 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out create mode 100644 testing/btest/scripts/base/frameworks/input/tableevent.bro diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 69b4d41ebb..a036eeeebd 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -23,10 +23,19 @@ export { ## decision function, that decides if an insertion, update or removal should really be executed. 
## or events should be thought pred: function(typ: Input::Event, left: any, right: any): bool &optional; + }; - ## for "normalized" events - # ev: any &optional; - # ev_description: any &optional; + type EventFilter: record { + ## descriptive name. for later removal + name: string; + + # the event + ev: any; + # record describing the fields + fields: any; + + # does the event want the field unrolled (default) or as a simple record value? + want_record: bool &default=F; }; #const no_filter: Filter = [$name="", $idx="", $val="", $destination=""]; # Sentinel. @@ -36,6 +45,8 @@ export { global force_update: function(id: Log::ID) : bool; global add_tablefilter: function(id: Log::ID, filter: Input::TableFilter) : bool; global remove_tablefilter: function(id: Log::ID, name: string) : bool; + global add_eventfilter: function(id: Log::ID, filter: Input::EventFilter) : bool; + global remove_eventfilter: function(id: Log::ID, name: string) : bool; #global get_filter: function(id: ID, name: string) : Filter; } @@ -74,6 +85,18 @@ function remove_tablefilter(id: Log::ID, name: string) : bool return __remove_tablefilter(id, name); } +function add_eventfilter(id: Log::ID, filter: Input::EventFilter) : bool + { +# filters[id, filter$name] = filter; + return __add_eventfilter(id, filter); + } + +function remove_eventfilter(id: Log::ID, name: string) : bool + { +# delete filters[id, name]; + return __remove_eventfilter(id, name); + } + #function get_filter(id: ID, name: string) : Filter # { # if ( [id, name] in filters ) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index e6d739d26f..0df11ea359 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -28,7 +28,13 @@ public: EnumVal* id; string name; - //int filter_type; // to distinguish between event and table filters + FilterType filter_type; // to distinguish between event and table filters + + virtual ~Filter(); +}; + +class InputMgr::TableFilter: public InputMgr::Filter { +public: unsigned int num_idx_fields; unsigned int num_val_fields; @@ -45,61 +51,42 @@ public: Func* pred; EventHandlerPtr event; - RecordType* event_type; - // ~Filter(); - // Filter(); - // Filter(const InputMgr::Filter& filter); - - void DoCleanup(); + TableFilter(); + ~TableFilter(); }; -/* -InputMgr::Filter::Filter() { +class InputMgr::EventFilter: public InputMgr::Filter { +public: + EventHandlerPtr event; + + RecordType* fields; + unsigned int num_fields; + + bool want_record; + EventFilter(); +}; + +InputMgr::TableFilter::TableFilter() { + filter_type = TABLE_FILTER; + tab = 0; itype = 0; rtype = 0; - event_type = 0; } -InputMgr::Filter::Filter(const InputMgr::Filter& f) { - id = f.id; - id->Ref(); +InputMgr::EventFilter::EventFilter() { + filter_type = EVENT_FILTER; +} - tab = f.tab; - if ( tab ) - tab->Ref(); - - itype = f.itype; - if ( itype ) - itype->Ref(); - - rtype = f.rtype; - if ( rtype ) - Ref(rtype); - - event_type = f.event_type; - if ( event_type ) - Ref(event_type); - - name = f.name; - num_idx_fields = f.num_idx_fields; - num_val_fields = f.num_val_fields; - want_record = f.want_record; - - -} */ - -void InputMgr::Filter::DoCleanup() { +InputMgr::Filter::~Filter() { Unref(id); - if ( tab ) - Unref(tab); - if ( itype ) - Unref(itype); - if ( rtype ) - Unref(rtype); - if ( event_type) - Unref(event_type); +} + +InputMgr::TableFilter::~TableFilter() { + Unref(tab); + Unref(itype); + Unref(rtype); delete currDict; delete lastDict; @@ -111,7 +98,7 @@ struct InputMgr::ReaderInfo { InputReader* reader; //list events; // events we fire when "something" happens - map 
filters; // filters that can prevent our actions + map filters; // filters that can prevent our actions bool HasFilter(int id); @@ -119,7 +106,11 @@ struct InputMgr::ReaderInfo { }; InputMgr::ReaderInfo::~ReaderInfo() { - // all the contents of filters should delete themselves automatically... + map::iterator it = filters.begin(); + + while ( it != filters.end() ) { + delete (*it).second; + } Unref(type); Unref(id); @@ -128,7 +119,7 @@ InputMgr::ReaderInfo::~ReaderInfo() { } bool InputMgr::ReaderInfo::HasFilter(int id) { - map::iterator it = filters.find(id); + map::iterator it = filters.find(id); if ( it == filters.end() ) { return false; } @@ -236,6 +227,114 @@ InputReader* InputMgr::CreateStream(EnumVal* id, RecordVal* description) } +bool InputMgr::AddEventFilter(EnumVal *id, RecordVal* fval) { + ReaderInfo *i = FindReader(id); + if ( i == 0 ) { + reporter->Error("Stream not found"); + return false; + } + + RecordType* rtype = fval->Type()->AsRecordType(); + if ( ! same_type(rtype, BifType::Record::Input::EventFilter, 0) ) + { + reporter->Error("filter argument not of right type"); + return false; + } + + Val* name = fval->Lookup(rtype->FieldOffset("name")); + RecordType *fields = fval->Lookup(rtype->FieldOffset("fields"))->AsType()->AsTypeType()->Type()->AsRecordType(); + + Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); + + Val* event_val = fval->Lookup(rtype->FieldOffset("ev")); + Func* event = event_val->AsFunc(); + + { + FuncType* etype = event->FType()->AsFuncType(); + + if ( ! etype->IsEvent() ) { + reporter->Error("stream event is a function, not an event"); + return false; + } + + const type_list* args = etype->ArgTypes()->Types(); + + if ( args->length() < 2 ) { + reporter->Error("event takes not enough arguments"); + return false; + } + + if ( ! 
same_type((*args)[0], BifType::Enum::Input::Event, 0) ) + { + reporter->Error("events first attribute must be of type Input::Event"); + return false; + } + + if ( want_record->InternalInt() == 0 ) { + if ( args->length() != fields->NumFields() + 1 ) { + reporter->Error("events has wrong number of arguments"); + return false; + } + + for ( int i = 0; i < fields->NumFields(); i++ ) { + if ( !same_type((*args)[i+1], fields->FieldType(i) ) ) { + reporter->Error("Incompatible type for event"); + return false; + } + } + + } else if ( want_record->InternalInt() == 1 ) { + if ( args->length() != 2 ) { + reporter->Error("events has wrong number of arguments"); + return false; + } + + if ( !same_type((*args)[1], fields ) ) { + reporter->Error("Incompatible type for event"); + return false; + } + + } else { + assert(false); + } + + } + + + vector fieldsV; // vector, because UnrollRecordType needs it + + bool status = !UnrollRecordType(&fieldsV, fields, ""); + + if ( status ) { + reporter->Error("Problem unrolling"); + return false; + } + + + LogField** logf = new LogField*[fieldsV.size()]; + for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { + logf[i] = fieldsV[i]; + } + + EventFilter* filter = new EventFilter(); + filter->name = name->AsString()->CheckString(); + filter->id = id->Ref()->AsEnumVal(); + filter->num_fields = fieldsV.size(); + filter->fields = fields->Ref()->AsRecordType(); + filter->event = event_registry->Lookup(event->GetID()->Name()); + filter->want_record = ( want_record->InternalInt() == 1 ); + Unref(want_record); // ref'd by lookupwithdefault + + int filterid = 0; + if ( i->filters.size() > 0 ) { + filterid = i->filters.rbegin()->first + 1; // largest element is at beginning of map-> new id = old id + 1-> + } + i->filters[filterid] = filter; + i->reader->AddFilter( filterid, fieldsV.size(), logf ); + + return true; +} + bool InputMgr::AddTableFilter(EnumVal *id, RecordVal* fval) { ReaderInfo *i = FindReader(id); if ( i == 0 ) { @@ -299,6 +398,7 @@ bool InputMgr::AddTableFilter(EnumVal *id, RecordVal* fval) { reporter->Error("table events value attribute does not match"); return false; } + assert(want_record->InternalInt() == 1 || want_record->InternalInt() == 0); } @@ -322,28 +422,28 @@ bool InputMgr::AddTableFilter(EnumVal *id, RecordVal* fval) { fields[i] = fieldsV[i]; } - Filter filter; - filter.name = name->AsString()->CheckString(); - filter.id = id->Ref()->AsEnumVal(); - filter.pred = pred ? pred->AsFunc() : 0; - filter.num_idx_fields = idxfields; - filter.num_val_fields = valfields; - filter.tab = dst->Ref()->AsTableVal(); - filter.rtype = val->Ref()->AsRecordType(); - filter.itype = idx->Ref()->AsRecordType(); - filter.event = event ? event_registry->Lookup(event->GetID()->Name()) : 0; - filter.currDict = new PDict(InputHash); - filter.lastDict = new PDict(InputHash); - filter.want_record = ( want_record->InternalInt() == 1 ); + TableFilter* filter = new TableFilter(); + filter->name = name->AsString()->CheckString(); + filter->id = id->Ref()->AsEnumVal(); + filter->pred = pred ? pred->AsFunc() : 0; + filter->num_idx_fields = idxfields; + filter->num_val_fields = valfields; + filter->tab = dst->Ref()->AsTableVal(); + filter->rtype = val->Ref()->AsRecordType(); + filter->itype = idx->Ref()->AsRecordType(); + filter->event = event ? 
event_registry->Lookup(event->GetID()->Name()) : 0; + filter->currDict = new PDict(InputHash); + filter->lastDict = new PDict(InputHash); + filter->want_record = ( want_record->InternalInt() == 1 ); Unref(want_record); // ref'd by lookupwithdefault if ( valfields > 1 ) { - assert(filter.want_record); + assert(filter->want_record); } int filterid = 0; if ( i->filters.size() > 0 ) { - filterid = i->filters.rbegin()->first + 1; // largest element is at beginning of map. new id = old id + 1. + filterid = i->filters.rbegin()->first + 1; // largest element is at beginning of map-> new id = old id + 1-> } i->filters[filterid] = filter; i->reader->AddFilter( filterid, fieldsV.size(), fields ); @@ -478,13 +578,39 @@ bool InputMgr::RemoveTableFilter(EnumVal* id, const string &name) { return false; } - map::iterator it = i->filters.find(id->InternalInt()); + map::iterator it = i->filters.find(id->InternalInt()); if ( it == i->filters.end() ) { return false; } - i->filters[id->InternalInt()].DoCleanup(); + if ( i->filters[id->InternalInt()]->filter_type != TABLE_FILTER ) { + // wrong type; + return false; + } + delete (*it).second; + i->filters.erase(it); + return true; +} + +bool InputMgr::RemoveEventFilter(EnumVal* id, const string &name) { + ReaderInfo *i = FindReader(id); + if ( i == 0 ) { + reporter->Error("Reader not found"); + return false; + } + + map::iterator it = i->filters.find(id->InternalInt()); + if ( it == i->filters.end() ) { + return false; + } + + if ( i->filters[id->InternalInt()]->filter_type != EVENT_FILTER ) { + // wrong type; + return false; + } + + delete (*it).second; i->filters.erase(it); return true; } @@ -524,30 +650,53 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const return; } + if ( !i->HasFilter(id) ) { + reporter->InternalError("Unknown filter"); + return; + } + + if ( i->filters[id]->filter_type == TABLE_FILTER ) { + SendEntryTable(reader, id, vals); + } else if ( i->filters[id]->filter_type == EVENT_FILTER ) { + EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); + SendEventFilterEvent(reader, type, id, vals); + } else { + assert(false); + } + +} + +void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* const *vals) { + ReaderInfo *i = FindReader(reader); + bool updated = false; + assert(i); assert(i->HasFilter(id)); + assert(i->filters[id]->filter_type == TABLE_FILTER); + TableFilter* filter = (TableFilter*) i->filters[id]; + //reporter->Error("Hashing %d index fields", i->num_idx_fields); - HashKey* idxhash = HashLogVals(i->filters[id].num_idx_fields, vals); + HashKey* idxhash = HashLogVals(filter->num_idx_fields, vals); //reporter->Error("Result: %d", (uint64_t) idxhash->Hash()); //reporter->Error("Hashing %d val fields", i->num_val_fields); - HashKey* valhash = HashLogVals(i->filters[id].num_val_fields, vals+i->filters[id].num_idx_fields); + HashKey* valhash = HashLogVals(filter->num_val_fields, vals+filter->num_idx_fields); //reporter->Error("Result: %d", (uint64_t) valhash->Hash()); //reporter->Error("received entry with idxhash %d and valhash %d", (uint64_t) idxhash->Hash(), (uint64_t) valhash->Hash()); - InputHash *h = i->filters[id].lastDict->Lookup(idxhash); + InputHash *h = filter->lastDict->Lookup(idxhash); if ( h != 0 ) { // seen before if ( h->valhash->Hash() == valhash->Hash() ) { // ok, double. 
- i->filters[id].lastDict->Remove(idxhash); - i->filters[id].currDict->Insert(idxhash, h); + filter->lastDict->Remove(idxhash); + filter->currDict->Insert(idxhash, h); return; } else { // updated - i->filters[id].lastDict->Remove(idxhash); + filter->lastDict->Remove(idxhash); delete(h); updated = true; @@ -555,30 +704,25 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const } - Val* idxval = LogValToIndexVal(i->filters[id].num_idx_fields, i->filters[id].itype, vals); + Val* idxval = LogValToIndexVal(filter->num_idx_fields, filter->itype, vals); Val* valval; - int position = i->filters[id].num_idx_fields; - if ( i->filters[id].num_val_fields == 1 && !i->filters[id].want_record ) { - valval = LogValToVal(vals[position], i->filters[id].rtype->FieldType(0)); + int position = filter->num_idx_fields; + if ( filter->num_val_fields == 1 && !filter->want_record ) { + valval = LogValToVal(vals[position], filter->rtype->FieldType(0)); } else { - RecordVal * r = new RecordVal(i->filters[id].rtype); + RecordVal * r = new RecordVal(filter->rtype); - for ( int j = 0; j < i->filters[id].rtype->NumFields(); j++) { + for ( int j = 0; j < filter->rtype->NumFields(); j++) { Val* val = 0; - if ( i->filters[id].rtype->FieldType(j)->Tag() == TYPE_RECORD ) { - val = LogValToRecordVal(vals, i->filters[id].rtype->FieldType(j)->AsRecordType(), &position); + if ( filter->rtype->FieldType(j)->Tag() == TYPE_RECORD ) { + val = LogValToRecordVal(vals, filter->rtype->FieldType(j)->AsRecordType(), &position); } else { - val = LogValToVal(vals[position], i->filters[id].rtype->FieldType(j)); + val = LogValToVal(vals[position], filter->rtype->FieldType(j)); position++; } - /* if ( val == 0 ) { - reporter->InternalError("conversion error"); - return; - } */ - r->Assign(j,val); } @@ -589,12 +733,12 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const Val* oldval = 0; if ( updated == true ) { // in that case, we need the old value to send the event (if we send an event). - oldval = i->filters[id].tab->Lookup(idxval); + oldval = filter->tab->Lookup(idxval); } // call filter first to determine if we really add / change the entry - if ( i->filters[id].pred ) { + if ( filter->pred ) { EnumVal* ev; Ref(idxval); Ref(valval); @@ -609,18 +753,18 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const vl.append(ev); vl.append(idxval); vl.append(valval); - Val* v = i->filters[id].pred->Call(&vl); + Val* v = filter->pred->Call(&vl); bool result = v->AsBool(); Unref(v); if ( result == false ) { if ( !updated ) { // throw away. Hence - we quit. And remove the entry from the current dictionary... 
- delete(i->filters[id].currDict->RemoveEntry(idxhash)); + delete(filter->currDict->RemoveEntry(idxhash)); return; } else { // keep old one - i->filters[id].currDict->Insert(idxhash, h); + filter->currDict->Insert(idxhash, h); return; } } @@ -629,24 +773,23 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const //i->tab->Assign(idxval, valval); - HashKey* k = i->filters[id].tab->ComputeHash(idxval); + HashKey* k = filter->tab->ComputeHash(idxval); if ( !k ) { reporter->InternalError("could not hash"); return; } - reporter->Error("assigning"); - i->filters[id].tab->Assign(idxval, k, valval); + filter->tab->Assign(idxval, k, valval); InputHash* ih = new InputHash(); - k = i->filters[id].tab->ComputeHash(idxval); + k = filter->tab->ComputeHash(idxval); ih->idxkey = k; ih->valhash = valhash; //i->tab->Delete(k); - i->filters[id].currDict->Insert(idxhash, ih); + filter->currDict->Insert(idxhash, ih); - if ( i->filters[id].event ) { + if ( filter->event ) { EnumVal* ev; Ref(idxval); @@ -654,11 +797,11 @@ void InputMgr::SendEntry(const InputReader* reader, int id, const LogVal* const ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); Ref(oldval); - SendEvent(i->filters[id].event, ev, idxval, oldval); + SendEvent(filter->event, 3, ev, idxval, oldval); } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); Ref(valval); - SendEvent(i->filters[id].event, ev, idxval, valval); + SendEvent(filter->event, 3, ev, idxval, valval); } } } @@ -673,25 +816,33 @@ void InputMgr::EndCurrentSend(const InputReader* reader, int id) { assert(i->HasFilter(id)); + if ( i->filters[id]->filter_type == EVENT_FILTER ) { + // nothing to do.. + return; + } + + assert(i->filters[id]->filter_type == TABLE_FILTER); + TableFilter* filter = (TableFilter*) i->filters[id]; + // lastdict contains all deleted entries and should be empty apart from that - IterCookie *c = i->filters[id].lastDict->InitForIteration(); - i->filters[id].lastDict->MakeRobustCookie(c); + IterCookie *c = filter->lastDict->InitForIteration(); + filter->lastDict->MakeRobustCookie(c); InputHash* ih; HashKey *lastDictIdxKey; //while ( ( ih = i->lastDict->NextEntry(c) ) ) { - while ( ( ih = i->filters[id].lastDict->NextEntry(lastDictIdxKey, c) ) ) { + while ( ( ih = filter->lastDict->NextEntry(lastDictIdxKey, c) ) ) { ListVal * idx; Val *val; - if ( i->filters[id].pred || i->filters[id].event ) { - idx = i->filters[id].tab->RecoverIndex(ih->idxkey); + if ( filter->pred || filter->event ) { + idx = filter->tab->RecoverIndex(ih->idxkey); assert(idx != 0); - val = i->filters[id].tab->Lookup(idx); + val = filter->tab->Lookup(idx); assert(val != 0); } - if ( i->filters[id].pred ) { + if ( filter->pred ) { bool doBreak = false; // ask predicate, if we want to expire this element... @@ -704,37 +855,37 @@ void InputMgr::EndCurrentSend(const InputReader* reader, int id) { vl.append(ev); vl.append(idx); vl.append(val); - Val* v = i->filters[id].pred->Call(&vl); + Val* v = filter->pred->Call(&vl); bool result = v->AsBool(); Unref(v); if ( result == false ) { // Keep it. Hence - we quit and simply go to the next entry of lastDict // ah well - and we have to add the entry to currDict... 
- i->filters[id].currDict->Insert(lastDictIdxKey, i->filters[id].lastDict->RemoveEntry(lastDictIdxKey)); + filter->currDict->Insert(lastDictIdxKey, filter->lastDict->RemoveEntry(lastDictIdxKey)); continue; } } - if ( i->filters[id].event ) { + if ( filter->event ) { Ref(idx); Ref(val); EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - SendEvent(i->filters[id].event, ev, idx, val); + SendEvent(filter->event, 3, ev, idx, val); } - i->filters[id].tab->Delete(ih->idxkey); - i->filters[id].lastDict->Remove(lastDictIdxKey); // deletex in next line + filter->tab->Delete(ih->idxkey); + filter->lastDict->Remove(lastDictIdxKey); // deletex in next line delete(ih); } - i->filters[id].lastDict->Clear(); // should be empty... but... well... who knows... - delete(i->filters[id].lastDict); + filter->lastDict->Clear(); // should be empty->->-> but->->-> well->->-> who knows->->-> + delete(filter->lastDict); - i->filters[id].lastDict = i->filters[id].currDict; - i->filters[id].currDict = new PDict(InputHash); + filter->lastDict = filter->currDict; + filter->currDict = new PDict(InputHash); } void InputMgr::Put(const InputReader* reader, int id, const LogVal* const *vals) { @@ -744,24 +895,86 @@ void InputMgr::Put(const InputReader* reader, int id, const LogVal* const *vals) return; } + if ( !i->HasFilter(id) ) { + reporter->InternalError("Unknown filter"); + return; + } + + if ( i->filters[id]->filter_type == TABLE_FILTER ) { + PutTable(reader, id, vals); + } else if ( i->filters[id]->filter_type == EVENT_FILTER ) { + EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); + SendEventFilterEvent(reader, type, id, vals); + } else { + assert(false); + } + +} + +void InputMgr::SendEventFilterEvent(const InputReader* reader, EnumVal* type, int id, const LogVal* const *vals) { + ReaderInfo *i = FindReader(reader); + + bool updated = false; + + assert(i); assert(i->HasFilter(id)); - Val* idxval = LogValToIndexVal(i->filters[id].num_idx_fields, i->filters[id].itype, vals); + assert(i->filters[id]->filter_type == EVENT_FILTER); + EventFilter* filter = (EventFilter*) i->filters[id]; + + Val *val; + list out_vals; + // no tracking, send everything with a new event... 
+ //out_vals.push_back(new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event)); + out_vals.push_back(type); + + int position = 0; + if ( filter->want_record ) { + RecordVal * r = LogValToRecordVal(vals, filter->fields, &position); + out_vals.push_back(r); + + } else { + for ( int j = 0; j < filter->fields->NumFields(); j++) { + Val* val = 0; + if ( filter->fields->FieldType(j)->Tag() == TYPE_RECORD ) { + val = LogValToRecordVal(vals, filter->fields->FieldType(j)->AsRecordType(), &position); + } else { + val = LogValToVal(vals[position], filter->fields->FieldType(j)); + position++; + } + out_vals.push_back(val); + } + } + + SendEvent(filter->event, out_vals); + +} + +void InputMgr::PutTable(const InputReader* reader, int id, const LogVal* const *vals) { + ReaderInfo *i = FindReader(reader); + + assert(i); + assert(i->HasFilter(id)); + + assert(i->filters[id]->filter_type == TABLE_FILTER); + TableFilter* filter = (TableFilter*) i->filters[id]; + + Val* idxval = LogValToIndexVal(filter->num_idx_fields, filter->itype, vals); Val* valval; - int position = i->filters[id].num_idx_fields; - if ( i->filters[id].num_val_fields == 1 && !i->filters[id].want_record ) { - valval = LogValToVal(vals[i->filters[id].num_idx_fields], i->filters[id].rtype->FieldType(i->filters[id].num_idx_fields)); + int position = filter->num_idx_fields; + if ( filter->num_val_fields == 1 && !filter->want_record ) { + valval = LogValToVal(vals[filter->num_idx_fields], filter->rtype->FieldType(filter->num_idx_fields)); } else { - RecordVal * r = new RecordVal(i->filters[id].rtype); + RecordVal * r = new RecordVal(filter->rtype); - for ( int j = 0; j < i->filters[id].rtype->NumFields(); j++) { + for ( int j = 0; j < filter->rtype->NumFields(); j++) { Val* val = 0; - if ( i->filters[id].rtype->FieldType(j)->Tag() == TYPE_RECORD ) { - val = LogValToRecordVal(vals, i->filters[id].rtype->FieldType(j)->AsRecordType(), &position); + if ( filter->rtype->FieldType(j)->Tag() == TYPE_RECORD ) { + val = LogValToRecordVal(vals, filter->rtype->FieldType(j)->AsRecordType(), &position); } else { - val = LogValToVal(vals[position], i->filters[id].rtype->FieldType(j)); + val = LogValToVal(vals[position], filter->rtype->FieldType(j)); position++; } @@ -776,7 +989,7 @@ void InputMgr::Put(const InputReader* reader, int id, const LogVal* const *vals) valval = r; } - i->filters[id].tab->Assign(idxval, valval); + filter->tab->Assign(idxval, valval); } void InputMgr::Clear(const InputReader* reader, int id) { @@ -788,7 +1001,10 @@ void InputMgr::Clear(const InputReader* reader, int id) { assert(i->HasFilter(id)); - i->filters[id].tab->RemoveAll(); + assert(i->filters[id]->filter_type == TABLE_FILTER); + TableFilter* filter = (TableFilter*) i->filters[id]; + + filter->tab->RemoveAll(); } bool InputMgr::Delete(const InputReader* reader, int id, const LogVal* const *vals) { @@ -800,9 +1016,18 @@ bool InputMgr::Delete(const InputReader* reader, int id, const LogVal* const *va assert(i->HasFilter(id)); - Val* idxval = LogValToIndexVal(i->filters[id].num_idx_fields, i->filters[id].itype, vals); - - return ( i->filters[id].tab->Delete(idxval) != 0 ); + if ( i->filters[id]->filter_type == TABLE_FILTER ) { + TableFilter* filter = (TableFilter*) i->filters[id]; + Val* idxval = LogValToIndexVal(filter->num_idx_fields, filter->itype, vals); + return( filter->tab->Delete(idxval) != 0 ); + } else if ( i->filters[id]->filter_type == EVENT_FILTER ) { + EnumVal *type = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); + 
SendEventFilterEvent(reader, type, id, vals); + return true; + } else { + assert(false); + return false; + } } void InputMgr::Error(InputReader* reader, const char* msg) @@ -835,25 +1060,35 @@ bool InputMgr::SendEvent(const string& name, const int num_vals, const LogVal* c return true; } -void InputMgr::SendEvent(EventHandlerPtr ev, EnumVal* event, Val* left, Val* right) +void InputMgr::SendEvent(EventHandlerPtr ev, const int numvals, ...) { - //EventHandler* handler = event_registry->Lookup(name.c_str()); - //if ( handler == 0 ) { - // reporter->Error("Event %s not found", name.c_str()); - // return; - //} - val_list* vl = new val_list; - vl->append(event); - vl->append(left); - vl->append(right); + + va_list lP; + va_start(lP, numvals); + for ( int i = 0; i < numvals; i++ ) + { + vl->append( va_arg(lP, Val*) ); + } + va_end(lP); + + mgr.QueueEvent(ev, vl, SOURCE_LOCAL); +} + +void InputMgr::SendEvent(EventHandlerPtr ev, list events) +{ + val_list* vl = new val_list; + + for ( list::iterator i = events.begin(); i != events.end(); i++ ) { + vl->append( *i ); + } - //mgr.Dispatch(new Event(handler, vl)); mgr.QueueEvent(ev, vl, SOURCE_LOCAL); } -Val* InputMgr::LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position) { + +RecordVal* InputMgr::LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position) { if ( position == 0 ) { reporter->InternalError("Need position"); return 0; diff --git a/src/InputMgr.h b/src/InputMgr.h index ba1ddafc92..cebed231e4 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -27,6 +27,8 @@ public: bool AddTableFilter(EnumVal *id, RecordVal* filter); bool RemoveTableFilter(EnumVal* id, const string &name); + bool AddEventFilter(EnumVal *id, RecordVal* filter); + bool RemoveEventFilter(EnumVal* id, const string &name); protected: friend class InputReader; @@ -46,10 +48,17 @@ protected: private: struct ReaderInfo; + void SendEntryTable(const InputReader* reader, int id, const LogVal* const *vals); + void PutTable(const InputReader* reader, int id, const LogVal* const *vals); + void SendEventFilterEvent(const InputReader* reader, EnumVal* type, int id, const LogVal* const *vals); + bool IsCompatibleType(BroType* t, bool atomic_only=false); bool UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend); - void SendEvent(EventHandlerPtr ev, EnumVal* event, Val* left, Val* right); + + void SendEvent(EventHandlerPtr ev, const int numvals, ...); + void SendEvent(EventHandlerPtr ev, list events); + bool SendEvent(const string& name, const int num_vals, const LogVal* const *vals); HashKey* HashLogVals(const int num_elements, const LogVal* const *vals); int GetLogValLength(const LogVal* val); @@ -57,9 +66,8 @@ private: Val* LogValToVal(const LogVal* val, BroType* request_type); Val* LogValToIndexVal(int num_fields, const RecordType* type, const LogVal* const *vals); - Val* LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position); + RecordVal* LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position); - bool SendEvent(const string& name, const int num_vals, const LogVal* const *vals); ReaderInfo* FindReader(const InputReader* reader); ReaderInfo* FindReader(const EnumVal* id); @@ -68,7 +76,12 @@ private: string Hash(const string &input); - struct Filter; + class Filter; + class TableFilter; + class EventFilter; + + enum FilterType { TABLE_FILTER, EVENT_FILTER }; + }; diff --git a/src/input.bif b/src/input.bif index 1300f91bea..b1d57b1df6 100644 
--- a/src/input.bif +++ b/src/input.bif @@ -9,6 +9,7 @@ module Input; type StreamDescription: record; type TableFilter: record; +type EventFilter: record; function Input::__create_stream%(id: Log::ID, description: Input::StreamDescription%) : bool %{ @@ -40,6 +41,18 @@ function Input::__remove_tablefilter%(id: Log::ID, name: string%) : bool return new Val( res, TYPE_BOOL); %} +function Input::__add_eventfilter%(id: Log::ID, filter: Input::EventFilter%) : bool + %{ + bool res = input_mgr->AddEventFilter(id->AsEnumVal(), filter->AsRecordVal()); + return new Val( res, TYPE_BOOL ); + %} + +function Input::__remove_eventfilter%(id: Log::ID, name: string%) : bool + %{ + bool res = input_mgr->RemoveEventFilter(id->AsEnumVal(), name->AsString()->CheckString()); + return new Val( res, TYPE_BOOL); + %} + # Options for Ascii Reader module InputAscii; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out new file mode 100644 index 0000000000..e32a2aea00 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out @@ -0,0 +1,21 @@ +Input::EVENT_NEW +1 +T +Input::EVENT_NEW +2 +T +Input::EVENT_NEW +3 +F +Input::EVENT_NEW +4 +F +Input::EVENT_NEW +5 +F +Input::EVENT_NEW +6 +F +Input::EVENT_NEW +7 +T diff --git a/testing/btest/scripts/base/frameworks/input/event.bro b/testing/btest/scripts/base/frameworks/input/event.bro index 36e8171689..d9be733a1b 100644 --- a/testing/btest/scripts/base/frameworks/input/event.bro +++ b/testing/btest/scripts/base/frameworks/input/event.bro @@ -16,7 +16,6 @@ 7 T @TEST-END-FILE -redef InputAscii::empty_field = "EMPTY"; module A; @@ -24,25 +23,20 @@ export { redef enum Log::ID += { LOG }; } -type Idx: record { - i: int; -}; - type Val: record { + i: int; b: bool; }; -global destination: table[int] of Val = table(); - -event line(tpe: Input::Event, left: Idx, right: bool) { +event line(tpe: Input::Event, i: int, b: bool) { print tpe; - print left; - print right; + print i; + print b; } event bro_init() { Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=destination, $want_record=F,$ev=line]); + Input::add_eventfilter(A::LOG, [$name="input", $fields=Val, $ev=line]); Input::force_update(A::LOG); } diff --git a/testing/btest/scripts/base/frameworks/input/tableevent.bro b/testing/btest/scripts/base/frameworks/input/tableevent.bro new file mode 100644 index 0000000000..36e8171689 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/tableevent.bro @@ -0,0 +1,48 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Log::ID += { LOG }; +} + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global destination: table[int] of Val = table(); + +event line(tpe: Input::Event, left: Idx, right: bool) { + print tpe; + print left; + print right; +} + +event bro_init() +{ + Input::create_stream(A::LOG, [$source="input.log"]); + Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=destination, $want_record=F,$ev=line]); + Input::force_update(A::LOG); +} From be1b3ce5e1f239861013517b8d8c5a5fafbdf0bd Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 28 Nov 2011 13:29:02 -0600 Subject: [PATCH 054/651] 
Add note about independent component releases to Broxygen index. --- doc/index.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/index.rst b/doc/index.rst index ba3df81e7d..0a6b225431 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -46,6 +46,12 @@ Script Reference Other Bro Components -------------------- +The following are snapshots of documentation for components that come +with this version of Bro (|version|). Since they can also be used +independently, see the `download page +`_ for documentation of any +current, independent component releases. + .. toctree:: :maxdepth: 1 From 4975584e01af095cf6a6d10a4c09eb88a3f198e0 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 28 Nov 2011 13:28:44 -0800 Subject: [PATCH 055/651] change Log enum to Input enum. --- scripts/base/frameworks/input/main.bro | 10 +++++----- src/input.bif | 10 +++++----- src/types.bif | 1 - testing/btest/scripts/base/frameworks/input/basic.bro | 8 ++++---- testing/btest/scripts/base/frameworks/input/event.bro | 8 ++++---- .../base/frameworks/input/onecolumn-norecord.bro | 8 ++++---- .../scripts/base/frameworks/input/onecolumn-record.bro | 8 ++++---- .../btest/scripts/base/frameworks/input/predicate.bro | 8 ++++---- .../btest/scripts/base/frameworks/input/twofilters.bro | 10 +++++----- 9 files changed, 35 insertions(+), 36 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index a036eeeebd..1a9e8b885c 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -58,28 +58,28 @@ module Input; #global filters: table[ID, string] of Filter; -function create_stream(id: Log::ID, description: Input::StreamDescription) : bool +function create_stream(id: Input::ID, description: Input::StreamDescription) : bool { return __create_stream(id, description); } -function remove_stream(id: Log::ID) : bool +function remove_stream(id: Input::ID) : bool { return __remove_stream(id); } -function force_update(id: Log::ID) : bool +function force_update(id: Input::ID) : bool { return __force_update(id); } -function add_tablefilter(id: Log::ID, filter: Input::TableFilter) : bool +function add_tablefilter(id: Input::ID, filter: Input::TableFilter) : bool { # filters[id, filter$name] = filter; return __add_tablefilter(id, filter); } -function remove_tablefilter(id: Log::ID, name: string) : bool +function remove_tablefilter(id: Input::ID, name: string) : bool { # delete filters[id, name]; return __remove_tablefilter(id, name); diff --git a/src/input.bif b/src/input.bif index b1d57b1df6..a7d561c060 100644 --- a/src/input.bif +++ b/src/input.bif @@ -11,31 +11,31 @@ type StreamDescription: record; type TableFilter: record; type EventFilter: record; -function Input::__create_stream%(id: Log::ID, description: Input::StreamDescription%) : bool +function Input::__create_stream%(id: Input::ID, description: Input::StreamDescription%) : bool %{ InputReader *the_reader = input_mgr->CreateStream(id->AsEnumVal(), description->AsRecordVal()); return new Val( the_reader != 0, TYPE_BOOL ); %} -function Input::__remove_stream%(id: Log::ID%) : bool +function Input::__remove_stream%(id: Input::ID%) : bool %{ bool res = input_mgr->RemoveStream(id->AsEnumVal()); return new Val( res, TYPE_BOOL ); %} -function Input::__force_update%(id: Log::ID%) : bool +function Input::__force_update%(id: Input::ID%) : bool %{ bool res = input_mgr->ForceUpdate(id->AsEnumVal()); return new Val( res, TYPE_BOOL ); %} -function Input::__add_tablefilter%(id: Log::ID, filter: 
Input::TableFilter%) : bool +function Input::__add_tablefilter%(id: Input::ID, filter: Input::TableFilter%) : bool %{ bool res = input_mgr->AddTableFilter(id->AsEnumVal(), filter->AsRecordVal()); return new Val( res, TYPE_BOOL ); %} -function Input::__remove_tablefilter%(id: Log::ID, name: string%) : bool +function Input::__remove_tablefilter%(id: Input::ID, name: string%) : bool %{ bool res = input_mgr->RemoveTableFilter(id->AsEnumVal(), name->AsString()->CheckString()); return new Val( res, TYPE_BOOL); diff --git a/src/types.bif b/src/types.bif index f90a954224..15fab1a7b1 100644 --- a/src/types.bif +++ b/src/types.bif @@ -180,7 +180,6 @@ enum Event %{ EVENT_REMOVED, %} - enum ID %{ Unknown, %} diff --git a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro index 3ad45eac69..10cc7376a8 100644 --- a/testing/btest/scripts/base/frameworks/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/input/basic.bro @@ -15,7 +15,7 @@ redef InputAscii::empty_field = "EMPTY"; module A; export { - redef enum Log::ID += { LOG }; + redef enum Input::ID += { INPUT }; } type Idx: record { @@ -45,8 +45,8 @@ global servers: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... - Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_tablefilter(A::LOG, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); - Input::force_update(A::LOG); + Input::create_stream(A::INPUT, [$source="input.log"]); + Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::force_update(A::INPUT); print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/event.bro b/testing/btest/scripts/base/frameworks/input/event.bro index d9be733a1b..a07f0934a0 100644 --- a/testing/btest/scripts/base/frameworks/input/event.bro +++ b/testing/btest/scripts/base/frameworks/input/event.bro @@ -20,7 +20,7 @@ module A; export { - redef enum Log::ID += { LOG }; + redef enum Input::ID += { INPUT }; } type Val: record { @@ -36,7 +36,7 @@ event line(tpe: Input::Event, i: int, b: bool) { event bro_init() { - Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_eventfilter(A::LOG, [$name="input", $fields=Val, $ev=line]); - Input::force_update(A::LOG); + Input::create_stream(A::INPUT, [$source="input.log"]); + Input::add_eventfilter(A::INPUT, [$name="input", $fields=Val, $ev=line]); + Input::force_update(A::INPUT); } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro index 134ceb49e6..88838cc8d6 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro @@ -15,7 +15,7 @@ redef InputAscii::empty_field = "EMPTY"; module A; export { - redef enum Log::ID += { LOG }; + redef enum Input::ID += { INPUT }; } type Idx: record { @@ -31,8 +31,8 @@ global servers: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... 
- Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); - Input::force_update(A::LOG); + Input::create_stream(A::INPUT, [$source="input.log"]); + Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); + Input::force_update(A::INPUT); print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro index c07c9c826c..fc4d862cd3 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro @@ -15,7 +15,7 @@ redef InputAscii::empty_field = "EMPTY"; module A; export { - redef enum Log::ID += { LOG }; + redef enum Input::ID += { INPUT }; } type Idx: record { @@ -31,8 +31,8 @@ global servers: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... - Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=servers]); - Input::force_update(A::LOG); + Input::create_stream(A::INPUT, [$source="input.log"]); + Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers]); + Input::force_update(A::INPUT); print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/predicate.bro b/testing/btest/scripts/base/frameworks/input/predicate.bro index 769536d2a6..5e6bae7b62 100644 --- a/testing/btest/scripts/base/frameworks/input/predicate.bro +++ b/testing/btest/scripts/base/frameworks/input/predicate.bro @@ -21,7 +21,7 @@ redef InputAscii::empty_field = "EMPTY"; module A; export { - redef enum Log::ID += { LOG }; + redef enum Input::ID += { INPUT }; } type Idx: record { @@ -37,11 +37,11 @@ global servers: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... - Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, + Input::create_stream(A::INPUT, [$source="input.log"]); + Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } ]); - Input::force_update(A::LOG); + Input::force_update(A::INPUT); if ( 1 in servers ) { print "VALID"; } diff --git a/testing/btest/scripts/base/frameworks/input/twofilters.bro b/testing/btest/scripts/base/frameworks/input/twofilters.bro index 575665e6e5..5af664e0e9 100644 --- a/testing/btest/scripts/base/frameworks/input/twofilters.bro +++ b/testing/btest/scripts/base/frameworks/input/twofilters.bro @@ -21,7 +21,7 @@ redef InputAscii::empty_field = "EMPTY"; module A; export { - redef enum Log::ID += { LOG }; + redef enum Input::ID += { INPUT }; } type Idx: record { @@ -38,13 +38,13 @@ global destination2: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... 
-	Input::create_stream(A::LOG, [$source="input.log"]);
-	Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=destination1, $want_record=F,
+	Input::create_stream(A::INPUT, [$source="input.log"]);
+	Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=destination1, $want_record=F,
 				$pred(typ: Input::Event, left: Idx, right: bool) = { return right; }
 				]);
-	Input::add_tablefilter(A::LOG, [$name="input2",$idx=Idx, $val=Val, $destination=destination2]);
+	Input::add_tablefilter(A::INPUT, [$name="input2",$idx=Idx, $val=Val, $destination=destination2]);
-	Input::force_update(A::LOG);
+	Input::force_update(A::INPUT);
 	if ( 1 in destination1 ) {
 		print "VALID";
 	}

From 1abb1424b8f1e050f24ffb9a9a7829fd8e10c3f0 Mon Sep 17 00:00:00 2001
From: Bernhard Amann
Date: Mon, 28 Nov 2011 14:15:04 -0800
Subject: [PATCH 056/651] begin documenting...

---
 doc/index.rst |   1 +
 doc/input.rst | 190 ++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 191 insertions(+)
 create mode 100644 doc/input.rst

diff --git a/doc/index.rst b/doc/index.rst
index 0a6b225431..975ee7def4 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -20,6 +20,7 @@ Frameworks
    notice
    logging
+   input
    cluster
    signatures

diff --git a/doc/input.rst b/doc/input.rst
new file mode 100644
index 0000000000..78e96fe06e
--- /dev/null
+++ b/doc/input.rst
@@ -0,0 +1,190 @@
+=====================
+Loading Data into Bro
+=====================
+
+.. rst-class:: opening
+
+    Bro comes with a flexible input interface that allows reading
+    previously stored data. Data is either read into Bro tables or
+    sent to scripts using events.
+    This document describes how the input framework can be used.
+
+.. contents::
+
+Terminology
+===========
+
+Bro's input framework is built around three main abstractions, which are
+very similar to the abstractions used in the logging framework:
+
+    Input Streams
+        An input stream corresponds to a single input source
+        (usually a text file). It defines the information necessary
+        to find the source (e.g. the filename).
+
+    Filters
+        Each input stream has a set of filters attached to it that
+        determine exactly what kind of information is read.
+        There are two different kinds of filters: event filters and table
+        filters.
+        By default, event filters generate an event for each line read
+        from the input source.
+        Table filters, on the other hand, read the input source into a Bro
+        table for easy access later.
+
+    Readers
+        A reader defines the input format for the specific input stream.
+        At the moment, Bro comes with only one type of reader, which can
+        read the tab-separated ASCII log files that were generated by the
+        logging framework.
+
+
+Basics
+======
+
+For examples, please look at the unit tests in
+``testing/btest/scripts/base/frameworks/input/``.
+
+A very basic example to open an input stream is:
+
+.. code:: bro
+
+    module Foo;
+
+    export {
+        # Create an ID for our new stream
+        redef enum Input::ID += { INPUT };
+    }
+
+    event bro_init() {
+        Input::create_stream(Foo::INPUT, [$source="input.log"]);
+    }
+
+The fields that can be set when creating a stream are:
+
+    ``source``
+        A mandatory string identifying the source of the data.
+        For the ASCII reader this is the filename.
+
+    ``reader``
+        The reader used for this stream. Default is ``READER_ASCII``.
+
+
+Filters
+=======
+
+Each filter defines the data fields that it wants to receive from the respective
+input file. Depending on the type of filter, events or a table are created from
+the data in the source file.
+
+Event Filters
+-------------
+
+Event filters are filters that generate an event for each line of the input source.
+
+For example, a simple filter retrieving the fields ``i`` and ``b`` from an input source
+could be defined as follows:
+
+.. code:: bro
+
+    type Val: record {
+        i: int;
+        b: bool;
+    };
+
+    event line(tpe: Input::Event, i: int, b: bool) {
+        # work with event data
+    }
+
+    event bro_init() {
+        # Input stream definition, etc.
+        ...
+
+        Input::add_eventfilter(Foo::INPUT, [$name="input", $fields=Val, $ev=line]);
+
+        # read the file after all filters have been set
+        Input::force_update(Foo::INPUT);
+    }
+
+The fields that can be set for an event filter are:
+
+    ``name``
+        A mandatory name for the filter that can later be used
+        to manipulate it further.
+
+    ``fields``
+        Name of a record type containing the fields that should be retrieved from
+        the input stream.
+
+    ``ev``
+        The event that is fired after a line has been read from the input source.
+        The first argument that is passed to the event is an Input::Event structure,
+        followed by the data, either inside of a record (if ``want_record`` is set) or as
+        individual fields.
+        The Input::Event structure indicates whether the received line is ``NEW``, has
+        been ``CHANGED`` or has been ``DELETED``. Since the ASCII reader cannot track this
+        information for event filters, the value is always ``NEW`` at the moment.
+
+    ``want_record``
+        Boolean value that defines whether the event receives the fields inside of
+        a single record value, or individually (default).
+
+Table Filters
+-------------
+
+Table filters are the second, more complex type of filter.
+
+Table filters store the information they read from an input source in a Bro table. For example,
+when reading a file that contains IP addresses and connection attempt information one could use
+an approach similar to this:
+
+.. code:: bro
+
+    type Idx: record {
+        a: addr;
+    };
+
+    type Val: record {
+        tries: count;
+    };
+
+    global conn_attempts: table[addr] of count = table();
+
+    event bro_init() {
+        # Input stream definitions, etc.
+        ...
+
+        Input::add_tablefilter(Foo::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=conn_attempts, $want_record=F]);
+
+        # read the file after all filters have been set
+        Input::force_update(Foo::INPUT);
+    }
+
+The table conn_attempts will then contain the information about connection attempts.
+
+The possible fields that can be set for a table filter are:
+
+    ``name``
+        A mandatory name for the filter that can later be used
+        to manipulate it further.
+
+    ``idx``
+        Record type that defines the index of the table.
+
+    ``val``
+        Record type that defines the values of the table.
+
+    ``want_record``
+        Defines whether the values of the table should be stored as a record (default)
+        or as a simple value. Must be true if Val contains more than one element.
+
+    ``destination``
+        The destination table.
+
+    ``ev``
+        Optional event that is raised when values are added to, changed in, or deleted from the table.
+        Events are passed an Input::Event description as the first argument, the index record as the second argument
+        and the values as the third argument.
+
+    ``pred``
+        Optional predicate that can prevent entries from being added to the table and events from being sent (see the example below).
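+
+To illustrate the ``ev`` and ``pred`` fields described above, the filter from the
+previous example can also raise an event for every accepted change and use a
+predicate to skip uninteresting entries. The following is only an illustrative
+sketch; the event name ``attempt_seen`` and the threshold used in the predicate
+are arbitrary placeholders, not part of the framework:
+
+.. code:: bro
+
+    event attempt_seen(tpe: Input::Event, left: Idx, right: count) {
+        # tpe is Input::EVENT_NEW, Input::EVENT_CHANGED or Input::EVENT_REMOVED.
+        print tpe;
+        print left;
+        print right;
+    }
+
+    event bro_init() {
+        # Same stream and destination as above, but only hosts with more than
+        # one attempt are kept, and every accepted change raises attempt_seen.
+        Input::add_tablefilter(Foo::INPUT,
+            [$name="ssh", $idx=Idx, $val=Val, $destination=conn_attempts,
+             $want_record=F,
+             $pred(typ: Input::Event, left: Idx, right: count) = { return right > 1; },
+             $ev=attempt_seen]);
+
+        Input::force_update(Foo::INPUT);
+    }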
From 2a6387129ceb68941beccb866822958c661ab2c4 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 29 Nov 2011 11:25:11 -0800 Subject: [PATCH 057/651] documentation --- doc/scripts/DocSourcesList.cmake | 3 + scripts/base/frameworks/input/main.bro | 96 ++++++++++++++++++++------ 2 files changed, 78 insertions(+), 21 deletions(-) diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake index 9d99effc02..bd41d301c0 100644 --- a/doc/scripts/DocSourcesList.cmake +++ b/doc/scripts/DocSourcesList.cmake @@ -19,6 +19,7 @@ rest_target(${psd} base/init-bare.bro internal) rest_target(${CMAKE_BINARY_DIR}/src base/bro.bif.bro) rest_target(${CMAKE_BINARY_DIR}/src base/const.bif.bro) rest_target(${CMAKE_BINARY_DIR}/src base/event.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/input.bif.bro) rest_target(${CMAKE_BINARY_DIR}/src base/logging.bif.bro) rest_target(${CMAKE_BINARY_DIR}/src base/reporter.bif.bro) rest_target(${CMAKE_BINARY_DIR}/src base/strings.bif.bro) @@ -31,6 +32,8 @@ rest_target(${psd} base/frameworks/cluster/setup-connections.bro) rest_target(${psd} base/frameworks/communication/main.bro) rest_target(${psd} base/frameworks/control/main.bro) rest_target(${psd} base/frameworks/dpd/main.bro) +rest_target(${psd} base/frameworks/input/main.bro) +rest_target(${psd} base/frameworks/input/readers/ascii.bro) rest_target(${psd} base/frameworks/intel/main.bro) rest_target(${psd} base/frameworks/logging/main.bro) rest_target(${psd} base/frameworks/logging/postprocessors/scp.bro) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 1a9e8b885c..66b13743b8 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -1,52 +1,106 @@ +##! The input framework provides a way to read previously stored data either +##! as an event stream or into a bro table. module Input; export { + ## The default input reader used. Defaults to `READER_ASCII`. const default_reader = READER_ASCII &redef; + ## Stream decription type used for the `create_stream` method type StreamDescription: record { + ## String that allows the reader to find the source. + ## For `READER_ASCII`, this is the filename. source: string; + + ## Reader to use for this steam reader: Reader &default=default_reader; }; + ## TableFilter description type used for the `add_tablefilter` method. type TableFilter: record { - ## descriptive name. for later removal + ## Descriptive name. Used to remove a filter at a later time name: string; - ## for tables - idx: any; - val: any; + ## Table which will contain the data read by the input framework destination: any; + ## Record that defines the values used as the index of the table + idx: any; + ## Record that defines the values used as the values of the table + val: any; + ## Defines if the value of the table is a record (default), or a single value. + ## Val can only contain one element when this is set to false. want_record: bool &default=T; + + ## The event that is raised each time a value is added to, changed in or removed from the table. + ## The event will receive an Input::Event enum as the first argument, the idx record as the second argument + ## and the value (record) as the third argument. ev: any &optional; # event containing idx, val as values. - ## decision function, that decides if an insertion, update or removal should really be executed. - ## or events should be thought + ## Predicate function, that can decide if an insertion, update or removal should really be executed. 
+ ## Parameters are the same as for the event. If true is returned, the update is performed. If false + ## is returned, it is skipped pred: function(typ: Input::Event, left: any, right: any): bool &optional; }; + ## EventFilter description type used for the `add_eventfilter` method. type EventFilter: record { - ## descriptive name. for later removal + ## Descriptive name. Used to remove a filter at a later time name: string; - # the event - ev: any; - # record describing the fields + ## Record describing the fields to be retrieved from the source input. fields: any; - - # does the event want the field unrolled (default) or as a simple record value? + ## If want_record if false (default), the event receives each value in fields as a seperate argument. + ## If it is set to true, the event receives all fields in a signle record value. want_record: bool &default=F; + + ## The event that is rised each time a new line is received from the reader. + ## The event will receive an Input::Event enum as the first element, and the fields as the following arguments. + ev: any; + }; #const no_filter: Filter = [$name="", $idx="", $val="", $destination=""]; # Sentinel. - global create_stream: function(id: Log::ID, description: Input::StreamDescription) : bool; - global remove_stream: function(id: Log::ID) : bool; - global force_update: function(id: Log::ID) : bool; - global add_tablefilter: function(id: Log::ID, filter: Input::TableFilter) : bool; - global remove_tablefilter: function(id: Log::ID, name: string) : bool; - global add_eventfilter: function(id: Log::ID, filter: Input::EventFilter) : bool; - global remove_eventfilter: function(id: Log::ID, name: string) : bool; + ## Create a new input stream from a given source. Returns true on success. + ## + ## id: `Input::ID` enum value identifying this stream + ## description: `StreamDescription` record describing the source. + global create_stream: function(id: Input::ID, description: Input::StreamDescription) : bool; + + ## Remove a current input stream. Returns true on success. + ## + ## id: `Input::ID` enum value identifying the stream to be removed + global remove_stream: function(id: Input::ID) : bool; + + ## Forces the current input to be checked for changes. + ## + ## id: `Input::ID` enum value identifying the stream + global force_update: function(id: Input::ID) : bool; + + ## Adds a table filter to a specific input stream. Returns true on success. + ## + ## id: `Input::ID` enum value identifying the stream + ## filter: the `TableFilter` record describing the filter. + global add_tablefilter: function(id: Input::ID, filter: Input::TableFilter) : bool; + + ## Removes a named table filter to a specific input stream. Returns true on success. + ## + ## id: `Input::ID` enum value identifying the stream + ## name: the name of the filter to be removed. + global remove_tablefilter: function(id: Input::ID, name: string) : bool; + + ## Adds an event filter to a specific input stream. Returns true on success. + ## + ## id: `Input::ID` enum value identifying the stream + ## filter: the `EventFilter` record describing the filter. + global add_eventfilter: function(id: Input::ID, filter: Input::EventFilter) : bool; + + ## Removes a named event filter to a specific input stream. Returns true on success. + ## + ## id: `Input::ID` enum value identifying the stream + ## name: the name of the filter to be removed. 
+ global remove_eventfilter: function(id: Input::ID, name: string) : bool; #global get_filter: function(id: ID, name: string) : Filter; } @@ -85,13 +139,13 @@ function remove_tablefilter(id: Input::ID, name: string) : bool return __remove_tablefilter(id, name); } -function add_eventfilter(id: Log::ID, filter: Input::EventFilter) : bool +function add_eventfilter(id: Input::ID, filter: Input::EventFilter) : bool { # filters[id, filter$name] = filter; return __add_eventfilter(id, filter); } -function remove_eventfilter(id: Log::ID, name: string) : bool +function remove_eventfilter(id: Input::ID, name: string) : bool { # delete filters[id, name]; return __remove_eventfilter(id, name); From a68e6b9fa49211a7c51c45f83ec0c610f03a956d Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 29 Nov 2011 14:32:53 -0800 Subject: [PATCH 058/651] allow sets to be read from files, convenience function for reading a file once, bug in destructor that could lead to a segfault. --- scripts/base/frameworks/input/main.bro | 34 +++++++++++- src/InputMgr.cc | 54 ++++++++++++++----- .../scripts/base/frameworks/input/basic.bro | 2 + 3 files changed, 75 insertions(+), 15 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 66b13743b8..c76eba80b9 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -4,6 +4,8 @@ module Input; export { + redef enum Input::ID += { TABLE_READ }; + ## The default input reader used. Defaults to `READER_ASCII`. const default_reader = READER_ASCII &redef; @@ -27,7 +29,8 @@ export { ## Record that defines the values used as the index of the table idx: any; ## Record that defines the values used as the values of the table - val: any; + ## If val is undefined, destination has to be a set. + val: any &optional; ## Defines if the value of the table is a record (default), or a single value. ## Val can only contain one element when this is set to false. want_record: bool &default=T; @@ -102,6 +105,14 @@ export { ## name: the name of the filter to be removed. global remove_eventfilter: function(id: Input::ID, name: string) : bool; #global get_filter: function(id: ID, name: string) : Filter; + + ## Convenience function for reading a specific input source exactly once using + ## exactly one tablefilter + ## + ## id: `Input::ID` enum value identifying the stream + ## description: `StreamDescription` record describing the source. + ## filter: the `TableFilter` record describing the filter. + global read_table: function(description: Input::StreamDescription, filter: Input::TableFilter) : bool; } @@ -151,6 +162,27 @@ function remove_eventfilter(id: Input::ID, name: string) : bool return __remove_eventfilter(id, name); } +function read_table(description: Input::StreamDescription, filter: Input::TableFilter) : bool { + local ok: bool = T; + # since we create and delete it ourselves this should be ok... 
at least for singlethreaded operation + local id: Input::ID = Input::TABLE_READ; + + ok = create_stream(id, description); + if ( ok ) { + ok = add_tablefilter(id, filter); + } + if ( ok ) { + ok = force_update(id); + } + if ( ok ) { + ok = remove_stream(id); + } else { + remove_stream(id); + } + + return ok; +} + #function get_filter(id: ID, name: string) : Filter # { # if ( [id, name] in filters ) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 0df11ea359..7ec5974199 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -86,7 +86,8 @@ InputMgr::Filter::~Filter() { InputMgr::TableFilter::~TableFilter() { Unref(tab); Unref(itype); - Unref(rtype); + if ( rtype ) // can be 0 for sets + Unref(rtype); delete currDict; delete lastDict; @@ -110,6 +111,7 @@ InputMgr::ReaderInfo::~ReaderInfo() { while ( it != filters.end() ) { delete (*it).second; + ++it; } Unref(type); @@ -354,7 +356,10 @@ bool InputMgr::AddTableFilter(EnumVal *id, RecordVal* fval) { Val* pred = fval->Lookup(rtype->FieldOffset("pred")); RecordType *idx = fval->Lookup(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); - RecordType *val = fval->Lookup(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); + RecordType *val = 0; + if ( fval->Lookup(rtype->FieldOffset("val")) != 0 ) { + val = fval->Lookup(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); + } TableVal *dst = fval->Lookup(rtype->FieldOffset("destination"))->AsTableVal(); Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); @@ -408,9 +413,14 @@ bool InputMgr::AddTableFilter(EnumVal *id, RecordVal* fval) { int idxfields = fieldsV.size(); - status = status || !UnrollRecordType(&fieldsV, val, ""); + if ( val ) // if we are not a set + status = status || !UnrollRecordType(&fieldsV, val, ""); + int valfields = fieldsV.size() - idxfields; + if ( !val ) + assert(valfields == 0); + if ( status ) { reporter->Error("Problem unrolling"); return false; @@ -429,7 +439,7 @@ bool InputMgr::AddTableFilter(EnumVal *id, RecordVal* fval) { filter->num_idx_fields = idxfields; filter->num_val_fields = valfields; filter->tab = dst->Ref()->AsTableVal(); - filter->rtype = val->Ref()->AsRecordType(); + filter->rtype = val ? val->Ref()->AsRecordType() : 0; filter->itype = idx->Ref()->AsRecordType(); filter->event = event ? event_registry->Lookup(event->GetID()->Name()) : 0; filter->currDict = new PDict(InputHash); @@ -681,7 +691,10 @@ void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* c HashKey* idxhash = HashLogVals(filter->num_idx_fields, vals); //reporter->Error("Result: %d", (uint64_t) idxhash->Hash()); //reporter->Error("Hashing %d val fields", i->num_val_fields); - HashKey* valhash = HashLogVals(filter->num_val_fields, vals+filter->num_idx_fields); + HashKey* valhash = 0; + if ( filter->num_val_fields > 0 ) + HashLogVals(filter->num_val_fields, vals+filter->num_idx_fields); + //reporter->Error("Result: %d", (uint64_t) valhash->Hash()); //reporter->Error("received entry with idxhash %d and valhash %d", (uint64_t) idxhash->Hash(), (uint64_t) valhash->Hash()); @@ -689,12 +702,13 @@ void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* c InputHash *h = filter->lastDict->Lookup(idxhash); if ( h != 0 ) { // seen before - if ( h->valhash->Hash() == valhash->Hash() ) { - // ok, double. 
+ if ( filter->num_val_fields == 0 || h->valhash->Hash() == valhash->Hash() ) { + // ok, exact duplicate filter->lastDict->Remove(idxhash); filter->currDict->Insert(idxhash, h); return; } else { + assert( filter->num_val_fields > 0 ); // updated filter->lastDict->Remove(idxhash); delete(h); @@ -708,7 +722,9 @@ void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* c Val* valval; int position = filter->num_idx_fields; - if ( filter->num_val_fields == 1 && !filter->want_record ) { + if ( filter->num_val_fields == 0 ) { + valval = 0; + } else if ( filter->num_val_fields == 1 && !filter->want_record ) { valval = LogValToVal(vals[position], filter->rtype->FieldType(0)); } else { RecordVal * r = new RecordVal(filter->rtype); @@ -732,8 +748,9 @@ void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* c Val* oldval = 0; if ( updated == true ) { - // in that case, we need the old value to send the event (if we send an event). - oldval = filter->tab->Lookup(idxval); + assert(filter->num_val_fields > 0); + // in that case, we need the old value to send the event (if we send an event). + oldval = filter->tab->Lookup(idxval); } @@ -749,10 +766,12 @@ void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* c ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); } - val_list vl(3); + val_list vl( 2 + (filter->num_val_fields > 0) ); // 2 if we don't have values, 3 otherwise. vl.append(ev); vl.append(idxval); - vl.append(valval); + if ( filter->num_val_fields > 0 ) + vl.append(valval); + Val* v = filter->pred->Call(&vl); bool result = v->AsBool(); Unref(v); @@ -794,6 +813,7 @@ void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* c Ref(idxval); if ( updated ) { // in case of update send back the old value. 
+ assert ( filter->num_val_fields > 0 ); ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); Ref(oldval); @@ -801,7 +821,11 @@ void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* c } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); Ref(valval); - SendEvent(filter->event, 3, ev, idxval, valval); + if ( filter->num_val_fields == 0 ) { + SendEvent(filter->event, 3, ev, idxval); + } else { + SendEvent(filter->event, 3, ev, idxval, valval); + } } } } @@ -963,7 +987,9 @@ void InputMgr::PutTable(const InputReader* reader, int id, const LogVal* const * Val* valval; int position = filter->num_idx_fields; - if ( filter->num_val_fields == 1 && !filter->want_record ) { + if ( filter->num_val_fields == 0 ) { + valval = 0; + } else if ( filter->num_val_fields == 1 && !filter->want_record ) { valval = LogValToVal(vals[filter->num_idx_fields], filter->rtype->FieldType(filter->num_idx_fields)); } else { RecordVal * r = new RecordVal(filter->rtype); diff --git a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro index 10cc7376a8..d1b6659eb6 100644 --- a/testing/btest/scripts/base/frameworks/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/input/basic.bro @@ -49,4 +49,6 @@ event bro_init() Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); Input::force_update(A::INPUT); print servers; + Input::remove_tablefilter(A::INPUT, "ssh"); + Input::remove_stream(A::INPUT); } From 78b24da7e4f84de7f499cc9365a30e7c966a7d14 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 5 Dec 2011 15:02:03 -0800 Subject: [PATCH 059/651] start support for annotation for log field types. commit before rolling part of it back... 
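(Illustration only; this sketch is not part of the patch itself.) The annotation is meant to let a record field used by the input framework name a second input column that carries extra type information. A minimal sketch of the intended script-level usage, with a hypothetical record, field and column name; the attribute is exercised for real a few patches later in port.bro:

    # Hypothetical input record; "proto" is an assumed column name.
    # &type_column tells the ASCII reader which extra input column holds
    # the transport protocol belonging to the port number.
    type Val: record {
        p: port &type_column="proto";
    };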
--- src/Attr.h | 1 + src/InputMgr.cc | 10 ++++++++++ src/InputReaderAscii.h | 3 +++ src/LogMgr.cc | 8 +++++--- src/LogMgr.h | 6 ++++-- src/parse.y | 5 ++++- src/scan.l | 1 + 7 files changed, 28 insertions(+), 6 deletions(-) diff --git a/src/Attr.h b/src/Attr.h index 6c835dc61c..471acfe4ba 100644 --- a/src/Attr.h +++ b/src/Attr.h @@ -35,6 +35,7 @@ typedef enum { ATTR_GROUP, ATTR_LOG, ATTR_ERROR_HANDLER, + ATTR_TYPE_COLUMN, // for input framework ATTR_TRACKED, // hidden attribute, tracked by NotifierRegistry #define NUM_ATTRS (int(ATTR_TRACKED) + 1) } attr_tag; diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 7ec5974199..0cfb59ee90 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -561,6 +561,16 @@ bool InputMgr::UnrollRecordType(vector *fields, const RecordType *rec field->subtype = rec->FieldType(i)->AsSetType()->Indices()->PureType()->Tag(); } else if ( field->type == TYPE_VECTOR ) { field->subtype = rec->FieldType(i)->AsVectorType()->YieldType()->Tag(); + } else if ( field->type == TYPE_PORT && + rec->FieldDecl(i)->FindAttr(ATTR_TYPE_COLUMN) ) { + // we have an annotation for the second column + + Val* c = rec->FieldDecl(i)->FindAttr(ATTR_TYPE_COLUMN)->AttrExpr()->Eval(0); + + assert(c); + assert(c->Type()->Tag() == TYPE_STRING); + + field->secondary_name = c->AsStringVal()->AsString()->CheckString(); } fields->push_back(field); diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index 01169a3cfc..c174248454 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -12,8 +12,11 @@ struct FieldMapping { string name; TypeTag type; + // internal type for sets and vectors TypeTag subtype; int position; + // for ports: pos of the second field + int secondary_position; FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position); FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_subtype, int arg_position); diff --git a/src/LogMgr.cc b/src/LogMgr.cc index 7fc8d4ef86..dd4f9c8e5a 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -83,7 +83,8 @@ bool LogField::Read(SerializationFormat* fmt) int t; int it; - bool success = (fmt->Read(&name, "name") && fmt->Read(&t, "type") && fmt->Read(&it, "subtype") ); + bool success = (fmt->Read(&name, "name") && fmt->Read(&secondary_name, "secondary_name") && + fmt->Read(&t, "type") && fmt->Read(&it, "subtype") ); type = (TypeTag) t; subtype = (TypeTag) it; @@ -92,7 +93,8 @@ bool LogField::Read(SerializationFormat* fmt) bool LogField::Write(SerializationFormat* fmt) const { - return (fmt->Write(name, "name") && fmt->Write((int)type, "type") && fmt->Write((int)subtype, "subtype")); + return (fmt->Write(name, "name") && fmt->Write(secondary_name, "secondary_name") && fmt->Write((int)type, "type") && + fmt->Write((int)subtype, "subtype")); } LogVal::~LogVal() @@ -151,7 +153,7 @@ bool LogVal::IsCompatibleType(BroType* t, bool atomic_only) if ( ! t->IsSet() ) return false; - return IsCompatibleType(t->AsSetType()->Indices()->PureType(), true); + return IsCompatibleType(t->AsSetType()->Indices()->PureType()); } case TYPE_VECTOR: diff --git a/src/LogMgr.h b/src/LogMgr.h index b8530d29ab..4ccdeb793c 100644 --- a/src/LogMgr.h +++ b/src/LogMgr.h @@ -14,13 +14,15 @@ class SerializationFormat; // Description of a log field. struct LogField { string name; + // needed by input framework. port fields have two names (one for the port, one for the type) - this specifies the secondary name. + string secondary_name; TypeTag type; - // needed by input framework. 
otherwise it cannot determine the inner type of a set or vector. + // needed by input framework. otherwise it cannot determine the inner type of a set. TypeTag subtype; LogField() { } LogField(const LogField& other) - : name(other.name), type(other.type), subtype(other.subtype) { } + : name(other.name), secondary_name(other.secondary_name), type(other.type), subtype(other.subtype) { } // (Un-)serialize. bool Read(SerializationFormat* fmt); diff --git a/src/parse.y b/src/parse.y index 495931aae0..988a19714b 100644 --- a/src/parse.y +++ b/src/parse.y @@ -2,7 +2,7 @@ // See the file "COPYING" in the main distribution directory for copyright. %} -%expect 88 +%expect 91 %token TOK_ADD TOK_ADD_TO TOK_ADDR TOK_ANY %token TOK_ATENDIF TOK_ATELSE TOK_ATIF TOK_ATIFDEF TOK_ATIFNDEF @@ -24,6 +24,7 @@ %token TOK_ATTR_PERSISTENT TOK_ATTR_SYNCHRONIZED %token TOK_ATTR_DISABLE_PRINT_HOOK TOK_ATTR_RAW_OUTPUT TOK_ATTR_MERGEABLE %token TOK_ATTR_PRIORITY TOK_ATTR_GROUP TOK_ATTR_LOG TOK_ATTR_ERROR_HANDLER +%token TOK_ATTR_TYPE_COLUMN %token TOK_DEBUG @@ -1313,6 +1314,8 @@ attr: { $$ = new Attr(ATTR_PRIORITY, $3); } | TOK_ATTR_GROUP '=' expr { $$ = new Attr(ATTR_GROUP, $3); } + | TOK_ATTR_TYPE_COLUMN '=' expr + { $$ = new Attr(ATTR_TYPE_COLUMN, $3); } | TOK_ATTR_LOG { $$ = new Attr(ATTR_LOG); } | TOK_ATTR_ERROR_HANDLER diff --git a/src/scan.l b/src/scan.l index 7ebd7894e1..6a85e89780 100644 --- a/src/scan.l +++ b/src/scan.l @@ -308,6 +308,7 @@ when return TOK_WHEN; &optional return TOK_ATTR_OPTIONAL; &persistent return TOK_ATTR_PERSISTENT; &priority return TOK_ATTR_PRIORITY; +&type_column return TOK_ATTR_TYPE_COLUMN; &read_expire return TOK_ATTR_EXPIRE_READ; &redef return TOK_ATTR_REDEF; &rotate_interval return TOK_ATTR_ROTATE_INTERVAL; From aecbbdd966c896b475591a3415578adc1629273e Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 5 Dec 2011 16:18:54 -0800 Subject: [PATCH 060/651] make logging framework send the protocol to the writer. for use in future writers, that have a special type for port, which includes the protocol. 
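(Aside, not part of the patch.) The point of shipping the protocol is that a writer can eventually render a port as, say, "80/tcp" rather than a bare number. A hedged sketch of how a future writer backend might use the port_val layout added below; RenderPort is a hypothetical helper, not code from the tree:

    // Hypothetical helper, assuming the port_val struct introduced in this
    // patch (a numeric port plus a string* naming the transport protocol).
    static void RenderPort(ODesc* desc, const LogVal* val)
        {
        desc->Add(val->val.port_val.port);
        desc->Add("/");
        desc->Add(val->val.port_val.proto ? val->val.port_val.proto->c_str()
                                          : "unknown");
        }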
--- src/LogMgr.cc | 32 +++++++++++++++++++++++++++++--- src/LogMgr.h | 4 ++++ src/LogWriterAscii.cc | 5 ++++- 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/src/LogMgr.cc b/src/LogMgr.cc index dd4f9c8e5a..ed32e4e40b 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -118,6 +118,10 @@ LogVal::~LogVal() delete [] val.vector_val.vals; } + +// if ( type == TYPE_PORT && present ) +// delete val.port_val.proto; + } bool LogVal::IsCompatibleType(BroType* t, bool atomic_only) @@ -190,9 +194,12 @@ bool LogVal::Read(SerializationFormat* fmt) case TYPE_COUNT: case TYPE_COUNTER: - case TYPE_PORT: return fmt->Read(&val.uint_val, "uint"); + case TYPE_PORT: + val.port_val.proto = new string; + return fmt->Read(&val.port_val.port, "port") && fmt->Read(val.port_val.proto, "proto"); + case TYPE_SUBNET: { uint32 net[4]; @@ -305,9 +312,11 @@ bool LogVal::Write(SerializationFormat* fmt) const case TYPE_COUNT: case TYPE_COUNTER: - case TYPE_PORT: return fmt->Write(val.uint_val, "uint"); + case TYPE_PORT: + return fmt->Write(val.port_val.port, "port") && fmt->Write(*val.port_val.proto, "proto"); + case TYPE_SUBNET: { uint32 net[4]; @@ -1066,6 +1075,22 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns) return true; } +string LogMgr::TransportProtoToString(TransportProto p) { + switch ( p ) { + case TRANSPORT_UNKNOWN: + return "unknown"; + case TRANSPORT_TCP: + return "tcp"; + case TRANSPORT_UDP: + return "udp"; + case TRANSPORT_ICMP: + return "icmp"; + } + + assert(false); + return ""; +} + LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) { if ( ! ty ) @@ -1097,7 +1122,8 @@ LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) break; case TYPE_PORT: - lval->val.uint_val = val->AsPortVal()->Port(); + lval->val.port_val.port = val->AsPortVal()->Port(); + lval->val.port_val.proto = new string(TransportProtoToString(val->AsPortVal()->PortType())); break; case TYPE_SUBNET: diff --git a/src/LogMgr.h b/src/LogMgr.h index 4ccdeb793c..d9d61236e0 100644 --- a/src/LogMgr.h +++ b/src/LogMgr.h @@ -38,10 +38,12 @@ struct LogVal { // types we can log directly. struct set_t { bro_int_t size; LogVal** vals; }; typedef set_t vec_t; + struct port_t { bro_uint_t port; string* proto; }; union _val { bro_int_t int_val; bro_uint_t uint_val; + port_t port_val; uint32 addr_val[NUM_ADDR_WORDS]; subnet_type subnet_val; double double_val; @@ -136,6 +138,8 @@ private: Filter* FindFilter(EnumVal* id, StringVal* filter); WriterInfo* FindWriter(LogWriter* writer); + string TransportProtoToString(TransportProto p); + vector streams; // Indexed by stream enum. }; diff --git a/src/LogWriterAscii.cc b/src/LogWriterAscii.cc index 9b1fda3b62..c449c1a788 100644 --- a/src/LogWriterAscii.cc +++ b/src/LogWriterAscii.cc @@ -169,10 +169,13 @@ bool LogWriterAscii::DoWriteOne(ODesc* desc, LogVal* val, const LogField* field) case TYPE_COUNT: case TYPE_COUNTER: - case TYPE_PORT: desc->Add(val->val.uint_val); break; + case TYPE_PORT: + desc->Add(val->val.port_val.port); + break; + case TYPE_SUBNET: desc->Add(dotted_addr(val->val.subnet_val.net)); desc->Add("/"); From 4a690484ecaf5ea086090d2b6ef855cc6c913cad Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 6 Dec 2011 10:42:37 -0800 Subject: [PATCH 061/651] make port annotation work and ascii input reader way more rebust with better error messages. 
--- src/Attr.cc | 22 ++- src/InputMgr.cc | 44 ++++- src/InputMgr.h | 2 + src/InputReaderAscii.cc | 150 +++++++++--------- src/InputReaderAscii.h | 4 +- src/LogMgr.cc | 1 - .../scripts.base.frameworks.input.port/out | 6 + .../scripts/base/frameworks/input/port.bro | 39 +++++ 8 files changed, 187 insertions(+), 81 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.port/out create mode 100644 testing/btest/scripts/base/frameworks/input/port.bro diff --git a/src/Attr.cc b/src/Attr.cc index a5a350f452..1d610f7cb4 100644 --- a/src/Attr.cc +++ b/src/Attr.cc @@ -17,7 +17,7 @@ const char* attr_name(attr_tag t) "&persistent", "&synchronized", "&postprocessor", "&encrypt", "&match", "&disable_print_hook", "&raw_output", "&mergeable", "&priority", - "&group", "&log", "&error_handler", "(&tracked)", + "&group", "&log", "&error_handler", "&type_column", "(&tracked)", }; return attr_names[int(t)]; @@ -417,6 +417,26 @@ void Attributes::CheckAttr(Attr* a) Error("&log applied to a type that cannot be logged"); break; + case ATTR_TYPE_COLUMN: + { + if ( type->Tag() != TYPE_PORT ) + { + Error("type_column tag only applicable to ports"); + break; + } + + BroType* atype = a->AttrExpr()->Type(); + + if ( atype->Tag() != TYPE_STRING ) { + Error("type column needs to have a string argument"); + break; + } + + + break; + } + + default: BadTag("Attributes::CheckAttr", attr_name(a->Tag())); } diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 0cfb59ee90..612461bae8 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -1167,9 +1167,14 @@ int InputMgr::GetLogValLength(const LogVal* val) { case TYPE_COUNT: case TYPE_COUNTER: - case TYPE_PORT: length += sizeof(val->val.uint_val); break; + + case TYPE_PORT: + length += sizeof(val->val.port_val.port); + if ( val->val.port_val.proto != 0 ) + length += val->val.port_val.proto->size(); + break; case TYPE_DOUBLE: case TYPE_TIME: @@ -1228,12 +1233,24 @@ int InputMgr::CopyLogVal(char *data, const int startpos, const LogVal* val) { case TYPE_COUNT: case TYPE_COUNTER: - case TYPE_PORT: //*(data+startpos) = val->val.uint_val; memcpy(data+startpos, (const void*) &(val->val.uint_val), sizeof(val->val.uint_val)); return sizeof(val->val.uint_val); break; + case TYPE_PORT: { + int length = 0; + memcpy(data+startpos, (const void*) &(val->val.port_val.port), sizeof(val->val.port_val.port)); + length += sizeof(val->val.port_val.port); + if ( val->val.port_val.proto != 0 ) { + memcpy(data+startpos, val->val.port_val.proto->c_str(), val->val.port_val.proto->length()); + length += val->val.port_val.proto->size(); + } + return length; + break; + } + + case TYPE_DOUBLE: case TYPE_TIME: case TYPE_INTERVAL: @@ -1320,6 +1337,24 @@ HashKey* InputMgr::HashLogVals(const int num_elements, const LogVal* const *vals } +TransportProto InputMgr::StringToProto(const string &proto) { + if ( proto == "unknown" ) { + return TRANSPORT_UNKNOWN; + } else if ( proto == "tcp" ) { + return TRANSPORT_TCP; + } else if ( proto == "udp" ) { + return TRANSPORT_UDP; + } else if ( proto == "icmp" ) { + return TRANSPORT_ICMP; + } + + //assert(false); + + reporter->Error("Tried to parse invalid/unknown protocol: %s", proto.c_str()); + + return TRANSPORT_UNKNOWN; +} + Val* InputMgr::LogValToVal(const LogVal* val, BroType* request_type) { if ( request_type->Tag() != TYPE_ANY && request_type->Tag() != val->type ) { @@ -1357,7 +1392,10 @@ Val* InputMgr::LogValToVal(const LogVal* val, BroType* request_type) { } case TYPE_PORT: - return new PortVal(val->val.uint_val); + if ( 
val->val.port_val.proto == 0 ) + return new PortVal(val->val.port_val.port); + else + return new PortVal(val->val.port_val.port, StringToProto(*val->val.port_val.proto)); break; case TYPE_ADDR: diff --git a/src/InputMgr.h b/src/InputMgr.h index cebed231e4..ba6e208fd1 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -68,6 +68,8 @@ private: Val* LogValToIndexVal(int num_fields, const RecordType* type, const LogVal* const *vals); RecordVal* LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position); + TransportProto StringToProto(const string &proto); + ReaderInfo* FindReader(const InputReader* reader); ReaderInfo* FindReader(const EnumVal* id); diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 84feb74e61..501022d58e 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -10,25 +10,27 @@ FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int : name(arg_name), type(arg_type) { position = arg_position; + secondary_position = -1; } FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_subtype, int arg_position) : name(arg_name), type(arg_type), subtype(arg_subtype) { position = arg_position; + secondary_position = -1; } FieldMapping::FieldMapping(const FieldMapping& arg) : name(arg.name), type(arg.type), subtype(arg.subtype) { position = arg.position; + secondary_position = arg.secondary_position; } FieldMapping FieldMapping::subType() { return FieldMapping(name, subtype, position); } - InputReaderAscii::InputReaderAscii() { file = 0; @@ -122,45 +124,45 @@ bool InputReaderAscii::ReadHeader() { return false; } + map fields; + + // construcr list of field names. + istringstream splitstream(line); + int pos=0; + while ( splitstream ) { + string s; + if ( !getline(splitstream, s, separator[0])) + break; + + fields[s] = pos; + pos++; + } + + for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { - // split on tabs... - istringstream splitstream(line); - unsigned int currTab = 0; - int wantFields = 0; - while ( splitstream ) { - string s; - if ( !getline(splitstream, s, separator[0])) - break; - // current found heading in s... compare if we want it - for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { - const LogField* field = (*it).second.fields[i]; - if ( field->name == s ) { - // cool, found field. note position - FieldMapping f(field->name, field->type, field->subtype, i); - (*it).second.columnMap.push_back(f); - wantFields++; - break; // done with searching + for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { + const LogField* field = (*it).second.fields[i]; + + map::iterator fit = fields.find(field->name); + if ( fit == fields.end() ) { + Error(Fmt("Did not find requested field %s in input data file.", field->name.c_str())); + return false; + } + + + FieldMapping f(field->name, field->type, field->subtype, fields[field->name]); + if ( field->secondary_name != "" ) { + map::iterator fit2 = fields.find(field->secondary_name); + if ( fit2 == fields.end() ) { + Error(Fmt("Could not find requested port type field %s in input data file.", field->secondary_name.c_str())); + return false; } + f.secondary_position = fields[field->secondary_name]; } - - // look if we did push something... - if ( (*it).second.columnMap.size() == currTab ) { - // no, we didn't. note that... 
- FieldMapping empty; - (*it).second.columnMap.push_back(empty); - } - - // done - currTab++; - } - - if ( wantFields != (int) (*it).second.num_fields ) { - // we did not find all fields? - // :( - Error(Fmt("One of the requested fields could not be found in the input data file. Found %d fields, wanted %d. Filternum: %d", wantFields, (*it).second.num_fields, (*it).first)); - return false; + (*it).second.columnMap.push_back(f); } + } // well, that seems to have worked... @@ -220,10 +222,14 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { case TYPE_COUNT: case TYPE_COUNTER: - case TYPE_PORT: val->val.uint_val = atoi(s.c_str()); break; + case TYPE_PORT: + val->val.port_val.port = atoi(s.c_str()); + val->val.port_val.proto = 0; + break; + case TYPE_SUBNET: { int pos = s.find("/"); string width = s.substr(pos+1); @@ -346,59 +352,55 @@ bool InputReaderAscii::DoUpdate() { string line; while ( GetLine(line ) ) { + // split on tabs + istringstream splitstream(line); + + map stringfields; + int pos = 0; + while ( splitstream ) { + string s; + if ( !getline(splitstream, s, separator[0]) ) + break; + + stringfields[pos] = s; + pos++; + } + + pos--; // for easy comparisons of max element. for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { - // split on tabs - - istringstream splitstream(line); - LogVal** fields = new LogVal*[(*it).second.num_fields]; - //string string_fields[num_fields]; - unsigned int currTab = 0; - unsigned int currField = 0; - while ( splitstream ) { + int fpos = 0; + for ( vector::iterator fit = (*it).second.columnMap.begin(); + fit != (*it).second.columnMap.end(); + fit++ ){ - string s; - if ( !getline(splitstream, s, separator[0]) ) - break; - - - if ( currTab >= (*it).second.columnMap.size() ) { - Error("Tabs in heading do not match tabs in data?"); - //disabled = true; + if ( (*fit).position > pos || (*fit).secondary_position > pos ) { + Error(Fmt("Not enough fields in line %s. Found %d fields, want positions %d and %d", line.c_str(), pos, (*fit).position, (*fit).secondary_position)); return false; } - FieldMapping currMapping = (*it).second.columnMap[currTab]; - currTab++; - - if ( currMapping.IsEmpty() ) { - // well, that was easy - continue; - } - - if ( currField >= (*it).second.num_fields ) { - Error("internal error - fieldnum greater as possible"); - return false; - } - - LogVal* val = EntryToVal(s, currMapping); + LogVal* val = EntryToVal(stringfields[(*fit).position], *fit); if ( val == 0 ) { return false; } - fields[currMapping.position] = val; - //string_fields[currMapping.position] = s; + + if ( (*fit).secondary_position != -1 ) { + // we have a port definition :) + assert(val->type == TYPE_PORT ); + // Error(Fmt("Got type %d != PORT with secondary position!", val->type)); - currField++; - } - - if ( currField != (*it).second.num_fields ) { - Error("curr_field != num_fields in DoUpdate. 
Columns in file do not match column definition."); - return false; + val->val.port_val.proto = new string(stringfields[(*fit).secondary_position]); + } + + fields[fpos] = val; + + fpos++; } + assert ( (unsigned int) fpos == (*it).second.num_fields ); SendEntry((*it).first, fields); diff --git a/src/InputReaderAscii.h b/src/InputReaderAscii.h index c174248454..2670d785d5 100644 --- a/src/InputReaderAscii.h +++ b/src/InputReaderAscii.h @@ -21,10 +21,10 @@ struct FieldMapping { FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position); FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_subtype, int arg_position); FieldMapping(const FieldMapping& arg); - FieldMapping() { position = -1; } + FieldMapping() { position = -1; secondary_position = -1; } FieldMapping subType(); - bool IsEmpty() { return position == -1; } + //bool IsEmpty() { return position == -1; } }; diff --git a/src/LogMgr.cc b/src/LogMgr.cc index ed32e4e40b..307a2e24e8 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -1088,7 +1088,6 @@ string LogMgr::TransportProtoToString(TransportProto p) { } assert(false); - return ""; } LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.port/out b/testing/btest/Baseline/scripts.base.frameworks.input.port/out new file mode 100644 index 0000000000..6f2bd3271b --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.port/out @@ -0,0 +1,6 @@ +Trying to find field: t +{ +[1.2.3.4] = [p=80/tcp], +[1.2.4.6] = [p=30/unknown], +[1.2.3.5] = [p=52/udp] +} diff --git a/testing/btest/scripts/base/frameworks/input/port.bro b/testing/btest/scripts/base/frameworks/input/port.bro new file mode 100644 index 0000000000..6f98c363c7 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/port.bro @@ -0,0 +1,39 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#fields i p t +1.2.3.4 80 tcp +1.2.3.5 52 udp +1.2.4.6 30 unknown +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Input::ID += { INPUT }; +} + +type Idx: record { + i: addr; +}; + +type Val: record { + p: port &type_column="t"; +}; + +global servers: table[addr] of Val = table(); + +event bro_init() +{ + # first read in the old stuff into the table... + Input::create_stream(A::INPUT, [$source="input.log"]); + Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::force_update(A::INPUT); + print servers; + Input::remove_tablefilter(A::INPUT, "ssh"); + Input::remove_stream(A::INPUT); +} From 9f32f68a13f290f01747f02b05dbe4b8b63d2790 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 6 Dec 2011 10:50:36 -0800 Subject: [PATCH 062/651] make test more robust. 
--- .../Baseline/scripts.base.frameworks.input.port/out | 9 +++------ testing/btest/scripts/base/frameworks/input/port.bro | 10 ++++++---- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.port/out b/testing/btest/Baseline/scripts.base.frameworks.input.port/out index 6f2bd3271b..858551aa2f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.port/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.port/out @@ -1,6 +1,3 @@ -Trying to find field: t -{ -[1.2.3.4] = [p=80/tcp], -[1.2.4.6] = [p=30/unknown], -[1.2.3.5] = [p=52/udp] -} +[p=80/tcp] +[p=52/udp] +[p=30/unknown] diff --git a/testing/btest/scripts/base/frameworks/input/port.bro b/testing/btest/scripts/base/frameworks/input/port.bro index 6f98c363c7..c14892ae36 100644 --- a/testing/btest/scripts/base/frameworks/input/port.bro +++ b/testing/btest/scripts/base/frameworks/input/port.bro @@ -6,7 +6,7 @@ #fields i p t 1.2.3.4 80 tcp 1.2.3.5 52 udp -1.2.4.6 30 unknown +1.2.3.6 30 unknown @TEST-END-FILE redef InputAscii::empty_field = "EMPTY"; @@ -31,9 +31,11 @@ event bro_init() { # first read in the old stuff into the table... Input::create_stream(A::INPUT, [$source="input.log"]); - Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers]); Input::force_update(A::INPUT); - print servers; - Input::remove_tablefilter(A::INPUT, "ssh"); + print servers[1.2.3.4]; + print servers[1.2.3.5]; + print servers[1.2.3.6]; + Input::remove_tablefilter(A::INPUT, "input"); Input::remove_stream(A::INPUT); } From eb64eeedcd27c543becf330d7da48db9aa541ea8 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 6 Dec 2011 10:56:26 -0800 Subject: [PATCH 063/651] memleak fix. --- src/LogMgr.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/LogMgr.cc b/src/LogMgr.cc index 307a2e24e8..dd5bc4b66a 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -119,8 +119,8 @@ LogVal::~LogVal() delete [] val.vector_val.vals; } -// if ( type == TYPE_PORT && present ) -// delete val.port_val.proto; + if ( type == TYPE_PORT && present ) + delete val.port_val.proto; } From ca17a1cf46ce50a84038eecfa5da401351777d4b Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 5 Dec 2011 16:18:54 -0800 Subject: [PATCH 064/651] make logging framework send the protocol to the writer. for use in future writers, that have a special type for port, which includes the protocol. 
--- src/LogMgr.cc | 32 +++++++++++++++++++++++++++++--- src/LogMgr.h | 4 ++++ src/LogWriterAscii.cc | 5 ++++- 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/src/LogMgr.cc b/src/LogMgr.cc index 0b706f6417..8873b22b46 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -114,6 +114,10 @@ LogVal::~LogVal() delete [] val.vector_val.vals; } + +// if ( type == TYPE_PORT && present ) +// delete val.port_val.proto; + } bool LogVal::IsCompatibleType(BroType* t, bool atomic_only) @@ -186,9 +190,12 @@ bool LogVal::Read(SerializationFormat* fmt) case TYPE_COUNT: case TYPE_COUNTER: - case TYPE_PORT: return fmt->Read(&val.uint_val, "uint"); + case TYPE_PORT: + val.port_val.proto = new string; + return fmt->Read(&val.port_val.port, "port") && fmt->Read(val.port_val.proto, "proto"); + case TYPE_SUBNET: { uint32 net[4]; @@ -301,9 +308,11 @@ bool LogVal::Write(SerializationFormat* fmt) const case TYPE_COUNT: case TYPE_COUNTER: - case TYPE_PORT: return fmt->Write(val.uint_val, "uint"); + case TYPE_PORT: + return fmt->Write(val.port_val.port, "port") && fmt->Write(*val.port_val.proto, "proto"); + case TYPE_SUBNET: { uint32 net[4]; @@ -1062,6 +1071,22 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns) return true; } +string LogMgr::TransportProtoToString(TransportProto p) { + switch ( p ) { + case TRANSPORT_UNKNOWN: + return "unknown"; + case TRANSPORT_TCP: + return "tcp"; + case TRANSPORT_UDP: + return "udp"; + case TRANSPORT_ICMP: + return "icmp"; + } + + assert(false); + return ""; +} + LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) { if ( ! ty ) @@ -1093,7 +1118,8 @@ LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) break; case TYPE_PORT: - lval->val.uint_val = val->AsPortVal()->Port(); + lval->val.port_val.port = val->AsPortVal()->Port(); + lval->val.port_val.proto = new string(TransportProtoToString(val->AsPortVal()->PortType())); break; case TYPE_SUBNET: diff --git a/src/LogMgr.h b/src/LogMgr.h index 10530960cb..52acd04be5 100644 --- a/src/LogMgr.h +++ b/src/LogMgr.h @@ -34,10 +34,12 @@ struct LogVal { // types we can log directly. struct set_t { bro_int_t size; LogVal** vals; }; typedef set_t vec_t; + struct port_t { bro_uint_t port; string* proto; }; union _val { bro_int_t int_val; bro_uint_t uint_val; + port_t port_val; uint32 addr_val[NUM_ADDR_WORDS]; subnet_type subnet_val; double double_val; @@ -132,6 +134,8 @@ private: Filter* FindFilter(EnumVal* id, StringVal* filter); WriterInfo* FindWriter(LogWriter* writer); + string TransportProtoToString(TransportProto p); + vector streams; // Indexed by stream enum. }; diff --git a/src/LogWriterAscii.cc b/src/LogWriterAscii.cc index 9b1fda3b62..c449c1a788 100644 --- a/src/LogWriterAscii.cc +++ b/src/LogWriterAscii.cc @@ -169,10 +169,13 @@ bool LogWriterAscii::DoWriteOne(ODesc* desc, LogVal* val, const LogField* field) case TYPE_COUNT: case TYPE_COUNTER: - case TYPE_PORT: desc->Add(val->val.uint_val); break; + case TYPE_PORT: + desc->Add(val->val.port_val.port); + break; + case TYPE_SUBNET: desc->Add(dotted_addr(val->val.subnet_val.net)); desc->Add("/"); From a0da991030836d53bc669b64ad3de9b4dba34070 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 6 Dec 2011 10:56:26 -0800 Subject: [PATCH 065/651] memleak fix. 
--- src/LogMgr.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/LogMgr.cc b/src/LogMgr.cc index 8873b22b46..729979b4ef 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -115,8 +115,8 @@ LogVal::~LogVal() delete [] val.vector_val.vals; } -// if ( type == TYPE_PORT && present ) -// delete val.port_val.proto; + if ( type == TYPE_PORT && present ) + delete val.port_val.proto; } From 4b3cc95f7206d61614a0193508acc0f60828e3df Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 7 Dec 2011 12:43:15 -0800 Subject: [PATCH 066/651] send enum instead of string --- src/LogMgr.cc | 50 ++++++++++++++++++++++++++------------------------ src/LogMgr.h | 4 +--- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/src/LogMgr.cc b/src/LogMgr.cc index 729979b4ef..58581e2943 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -115,9 +115,6 @@ LogVal::~LogVal() delete [] val.vector_val.vals; } - if ( type == TYPE_PORT && present ) - delete val.port_val.proto; - } bool LogVal::IsCompatibleType(BroType* t, bool atomic_only) @@ -192,9 +189,30 @@ bool LogVal::Read(SerializationFormat* fmt) case TYPE_COUNTER: return fmt->Read(&val.uint_val, "uint"); - case TYPE_PORT: - val.port_val.proto = new string; - return fmt->Read(&val.port_val.port, "port") && fmt->Read(val.port_val.proto, "proto"); + case TYPE_PORT: { + int proto; + if ( ! (fmt->Read(&val.port_val.port, "port") && fmt->Read(&proto, "proto") ) ) { + return false; + } + + switch (proto) { + case 0: + val.port_val.proto = TRANSPORT_UNKNOWN; + break; + case 1: + val.port_val.proto = TRANSPORT_TCP; + break; + case 2: + val.port_val.proto = TRANSPORT_UDP; + break; + case 3: + val.port_val.proto = TRANSPORT_ICMP; + break; + default: + return false; + } + return true; + } case TYPE_SUBNET: { @@ -311,7 +329,7 @@ bool LogVal::Write(SerializationFormat* fmt) const return fmt->Write(val.uint_val, "uint"); case TYPE_PORT: - return fmt->Write(val.port_val.port, "port") && fmt->Write(*val.port_val.proto, "proto"); + return fmt->Write(val.port_val.port, "port") && fmt->Write(val.port_val.proto, "proto"); case TYPE_SUBNET: { @@ -1071,22 +1089,6 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns) return true; } -string LogMgr::TransportProtoToString(TransportProto p) { - switch ( p ) { - case TRANSPORT_UNKNOWN: - return "unknown"; - case TRANSPORT_TCP: - return "tcp"; - case TRANSPORT_UDP: - return "udp"; - case TRANSPORT_ICMP: - return "icmp"; - } - - assert(false); - return ""; -} - LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) { if ( ! ty ) @@ -1119,7 +1121,7 @@ LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) case TYPE_PORT: lval->val.port_val.port = val->AsPortVal()->Port(); - lval->val.port_val.proto = new string(TransportProtoToString(val->AsPortVal()->PortType())); + lval->val.port_val.proto = val->AsPortVal()->PortType(); break; case TYPE_SUBNET: diff --git a/src/LogMgr.h b/src/LogMgr.h index 52acd04be5..8c2c8250f8 100644 --- a/src/LogMgr.h +++ b/src/LogMgr.h @@ -34,7 +34,7 @@ struct LogVal { // types we can log directly. struct set_t { bro_int_t size; LogVal** vals; }; typedef set_t vec_t; - struct port_t { bro_uint_t port; string* proto; }; + struct port_t { bro_uint_t port; TransportProto proto; }; union _val { bro_int_t int_val; @@ -134,8 +134,6 @@ private: Filter* FindFilter(EnumVal* id, StringVal* filter); WriterInfo* FindWriter(LogWriter* writer); - string TransportProtoToString(TransportProto p); - vector streams; // Indexed by stream enum. 
};

From e0b7dc04512a228b332e2b9aaddd39300646d5cc Mon Sep 17 00:00:00 2001
From: Bernhard Amann
Date: Thu, 8 Dec 2011 14:12:59 -0800
Subject: [PATCH 067/651] fix compile warnings

---
 src/InputMgr.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/InputMgr.cc b/src/InputMgr.cc
index 00a1a26311..1fe3a82abe 100644
--- a/src/InputMgr.cc
+++ b/src/InputMgr.cc
@@ -866,8 +866,8 @@ void InputMgr::EndCurrentSend(const InputReader* reader, int id) {
 	//while ( ( ih = i->lastDict->NextEntry(c) ) ) {
 	while ( ( ih = filter->lastDict->NextEntry(lastDictIdxKey, c) ) ) {
-		ListVal * idx;
-		Val *val;
+		ListVal * idx = 0;
+		Val *val = 0;
 		if ( filter->pred || filter->event ) {
 			idx = filter->tab->RecoverIndex(ih->idxkey);

From a14ec02d3b75788966db7dccb1e8a533e627300a Mon Sep 17 00:00:00 2001
From: Bernhard Amann
Date: Mon, 19 Dec 2011 12:43:25 -0800
Subject: [PATCH 068/651] change empty field definition like in logging framework

---
 scripts/base/frameworks/input/readers/ascii.bro | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/base/frameworks/input/readers/ascii.bro b/scripts/base/frameworks/input/readers/ascii.bro
index 9f630975a2..14c04757f7 100644
--- a/scripts/base/frameworks/input/readers/ascii.bro
+++ b/scripts/base/frameworks/input/readers/ascii.bro
@@ -12,7 +12,7 @@ export {
 	const set_separator = "," &redef;
 
 	## String to use for empty fields.
-	const empty_field = "-" &redef;
+	const empty_field = "(empty)" &redef;
 
 	## String to use for an unset &optional field.
 	const unset_field = "-" &redef;

From 70a2cf67324c607014b067af13fc9c6887e571e3 Mon Sep 17 00:00:00 2001
From: Bernhard Amann
Date: Mon, 19 Dec 2011 12:43:51 -0800
Subject: [PATCH 069/651] update baseline to include input framework

---
 .../coverage.bare-load-baseline/canonified_loaded_scripts.log | 4 ++++
 .../canonified_loaded_scripts.log | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log
index 8fab67304e..5a0de92a6e 100644
--- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log
+++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log
@@ -18,4 +18,8 @@ scripts/base/init-bare.bro
 scripts/base/frameworks/logging/./postprocessors/__load__.bro
 scripts/base/frameworks/logging/./postprocessors/./scp.bro
 scripts/base/frameworks/logging/./writers/ascii.bro
+scripts/base/frameworks/input/__load__.bro
+scripts/base/frameworks/input/./main.bro
+build/src/base/input.bif.bro
+scripts/base/frameworks/input/./readers/ascii.bro
 scripts/policy/misc/loaded-scripts.bro
diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log
index 3f77797df8..e727928579 100644
--- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log
+++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log
@@ -18,6 +18,10 @@ scripts/base/init-bare.bro
 scripts/base/frameworks/logging/./postprocessors/__load__.bro
 scripts/base/frameworks/logging/./postprocessors/./scp.bro
 scripts/base/frameworks/logging/./writers/ascii.bro
+scripts/base/frameworks/input/__load__.bro
+scripts/base/frameworks/input/./main.bro
+build/src/base/input.bif.bro
+scripts/base/frameworks/input/./readers/ascii.bro
 scripts/base/init-default.bro
scripts/base/utils/site.bro scripts/base/utils/./patterns.bro From eb53a3d1c8bb9b7a590fc4f5ba70a5ea5017b6ee Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 12 Jan 2012 11:51:12 -0800 Subject: [PATCH 070/651] make input framework compile with brov6 --- src/InputReaderAscii.cc | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/src/InputReaderAscii.cc b/src/InputReaderAscii.cc index 6da161a2bf..257cb4cf71 100644 --- a/src/InputReaderAscii.cc +++ b/src/InputReaderAscii.cc @@ -253,19 +253,37 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { val->val.subnet_val.width = atoi(width.c_str()); string addr = s.substr(0, pos); s = addr; - // NOTE: dottet_to_addr BREAKS THREAD SAFETY! it uses reporter. + // NOTE: dotted_to_addr BREAKS THREAD SAFETY! it uses reporter. // Solve this some other time.... +#ifdef BROv6 + if ( s.find(':') != s.npos ) { + uint32* addr = dotted_to_addr6(s.c_str()); + copy_addr(val->val.subnet_val.net, addr); + delete addr; + } else { + val->val.subnet_val.net[0] = val->val.subnet_val.net[1] = val->val.subnet_val.net[2] = 0; + val->val.subnet_val.net[3] = dotted_to_addr(s.c_str()); + } +#else val->val.subnet_val.net = dotted_to_addr(s.c_str()); +#endif break; } case TYPE_ADDR: { // NOTE: dottet_to_addr BREAKS THREAD SAFETY! it uses reporter. // Solve this some other time.... - addr_type t = dotted_to_addr(s.c_str()); #ifdef BROv6 - copy_addr(t, val->val.addr_val); + if ( s.find(':') != s.npos ) { + uint32* addr = dotted_to_addr6(s.c_str()); + copy_addr(val->val.addr_val, addr); + delete addr; + } else { + val->val.addr_val[0] = val->val.addr_val[1] = val->val.addr_val[2] = 0; + val->val.addr_val[3] = dotted_to_addr(s.c_str()); + } #else + uint32 t = dotted_to_addr(s.c_str()); copy_addr(&t, val->val.addr_val); #endif break; From ac1708f8436126d1be5b522461dc88c8e1fa6158 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 20 Jan 2012 12:33:48 -0800 Subject: [PATCH 071/651] fix handling of predicates - now the second argument that is sent to the predicate really is a recordVal and not a ListVal. 
--- src/InputMgr.cc | 52 ++++++++++++++++++++++++++++++------------------- src/InputMgr.h | 1 + 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 1fe3a82abe..0e01bd6333 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -737,22 +737,7 @@ void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* c } else if ( filter->num_val_fields == 1 && !filter->want_record ) { valval = LogValToVal(vals[position], filter->rtype->FieldType(0)); } else { - RecordVal * r = new RecordVal(filter->rtype); - - for ( int j = 0; j < filter->rtype->NumFields(); j++) { - - Val* val = 0; - if ( filter->rtype->FieldType(j)->Tag() == TYPE_RECORD ) { - val = LogValToRecordVal(vals, filter->rtype->FieldType(j)->AsRecordType(), &position); - } else { - val = LogValToVal(vals[position], filter->rtype->FieldType(j)); - position++; - } - - r->Assign(j,val); - - } - valval = r; + valval = LogValToRecordVal(vals, filter->rtype, &position); } @@ -767,7 +752,9 @@ void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* c // call filter first to determine if we really add / change the entry if ( filter->pred ) { EnumVal* ev; - Ref(idxval); + //Ref(idxval); + int startpos = 0; + Val* predidx = LogValToRecordVal(vals, filter->itype, &startpos); Ref(valval); if ( updated ) { @@ -778,7 +765,7 @@ void InputMgr::SendEntryTable(const InputReader* reader, int id, const LogVal* c val_list vl( 2 + (filter->num_val_fields > 0) ); // 2 if we don't have values, 3 otherwise. vl.append(ev); - vl.append(idxval); + vl.append(predidx); if ( filter->num_val_fields > 0 ) vl.append(valval); @@ -882,12 +869,14 @@ void InputMgr::EndCurrentSend(const InputReader* reader, int id) { // ask predicate, if we want to expire this element... 
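(Illustration only, not part of the patch.) The user-visible effect is that a table filter's pred function now receives the index as a proper record of the filter's idx type, so its fields can be accessed directly. A minimal sketch with hypothetical Idx/Val record types and field names; such a function would be attached via the TableFilter `pred` field:

    # Hypothetical predicate; Idx/Val and their fields are illustrative.
    # 'left' now arrives as a record (e.g. left$i), not as a list value.
    function accept_entry(typ: Input::Event, left: Idx, right: Val): bool
        {
        return left$i != 127.0.0.1;
        }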
EnumVal* ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - Ref(idx); + //Ref(idx); + int startpos = 0; + Val* predidx = ListValToRecordVal(idx, filter->itype, &startpos); Ref(val); val_list vl(3); vl.append(ev); - vl.append(idx); + vl.append(predidx); vl.append(val); Val* v = filter->pred->Call(&vl); bool result = v->AsBool(); @@ -1123,6 +1112,29 @@ void InputMgr::SendEvent(EventHandlerPtr ev, list events) } +RecordVal* InputMgr::ListValToRecordVal(ListVal* list, RecordType *request_type, int* position) { + RecordVal* rec = new RecordVal(request_type->AsRecordType()); + + int maxpos = list->Length(); + + for ( int i = 0; i < request_type->NumFields(); i++ ) { + assert ( (*position) <= maxpos ); + + Val* fieldVal = 0; + if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) { + fieldVal = ListValToRecordVal(list, request_type->FieldType(i)->AsRecordType(), position); + } else { + fieldVal = list->Index(*position); + (*position)++; + } + + rec->Assign(i, fieldVal); + } + + return rec; +} + + RecordVal* InputMgr::LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position) { if ( position == 0 ) { diff --git a/src/InputMgr.h b/src/InputMgr.h index 21af018a47..5623c15338 100644 --- a/src/InputMgr.h +++ b/src/InputMgr.h @@ -67,6 +67,7 @@ private: Val* LogValToVal(const LogVal* val, BroType* request_type); Val* LogValToIndexVal(int num_fields, const RecordType* type, const LogVal* const *vals); RecordVal* LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position); + RecordVal* ListValToRecordVal(ListVal* list, RecordType *request_type, int* position); ReaderInfo* FindReader(const InputReader* reader); ReaderInfo* FindReader(const EnumVal* id); From f24c50b49a88b22db612d098489eeac042122a76 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 20 Jan 2012 12:42:23 -0800 Subject: [PATCH 072/651] remove unnecessary stuff from function. --- src/InputMgr.cc | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) diff --git a/src/InputMgr.cc b/src/InputMgr.cc index 0e01bd6333..89530febe6 100644 --- a/src/InputMgr.cc +++ b/src/InputMgr.cc @@ -955,7 +955,6 @@ void InputMgr::SendEventFilterEvent(const InputReader* reader, EnumVal* type, in if ( filter->want_record ) { RecordVal * r = LogValToRecordVal(vals, filter->fields, &position); out_vals.push_back(r); - } else { for ( int j = 0; j < filter->fields->NumFields(); j++) { Val* val = 0; @@ -991,27 +990,7 @@ void InputMgr::PutTable(const InputReader* reader, int id, const LogVal* const * } else if ( filter->num_val_fields == 1 && !filter->want_record ) { valval = LogValToVal(vals[filter->num_idx_fields], filter->rtype->FieldType(filter->num_idx_fields)); } else { - RecordVal * r = new RecordVal(filter->rtype); - - for ( int j = 0; j < filter->rtype->NumFields(); j++) { - - Val* val = 0; - if ( filter->rtype->FieldType(j)->Tag() == TYPE_RECORD ) { - val = LogValToRecordVal(vals, filter->rtype->FieldType(j)->AsRecordType(), &position); - } else { - val = LogValToVal(vals[position], filter->rtype->FieldType(j)); - position++; - } - - if ( val == 0 ) { - reporter->InternalError("conversion error"); - return; - } - - r->Assign(j,val); - - } - valval = r; + valval = LogValToRecordVal(vals, filter->rtype, &position); } filter->tab->Assign(idxval, valval); From e4e770d47517ad6b9c730a7132037feb265ffe30 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 26 Jan 2012 17:47:36 -0800 Subject: [PATCH 073/651] Threaded logging framework. 
This is based on Gilbert's code but I ended up refactoring it quite a bit. That's why I didn't do a direct merge but started with a new branch and copied things over to adapt. It looks quite a bit different now as I tried to generalize things a bit more to also support the Input Framework. The larger changes code are: - Moved all logging code into subdirectory src/logging/. Code here is in namespace "logging". - Moved all threading code into subdirectory src/threading/. Code here is in namespace "threading". - Introduced a central thread manager that tracks threads and is in charge of termination and (eventually) statistics. - Refactored logging independent threading code into base classes BasicThread and MsgThread. The former encapsulates all the pthread code with simple start/stop methods and provides a single Run() method to override. The latter is derived from BasicThread and adds bi-directional message passing between main and child threads. The hope is that the Input Framework can reuse this part quite directly. - A log writer is now split into a general WriterFrontend (LogEmissary in Gilbert's code) and a type-specific WriterBackend. Specific writers are implemented by deriving from the latter. (The plugin interface is almost unchanged compared to the 2.0 version.). Frontend and backend communicate via MsgThread's message passing. - MsgThread (and thus WriterBackend) has a Heartbeat() method that a thread can override to execute code on a regular basis. It's triggered roughly once a second by the main thread. - Integration into "the rest of Bro". Threads can send messages to the reporter and do debugging output; they are hooked into the I/O loop for sending messages back; and there's a new debugging stream "threading" that logs, well, threading activity. This all seems to work for the most part, but it's not done yet. TODO list: - Not all tests pass yet. In particular, diffs for the external tests seem to indicate some memory problem (no crashes, just an occasional weird character). - Only tested in --enable-debug mode. - Only tested on Linux. - Needs leak check. - Each log write is currently a single inter-thread message. Bring Gilbert's bulk writes back. - Code needs further cleanup. - Document the class API. - Document the internal structure of the logging framework. - Check for robustness: live traffic, aborting, signals, etc. - Add thread statistics to profile.log (most of the code is there). - Customize the OS-visible thread names on platforms that support it. 
--- src/Attr.cc | 4 +- src/CMakeLists.txt | 19 +- src/DebugLogger.cc | 2 +- src/DebugLogger.h | 1 + src/LogWriter.cc | 158 ---------- src/LogWriterNone.cc | 16 - src/LogWriterNone.h | 30 -- src/RemoteSerializer.cc | 22 +- src/RemoteSerializer.h | 15 +- src/logging.bif | 3 +- src/{LogMgr.cc => logging/Manager.cc} | 289 +++++++++--------- src/{LogMgr.h => logging/Manager.h} | 75 +++-- src/logging/WriterBackend.cc | 161 ++++++++++ src/{LogWriter.h => logging/WriterBackend.h} | 123 ++++---- src/logging/WriterFrontend.cc | 175 +++++++++++ src/logging/WriterFrontend.h | 66 ++++ .../writers/Ascii.cc} | 41 +-- .../writers/Ascii.h} | 34 ++- src/logging/writers/None.cc | 19 ++ src/logging/writers/None.h | 35 +++ src/main.cc | 20 +- src/threading/BasicThread.cc | 129 ++++++++ src/threading/BasicThread.h | 63 ++++ src/threading/Manager.cc | 104 +++++++ src/threading/Manager.h | 52 ++++ src/threading/MsgThread.cc | 285 +++++++++++++++++ src/threading/MsgThread.h | 157 ++++++++++ src/threading/Queue.h | 150 +++++++++ 28 files changed, 1745 insertions(+), 503 deletions(-) delete mode 100644 src/LogWriter.cc delete mode 100644 src/LogWriterNone.cc delete mode 100644 src/LogWriterNone.h rename src/{LogMgr.cc => logging/Manager.cc} (87%) rename src/{LogMgr.h => logging/Manager.h} (68%) create mode 100644 src/logging/WriterBackend.cc rename src/{LogWriter.h => logging/WriterBackend.h} (70%) create mode 100644 src/logging/WriterFrontend.cc create mode 100644 src/logging/WriterFrontend.h rename src/{LogWriterAscii.cc => logging/writers/Ascii.cc} (89%) rename src/{LogWriterAscii.h => logging/writers/Ascii.h} (57%) create mode 100644 src/logging/writers/None.cc create mode 100644 src/logging/writers/None.h create mode 100644 src/threading/BasicThread.cc create mode 100644 src/threading/BasicThread.h create mode 100644 src/threading/Manager.cc create mode 100644 src/threading/Manager.h create mode 100644 src/threading/MsgThread.cc create mode 100644 src/threading/MsgThread.h create mode 100644 src/threading/Queue.h diff --git a/src/Attr.cc b/src/Attr.cc index aed9165182..b877250f52 100644 --- a/src/Attr.cc +++ b/src/Attr.cc @@ -5,7 +5,7 @@ #include "Attr.h" #include "Expr.h" #include "Serializer.h" -#include "LogMgr.h" +#include "logging/Manager.h" const char* attr_name(attr_tag t) { @@ -416,7 +416,7 @@ void Attributes::CheckAttr(Attr* a) break; case ATTR_LOG: - if ( ! LogVal::IsCompatibleType(type) ) + if ( ! logging::Value::IsCompatibleType(type) ) Error("&log applied to a type that cannot be logged"); break; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0e29082db3..61a4847b70 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -213,6 +213,8 @@ binpac_target(syslog.pac ######################################################################## ## bro target +find_package (Threads) + # This macro stores associated headers for any C/C++ source files given # as arguments (past _var) as a list in the CMake variable named "_var". 
macro(COLLECT_HEADERS _var) @@ -334,10 +336,6 @@ set(bro_SRCS IRC.cc List.cc Reporter.cc - LogMgr.cc - LogWriter.cc - LogWriterAscii.cc - LogWriterNone.cc Login.cc MIME.cc NCP.cc @@ -409,6 +407,17 @@ set(bro_SRCS PacketDumper.cc strsep.c modp_numtoa.c + + threading/BasicThread.cc + threading/Manager.cc + threading/MsgThread.cc + + logging/Manager.cc + logging/WriterBackend.cc + logging/WriterFrontend.cc + logging/writers/Ascii.cc + logging/writers/None.cc + ${dns_SRCS} ${openssl_SRCS} ) @@ -421,7 +430,7 @@ add_definitions(-DBRO_BUILD_PATH="${CMAKE_CURRENT_BINARY_DIR}") add_executable(bro ${bro_SRCS} ${bro_HEADERS}) -target_link_libraries(bro ${brodeps}) +target_link_libraries(bro ${brodeps} ${CMAKE_THREAD_LIBS_INIT}) install(TARGETS bro DESTINATION bin) install(FILES ${INSTALL_BIF_OUTPUTS} DESTINATION ${BRO_SCRIPT_INSTALL_PATH}/base) diff --git a/src/DebugLogger.cc b/src/DebugLogger.cc index d60fdd70c8..c41a0552c6 100644 --- a/src/DebugLogger.cc +++ b/src/DebugLogger.cc @@ -15,7 +15,7 @@ DebugLogger::Stream DebugLogger::streams[NUM_DBGS] = { { "compressor", 0, false }, {"string", 0, false }, { "notifiers", 0, false }, { "main-loop", 0, false }, { "dpd", 0, false }, { "tm", 0, false }, - { "logging", 0, false } + { "logging", 0, false }, { "threading", 0, false } }; DebugLogger::DebugLogger(const char* filename) diff --git a/src/DebugLogger.h b/src/DebugLogger.h index a2dece5b3c..71e21bfa26 100644 --- a/src/DebugLogger.h +++ b/src/DebugLogger.h @@ -24,6 +24,7 @@ enum DebugStream { DBG_DPD, // Dynamic application detection framework DBG_TM, // Time-machine packet input via Brocolli DBG_LOGGING, // Logging streams + DBG_THREADING, // Threading system NUM_DBGS // Has to be last }; diff --git a/src/LogWriter.cc b/src/LogWriter.cc deleted file mode 100644 index 8584a0b0b5..0000000000 --- a/src/LogWriter.cc +++ /dev/null @@ -1,158 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#include "util.h" -#include "LogWriter.h" - -LogWriter::LogWriter() - { - buf = 0; - buf_len = 1024; - buffering = true; - disabled = false; - } - -LogWriter::~LogWriter() - { - if ( buf ) - free(buf); - - for(int i = 0; i < num_fields; ++i) - delete fields[i]; - - delete [] fields; - } - -bool LogWriter::Init(string arg_path, int arg_num_fields, - const LogField* const * arg_fields) - { - path = arg_path; - num_fields = arg_num_fields; - fields = arg_fields; - - if ( ! DoInit(arg_path, arg_num_fields, arg_fields) ) - { - disabled = true; - return false; - } - - return true; - } - -bool LogWriter::Write(int arg_num_fields, LogVal** vals) - { - // Double-check that the arguments match. If we get this from remote, - // something might be mixed up. - if ( num_fields != arg_num_fields ) - { - DBG_LOG(DBG_LOGGING, "Number of fields don't match in LogWriter::Write() (%d vs. %d)", - arg_num_fields, num_fields); - - DeleteVals(vals); - return false; - } - - for ( int i = 0; i < num_fields; ++i ) - { - if ( vals[i]->type != fields[i]->type ) - { - DBG_LOG(DBG_LOGGING, "Field type doesn't match in LogWriter::Write() (%d vs. %d)", - vals[i]->type, fields[i]->type); - DeleteVals(vals); - return false; - } - } - - bool result = DoWrite(num_fields, fields, vals); - - DeleteVals(vals); - - if ( ! result ) - disabled = true; - - return result; - } - -bool LogWriter::SetBuf(bool enabled) - { - if ( enabled == buffering ) - // No change. - return true; - - buffering = enabled; - - if ( ! 
DoSetBuf(enabled) ) - { - disabled = true; - return false; - } - - return true; - } - -bool LogWriter::Rotate(string rotated_path, double open, - double close, bool terminating) - { - if ( ! DoRotate(rotated_path, open, close, terminating) ) - { - disabled = true; - return false; - } - - return true; - } - -bool LogWriter::Flush() - { - if ( ! DoFlush() ) - { - disabled = true; - return false; - } - - return true; - } - -void LogWriter::Finish() - { - DoFinish(); - } - -const char* LogWriter::Fmt(const char* format, ...) - { - if ( ! buf ) - buf = (char*) malloc(buf_len); - - va_list al; - va_start(al, format); - int n = safe_vsnprintf(buf, buf_len, format, al); - va_end(al); - - if ( (unsigned int) n >= buf_len ) - { // Not enough room, grow the buffer. - buf_len = n + 32; - buf = (char*) realloc(buf, buf_len); - - // Is it portable to restart? - va_start(al, format); - n = safe_vsnprintf(buf, buf_len, format, al); - va_end(al); - } - - return buf; - } - -void LogWriter::Error(const char *msg) - { - log_mgr->Error(this, msg); - } - -void LogWriter::DeleteVals(LogVal** vals) - { - log_mgr->DeleteVals(num_fields, vals); - } - -bool LogWriter::FinishedRotation(string new_name, string old_name, double open, - double close, bool terminating) - { - return log_mgr->FinishedRotation(this, new_name, old_name, open, close, terminating); - } diff --git a/src/LogWriterNone.cc b/src/LogWriterNone.cc deleted file mode 100644 index 592772afdb..0000000000 --- a/src/LogWriterNone.cc +++ /dev/null @@ -1,16 +0,0 @@ - -#include "LogWriterNone.h" - -bool LogWriterNone::DoRotate(string rotated_path, double open, - double close, bool terminating) - { - if ( ! FinishedRotation(string("/dev/null"), Path(), open, close, terminating)) - { - Error(Fmt("error rotating %s", Path().c_str())); - return false; - } - - return true; - } - - diff --git a/src/LogWriterNone.h b/src/LogWriterNone.h deleted file mode 100644 index 3811a19469..0000000000 --- a/src/LogWriterNone.h +++ /dev/null @@ -1,30 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// Dummy log writer that just discards everything (but still pretends to rotate). 
- -#ifndef LOGWRITERNONE_H -#define LOGWRITERNONE_H - -#include "LogWriter.h" - -class LogWriterNone : public LogWriter { -public: - LogWriterNone() {} - ~LogWriterNone() {}; - - static LogWriter* Instantiate() { return new LogWriterNone; } - -protected: - virtual bool DoInit(string path, int num_fields, - const LogField* const * fields) { return true; } - - virtual bool DoWrite(int num_fields, const LogField* const * fields, - LogVal** vals) { return true; } - virtual bool DoSetBuf(bool enabled) { return true; } - virtual bool DoRotate(string rotated_path, double open, double close, - bool terminating); - virtual bool DoFlush() { return true; } - virtual void DoFinish() {} -}; - -#endif diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index b72a6dcc1a..a75812b42b 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -183,8 +183,8 @@ #include "Sessions.h" #include "File.h" #include "Conn.h" -#include "LogMgr.h" #include "Reporter.h" +#include "logging/Manager.h" extern "C" { #include "setsignal.h" @@ -2476,7 +2476,7 @@ bool RemoteSerializer::ProcessRemotePrint() return true; } -bool RemoteSerializer::SendLogCreateWriter(EnumVal* id, EnumVal* writer, string path, int num_fields, const LogField* const * fields) +bool RemoteSerializer::SendLogCreateWriter(EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Field* const * fields) { loop_over_list(peers, i) { @@ -2486,7 +2486,7 @@ bool RemoteSerializer::SendLogCreateWriter(EnumVal* id, EnumVal* writer, string return true; } -bool RemoteSerializer::SendLogCreateWriter(PeerID peer_id, EnumVal* id, EnumVal* writer, string path, int num_fields, const LogField* const * fields) +bool RemoteSerializer::SendLogCreateWriter(PeerID peer_id, EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Field* const * fields) { SetErrorDescr("logging"); @@ -2540,7 +2540,7 @@ error: return false; } -bool RemoteSerializer::SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const LogVal* const * vals) +bool RemoteSerializer::SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Value* const * vals) { loop_over_list(peers, i) { @@ -2550,7 +2550,7 @@ bool RemoteSerializer::SendLogWrite(EnumVal* id, EnumVal* writer, string path, i return true; } -bool RemoteSerializer::SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const LogVal* const * vals) +bool RemoteSerializer::SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Value* const * vals) { if ( peer->phase != Peer::HANDSHAKE && peer->phase != Peer::RUNNING ) return false; @@ -2641,7 +2641,7 @@ bool RemoteSerializer::ProcessLogCreateWriter() EnumVal* id_val = 0; EnumVal* writer_val = 0; - LogField** fields = 0; + logging::Field** fields = 0; BinarySerializationFormat fmt; fmt.StartRead(current_args->data, current_args->len); @@ -2658,11 +2658,11 @@ bool RemoteSerializer::ProcessLogCreateWriter() if ( ! success ) goto error; - fields = new LogField* [num_fields]; + fields = new logging::Field* [num_fields]; for ( int i = 0; i < num_fields; i++ ) { - fields[i] = new LogField; + fields[i] = new logging::Field; if ( ! fields[i]->Read(&fmt) ) goto error; } @@ -2703,7 +2703,7 @@ bool RemoteSerializer::ProcessLogWrite() // Unserialize one entry. 
EnumVal* id_val = 0; EnumVal* writer_val = 0; - LogVal** vals = 0; + logging::Value** vals = 0; int id, writer; string path; @@ -2717,11 +2717,11 @@ bool RemoteSerializer::ProcessLogWrite() if ( ! success ) goto error; - vals = new LogVal* [num_fields]; + vals = new logging::Value* [num_fields]; for ( int i = 0; i < num_fields; i++ ) { - vals[i] = new LogVal; + vals[i] = new logging::Value; if ( ! vals[i]->Read(&fmt) ) goto error; } diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index b64fdcbe66..ba0bde7d41 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -14,8 +14,11 @@ // FIXME: Change this to network byte order class IncrementalSendTimer; -class LogField; -class LogVal; + +namespace logging { + class Field; + class Value; +} // This class handles the communication done in Bro's main loop. class RemoteSerializer : public Serializer, public IOSource { @@ -99,13 +102,13 @@ public: bool SendPrintHookEvent(BroFile* f, const char* txt, size_t len); // Send a request to create a writer on a remote side. - bool SendLogCreateWriter(PeerID peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const LogField* const * fields); + bool SendLogCreateWriter(PeerID peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Field* const * fields); // Broadcasts a request to create a writer. - bool SendLogCreateWriter(EnumVal* id, EnumVal* writer, string path, int num_fields, const LogField* const * fields); + bool SendLogCreateWriter(EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Field* const * fields); // Broadcast a log entry to everybody interested. - bool SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const LogVal* const * vals); + bool SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Value* const * vals); // Synchronzizes time with all connected peers. Returns number of // current sync-point, or -1 on error. @@ -300,7 +303,7 @@ protected: bool SendID(SerialInfo* info, Peer* peer, const ID& id); bool SendCapabilities(Peer* peer); bool SendPacket(SerialInfo* info, Peer* peer, const Packet& p); - bool SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const LogVal* const * vals); + bool SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Value* const * vals); void UnregisterHandlers(Peer* peer); void RaiseEvent(EventHandlerPtr event, Peer* peer, const char* arg = 0); diff --git a/src/logging.bif b/src/logging.bif index 31e1bebacd..c8960b4e38 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -3,8 +3,9 @@ module Log; %%{ -#include "LogMgr.h" #include "NetVar.h" + +#include "logging/Manager.h" %%} type Filter: record; diff --git a/src/LogMgr.cc b/src/logging/Manager.cc similarity index 87% rename from src/LogMgr.cc rename to src/logging/Manager.cc index 28e9a2ac1f..09c5030fdc 100644 --- a/src/LogMgr.cc +++ b/src/logging/Manager.cc @@ -2,33 +2,38 @@ #include -#include "LogMgr.h" -#include "Event.h" -#include "EventHandler.h" -#include "NetVar.h" -#include "Net.h" +#include "../Event.h" +#include "../EventHandler.h" +#include "../NetVar.h" +#include "../Net.h" -#include "LogWriterAscii.h" -#include "LogWriterNone.h" +#include "Manager.h" +#include "WriterFrontend.h" +#include "WriterBackend.h" + +#include "writers/Ascii.h" +#include "writers/None.h" + +using namespace logging; // Structure describing a log writer type. 
-struct LogWriterDefinition { +struct WriterDefinition { bro_int_t type; // The type. const char *name; // Descriptive name for error messages. bool (*init)(); // An optional one-time initialization function. - LogWriter* (*factory)(); // A factory function creating instances. + WriterBackend* (*factory)(); // A factory function creating instances. }; // Static table defining all availabel log writers. -LogWriterDefinition log_writers[] = { - { BifEnum::Log::WRITER_NONE, "None", 0, LogWriterNone::Instantiate }, - { BifEnum::Log::WRITER_ASCII, "Ascii", 0, LogWriterAscii::Instantiate }, +WriterDefinition log_writers[] = { + { BifEnum::Log::WRITER_NONE, "None", 0, writer::None::Instantiate }, + { BifEnum::Log::WRITER_ASCII, "Ascii", 0, writer::Ascii::Instantiate }, // End marker, don't touch. - { BifEnum::Log::WRITER_DEFAULT, "None", 0, (LogWriter* (*)())0 } + { BifEnum::Log::WRITER_DEFAULT, "None", 0, (WriterBackend* (*)())0 } }; -struct LogMgr::Filter { +struct Manager::Filter { string name; EnumVal* id; Func* pred; @@ -42,7 +47,7 @@ struct LogMgr::Filter { Func* postprocessor; int num_fields; - LogField** fields; + Field** fields; // Vector indexed by field number. Each element is a list of record // indices defining a path leading to the value across potential @@ -52,16 +57,16 @@ struct LogMgr::Filter { ~Filter(); }; -struct LogMgr::WriterInfo { +struct Manager::WriterInfo { EnumVal* type; double open_time; Timer* rotation_timer; double interval; Func* postprocessor; - LogWriter* writer; + WriterFrontend* writer; }; -struct LogMgr::Stream { +struct Manager::Stream { EnumVal* id; bool enabled; string name; @@ -78,7 +83,7 @@ struct LogMgr::Stream { ~Stream(); }; -bool LogField::Read(SerializationFormat* fmt) +bool Field::Read(SerializationFormat* fmt) { int t; int st; @@ -90,12 +95,12 @@ bool LogField::Read(SerializationFormat* fmt) return success; } -bool LogField::Write(SerializationFormat* fmt) const +bool Field::Write(SerializationFormat* fmt) const { return (fmt->Write(name, "name") && fmt->Write((int)type, "type") && fmt->Write((int)subtype, "subtype")); } -LogVal::~LogVal() +Value::~Value() { if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE || type == TYPE_FUNC) && present ) @@ -118,7 +123,7 @@ LogVal::~LogVal() } } -bool LogVal::IsCompatibleType(BroType* t, bool atomic_only) +bool Value::IsCompatibleType(BroType* t, bool atomic_only) { if ( ! t ) return false; @@ -169,7 +174,7 @@ bool LogVal::IsCompatibleType(BroType* t, bool atomic_only) return false; } -bool LogVal::Read(SerializationFormat* fmt) +bool Value::Read(SerializationFormat* fmt) { int ty; @@ -249,11 +254,11 @@ bool LogVal::Read(SerializationFormat* fmt) if ( ! fmt->Read(&val.set_val.size, "set_size") ) return false; - val.set_val.vals = new LogVal* [val.set_val.size]; + val.set_val.vals = new Value* [val.set_val.size]; for ( int i = 0; i < val.set_val.size; ++i ) { - val.set_val.vals[i] = new LogVal; + val.set_val.vals[i] = new Value; if ( ! val.set_val.vals[i]->Read(fmt) ) return false; @@ -267,11 +272,11 @@ bool LogVal::Read(SerializationFormat* fmt) if ( ! fmt->Read(&val.vector_val.size, "vector_size") ) return false; - val.vector_val.vals = new LogVal* [val.vector_val.size]; + val.vector_val.vals = new Value* [val.vector_val.size]; for ( int i = 0; i < val.vector_val.size; ++i ) { - val.vector_val.vals[i] = new LogVal; + val.vector_val.vals[i] = new Value; if ( ! 
val.vector_val.vals[i]->Read(fmt) ) return false; @@ -281,13 +286,13 @@ bool LogVal::Read(SerializationFormat* fmt) } default: - reporter->InternalError("unsupported type %s in LogVal::Write", type_name(type)); + reporter->InternalError("unsupported type %s in Value::Write", type_name(type)); } return false; } -bool LogVal::Write(SerializationFormat* fmt) const +bool Value::Write(SerializationFormat* fmt) const { if ( ! (fmt->Write((int)type, "type") && fmt->Write(present, "present")) ) @@ -382,13 +387,13 @@ bool LogVal::Write(SerializationFormat* fmt) const } default: - reporter->InternalError("unsupported type %s in LogVal::REad", type_name(type)); + reporter->InternalError("unsupported type %s in Value::REad", type_name(type)); } return false; } -LogMgr::Filter::~Filter() +Manager::Filter::~Filter() { for ( int i = 0; i < num_fields; ++i ) delete fields[i]; @@ -398,7 +403,7 @@ LogMgr::Filter::~Filter() Unref(path_val); } -LogMgr::Stream::~Stream() +Manager::Stream::~Stream() { Unref(columns); @@ -421,17 +426,64 @@ LogMgr::Stream::~Stream() delete *f; } -LogMgr::LogMgr() +Manager::Manager() { } -LogMgr::~LogMgr() +Manager::~Manager() { for ( vector::iterator s = streams.begin(); s != streams.end(); ++s ) delete *s; } -LogMgr::Stream* LogMgr::FindStream(EnumVal* id) +WriterBackend* Manager::CreateBackend(bro_int_t type) + { + WriterDefinition* ld = log_writers; + + while ( true ) + { + if ( ld->type == BifEnum::Log::WRITER_DEFAULT ) + { + reporter->Error("unknow writer when creating writer"); + return 0; + } + + if ( ld->type == type ) + break; + + if ( ! ld->factory ) + // Oops, we can't instantiate this guy. + return 0; + + // If the writer has an init function, call it. + if ( ld->init ) + { + if ( (*ld->init)() ) + // Clear the init function so that we won't + // call it again later. + ld->init = 0; + else + // Init failed, disable by deleting factory + // function. + ld->factory = 0; + + DBG_LOG(DBG_LOGGING, "failed to init writer class %s", + ld->name); + + return false; + } + + ++ld; + } + + assert(ld->factory); + + WriterBackend* backend = (*ld->factory)(); + assert(backend); + return backend; + } + +Manager::Stream* Manager::FindStream(EnumVal* id) { unsigned int idx = id->AsEnum(); @@ -441,7 +493,7 @@ LogMgr::Stream* LogMgr::FindStream(EnumVal* id) return streams[idx]; } -LogMgr::WriterInfo* LogMgr::FindWriter(LogWriter* writer) +Manager::WriterInfo* Manager::FindWriter(WriterFrontend* writer) { for ( vector::iterator s = streams.begin(); s != streams.end(); ++s ) { @@ -460,7 +512,7 @@ LogMgr::WriterInfo* LogMgr::FindWriter(LogWriter* writer) return 0; } -void LogMgr::RemoveDisabledWriters(Stream* stream) +void Manager::RemoveDisabledWriters(Stream* stream) { list disabled; @@ -468,6 +520,7 @@ void LogMgr::RemoveDisabledWriters(Stream* stream) { if ( j->second && j->second->writer->Disabled() ) { + j->second->writer->Stop(); delete j->second; disabled.push_back(j->first); } @@ -477,7 +530,7 @@ void LogMgr::RemoveDisabledWriters(Stream* stream) stream->writers.erase(*j); } -bool LogMgr::CreateStream(EnumVal* id, RecordVal* sval) +bool Manager::CreateStream(EnumVal* id, RecordVal* sval) { RecordType* rtype = sval->Type()->AsRecordType(); @@ -497,7 +550,7 @@ bool LogMgr::CreateStream(EnumVal* id, RecordVal* sval) if ( ! (columns->FieldDecl(i)->FindAttr(ATTR_LOG)) ) continue; - if ( ! LogVal::IsCompatibleType(columns->FieldType(i)) ) + if ( ! 
Value::IsCompatibleType(columns->FieldType(i)) ) { reporter->Error("type of field '%s' is not support for logging output", columns->FieldName(i)); @@ -569,7 +622,7 @@ bool LogMgr::CreateStream(EnumVal* id, RecordVal* sval) return true; } -bool LogMgr::EnableStream(EnumVal* id) +bool Manager::EnableStream(EnumVal* id) { Stream* stream = FindStream(id); @@ -585,7 +638,7 @@ bool LogMgr::EnableStream(EnumVal* id) return true; } -bool LogMgr::DisableStream(EnumVal* id) +bool Manager::DisableStream(EnumVal* id) { Stream* stream = FindStream(id); @@ -602,7 +655,7 @@ bool LogMgr::DisableStream(EnumVal* id) } // Helper for recursive record field unrolling. -bool LogMgr::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, +bool Manager::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, TableVal* include, TableVal* exclude, string path, list indices) { for ( int i = 0; i < rt->NumFields(); ++i ) @@ -696,9 +749,9 @@ bool LogMgr::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, filter->indices.push_back(new_indices); - filter->fields = (LogField**) + filter->fields = (Field**) realloc(filter->fields, - sizeof(LogField) * ++filter->num_fields); + sizeof(Field) * ++filter->num_fields); if ( ! filter->fields ) { @@ -706,14 +759,14 @@ bool LogMgr::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, return false; } - LogField* field = new LogField(); + Field* field = new Field(); field->name = new_path; field->type = t->Tag(); - if ( field->type == TYPE_TABLE ) + if ( field->type == TYPE_TABLE ) { field->subtype = t->AsSetType()->Indices()->PureType()->Tag(); - } - else if ( field->type == TYPE_VECTOR ) + } + else if ( field->type == TYPE_VECTOR ) { field->subtype = t->AsVectorType()->YieldType()->Tag(); } @@ -723,7 +776,7 @@ bool LogMgr::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, return true; } -bool LogMgr::AddFilter(EnumVal* id, RecordVal* fval) +bool Manager::AddFilter(EnumVal* id, RecordVal* fval) { RecordType* rtype = fval->Type()->AsRecordType(); @@ -819,7 +872,7 @@ bool LogMgr::AddFilter(EnumVal* id, RecordVal* fval) for ( int i = 0; i < filter->num_fields; i++ ) { - LogField* field = filter->fields[i]; + Field* field = filter->fields[i]; DBG_LOG(DBG_LOGGING, " field %10s: %s", field->name.c_str(), type_name(field->type)); } @@ -828,12 +881,12 @@ bool LogMgr::AddFilter(EnumVal* id, RecordVal* fval) return true; } -bool LogMgr::RemoveFilter(EnumVal* id, StringVal* name) +bool Manager::RemoveFilter(EnumVal* id, StringVal* name) { return RemoveFilter(id, name->AsString()->CheckString()); } -bool LogMgr::RemoveFilter(EnumVal* id, string name) +bool Manager::RemoveFilter(EnumVal* id, string name) { Stream* stream = FindStream(id); if ( ! stream ) @@ -860,7 +913,7 @@ bool LogMgr::RemoveFilter(EnumVal* id, string name) return true; } -bool LogMgr::Write(EnumVal* id, RecordVal* columns) +bool Manager::Write(EnumVal* id, RecordVal* columns) { bool error = false; @@ -980,7 +1033,7 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns) Stream::WriterMap::iterator w = stream->writers.find(Stream::WriterPathPair(filter->writer->AsEnum(), path)); - LogWriter* writer = 0; + WriterFrontend* writer = 0; if ( w != stream->writers.end() ) // We know this writer already. @@ -990,12 +1043,12 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns) { // No, need to create one. - // Copy the fields for LogWriter::Init() as it will take - // ownership. 
- LogField** arg_fields = new LogField*[filter->num_fields]; + // Copy the fields for WriterFrontend::Init() as it + // will take ownership. + Field** arg_fields = new Field*[filter->num_fields]; for ( int j = 0; j < filter->num_fields; ++j ) - arg_fields[j] = new LogField(*filter->fields[j]); + arg_fields[j] = new Field(*filter->fields[j]); if ( filter->remote ) remote_serializer->SendLogCreateWriter(stream->id, @@ -1034,7 +1087,7 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns) if ( filter->local || filter->remote ) { - LogVal** vals = RecordToFilterVals(stream, filter, columns); + Value** vals = RecordToFilterVals(stream, filter, columns); if ( filter->remote ) remote_serializer->SendLogWrite(stream->id, @@ -1045,11 +1098,9 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns) if ( filter->local ) { - assert(writer); - // Write takes ownership of vals. - if ( ! writer->Write(filter->num_fields, vals) ) - error = true; + assert(writer); + writer->Write(filter->num_fields, vals); } else @@ -1072,15 +1123,15 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns) return true; } -LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) +Value* Manager::ValToLogVal(Val* val, BroType* ty) { if ( ! ty ) ty = val->Type(); if ( ! val ) - return new LogVal(ty->Tag(), false); + return new Value(ty->Tag(), false); - LogVal* lval = new LogVal(ty->Tag()); + Value* lval = new Value(ty->Tag()); switch ( lval->type ) { case TYPE_BOOL: @@ -1160,7 +1211,7 @@ LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) set = new ListVal(TYPE_INT); lval->val.set_val.size = set->Length(); - lval->val.set_val.vals = new LogVal* [lval->val.set_val.size]; + lval->val.set_val.vals = new Value* [lval->val.set_val.size]; for ( int i = 0; i < lval->val.set_val.size; i++ ) lval->val.set_val.vals[i] = ValToLogVal(set->Index(i)); @@ -1174,7 +1225,7 @@ LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) VectorVal* vec = val->AsVectorVal(); lval->val.vector_val.size = vec->Size(); lval->val.vector_val.vals = - new LogVal* [lval->val.vector_val.size]; + new Value* [lval->val.vector_val.size]; for ( int i = 0; i < lval->val.vector_val.size; i++ ) { @@ -1193,10 +1244,10 @@ LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) return lval; } -LogVal** LogMgr::RecordToFilterVals(Stream* stream, Filter* filter, +Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, RecordVal* columns) { - LogVal** vals = new LogVal*[filter->num_fields]; + Value** vals = new Value*[filter->num_fields]; for ( int i = 0; i < filter->num_fields; ++i ) { @@ -1215,7 +1266,7 @@ LogVal** LogMgr::RecordToFilterVals(Stream* stream, Filter* filter, if ( ! val ) { // Value, or any of its parents, is not set. - vals[i] = new LogVal(filter->fields[i]->type, false); + vals[i] = new Value(filter->fields[i]->type, false); break; } } @@ -1227,8 +1278,8 @@ LogVal** LogMgr::RecordToFilterVals(Stream* stream, Filter* filter, return vals; } -LogWriter* LogMgr::CreateWriter(EnumVal* id, EnumVal* writer, string path, - int num_fields, LogField** fields) +WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, + int num_fields, Field** fields) { Stream* stream = FindStream(id); @@ -1244,56 +1295,10 @@ LogWriter* LogMgr::CreateWriter(EnumVal* id, EnumVal* writer, string path, // return it. return w->second->writer; - // Need to instantiate a new writer. 
+ WriterFrontend* writer_obj = new WriterFrontend(writer->AsEnum()); + assert(writer_obj); - LogWriterDefinition* ld = log_writers; - - while ( true ) - { - if ( ld->type == BifEnum::Log::WRITER_DEFAULT ) - { - reporter->Error("unknow writer when creating writer"); - return 0; - } - - if ( ld->type == writer->AsEnum() ) - break; - - if ( ! ld->factory ) - // Oops, we can't instantiate this guy. - return 0; - - // If the writer has an init function, call it. - if ( ld->init ) - { - if ( (*ld->init)() ) - // Clear the init function so that we won't - // call it again later. - ld->init = 0; - else - // Init failed, disable by deleting factory - // function. - ld->factory = 0; - - DBG_LOG(DBG_LOGGING, "failed to init writer class %s", - ld->name); - - return false; - } - - ++ld; - } - - assert(ld->factory); - LogWriter* writer_obj = (*ld->factory)(); - - if ( ! writer_obj->Init(path, num_fields, fields) ) - { - DBG_LOG(DBG_LOGGING, "failed to init instance of writer %s", - ld->name); - - return 0; - } + writer_obj->Init(path, num_fields, fields); WriterInfo* winfo = new WriterInfo; winfo->type = writer->Ref()->AsEnumVal(); @@ -1338,16 +1343,17 @@ LogWriter* LogMgr::CreateWriter(EnumVal* id, EnumVal* writer, string path, return writer_obj; } -void LogMgr::DeleteVals(int num_fields, LogVal** vals) +void Manager::DeleteVals(int num_fields, Value** vals) { + // Note this code is duplicated in WriterBackend::DeleteVals(). for ( int i = 0; i < num_fields; i++ ) delete vals[i]; delete [] vals; } -bool LogMgr::Write(EnumVal* id, EnumVal* writer, string path, int num_fields, - LogVal** vals) +bool Manager::Write(EnumVal* id, EnumVal* writer, string path, int num_fields, + Value** vals) { Stream* stream = FindStream(id); @@ -1357,7 +1363,7 @@ bool LogMgr::Write(EnumVal* id, EnumVal* writer, string path, int num_fields, #ifdef DEBUG ODesc desc; id->Describe(&desc); - DBG_LOG(DBG_LOGGING, "unknown stream %s in LogMgr::Write()", + DBG_LOG(DBG_LOGGING, "unknown stream %s in Manager::Write()", desc.Description()); #endif DeleteVals(num_fields, vals); @@ -1379,23 +1385,24 @@ bool LogMgr::Write(EnumVal* id, EnumVal* writer, string path, int num_fields, #ifdef DEBUG ODesc desc; id->Describe(&desc); - DBG_LOG(DBG_LOGGING, "unknown writer %s in LogMgr::Write()", + DBG_LOG(DBG_LOGGING, "unknown writer %s in Manager::Write()", desc.Description()); #endif DeleteVals(num_fields, vals); return false; } - bool success = (w->second ? w->second->writer->Write(num_fields, vals) : true); + if ( w->second ) + w->second->writer->Write(num_fields, vals); DBG_LOG(DBG_LOGGING, - "Wrote pre-filtered record to path '%s' on stream '%s' [%s]", - path.c_str(), stream->name.c_str(), (success ? "ok" : "error")); + "Wrote pre-filtered record to path '%s' on stream '%s'", + path.c_str(), stream->name.c_str()); - return success; + return true; } -void LogMgr::SendAllWritersTo(RemoteSerializer::PeerID peer) +void Manager::SendAllWritersTo(RemoteSerializer::PeerID peer) { for ( vector::iterator s = streams.begin(); s != streams.end(); ++s ) { @@ -1410,7 +1417,7 @@ void LogMgr::SendAllWritersTo(RemoteSerializer::PeerID peer) if ( ! 
i->second ) continue; - LogWriter* writer = i->second->writer; + WriterFrontend* writer = i->second->writer; EnumVal writer_val(i->first.first, BifType::Enum::Log::Writer); remote_serializer->SendLogCreateWriter(peer, (*s)->id, @@ -1422,7 +1429,7 @@ void LogMgr::SendAllWritersTo(RemoteSerializer::PeerID peer) } } -bool LogMgr::SetBuf(EnumVal* id, bool enabled) +bool Manager::SetBuf(EnumVal* id, bool enabled) { Stream* stream = FindStream(id); if ( ! stream ) @@ -1440,7 +1447,7 @@ bool LogMgr::SetBuf(EnumVal* id, bool enabled) return true; } -bool LogMgr::Flush(EnumVal* id) +bool Manager::Flush(EnumVal* id) { Stream* stream = FindStream(id); if ( ! stream ) @@ -1461,7 +1468,7 @@ bool LogMgr::Flush(EnumVal* id) return true; } -void LogMgr::Error(LogWriter* writer, const char* msg) +void Manager::Error(WriterFrontend* writer, const char* msg) { reporter->Error("error with writer for %s: %s", writer->Path().c_str(), msg); @@ -1470,7 +1477,7 @@ void LogMgr::Error(LogWriter* writer, const char* msg) // Timer which on dispatching rotates the filter. class RotationTimer : public Timer { public: - RotationTimer(double t, LogMgr::WriterInfo* arg_winfo, bool arg_rotate) + RotationTimer(double t, Manager::WriterInfo* arg_winfo, bool arg_rotate) : Timer(t, TIMER_ROTATE) { winfo = arg_winfo; @@ -1482,7 +1489,7 @@ public: void Dispatch(double t, int is_expire); protected: - LogMgr::WriterInfo* winfo; + Manager::WriterInfo* winfo; bool rotate; }; @@ -1506,7 +1513,7 @@ void RotationTimer::Dispatch(double t, int is_expire) } } -void LogMgr::InstallRotationTimer(WriterInfo* winfo) +void Manager::InstallRotationTimer(WriterInfo* winfo) { if ( terminating ) return; @@ -1548,7 +1555,7 @@ void LogMgr::InstallRotationTimer(WriterInfo* winfo) } } -void LogMgr::Rotate(WriterInfo* winfo) +void Manager::Rotate(WriterInfo* winfo) { DBG_LOG(DBG_LOGGING, "Rotating %s at %.6f", winfo->writer->Path().c_str(), network_time); @@ -1568,7 +1575,7 @@ void LogMgr::Rotate(WriterInfo* winfo) winfo->writer->Rotate(tmp, winfo->open_time, network_time, terminating); } -bool LogMgr::FinishedRotation(LogWriter* writer, string new_name, string old_name, +bool Manager::FinishedRotation(WriterFrontend* writer, string new_name, string old_name, double open, double close, bool terminating) { DBG_LOG(DBG_LOGGING, "Finished rotating %s at %.6f, new name %s", diff --git a/src/LogMgr.h b/src/logging/Manager.h similarity index 68% rename from src/LogMgr.h rename to src/logging/Manager.h index 3eaba360d5..7fa2c271db 100644 --- a/src/LogMgr.h +++ b/src/logging/Manager.h @@ -2,24 +2,28 @@ // // A class managing log writers and filters. -#ifndef LOGMGR_H -#define LOGMGR_H +#ifndef LOGGING_MANAGER_H +#define LOGGING_MANAGER_H -#include "Val.h" -#include "EventHandler.h" -#include "RemoteSerializer.h" +#include "../Val.h" +#include "../EventHandler.h" +#include "../RemoteSerializer.h" class SerializationFormat; +class RemoteSerializer; +class RotationTimer; + +namespace logging { // Description of a log field. -struct LogField { +struct Field { string name; TypeTag type; // inner type of sets TypeTag subtype; - LogField() { subtype = TYPE_VOID; } - LogField(const LogField& other) + Field() { subtype = TYPE_VOID; } + Field(const Field& other) : name(other.name), type(other.type), subtype(other.subtype) { } // (Un-)serialize. @@ -28,13 +32,13 @@ struct LogField { }; // Values as logged by a writer. -struct LogVal { +struct Value { TypeTag type; bool present; // False for unset fields. 
// The following union is a subset of BroValUnion, including only the // types we can log directly. - struct set_t { bro_int_t size; LogVal** vals; }; + struct set_t { bro_int_t size; Value** vals; }; typedef set_t vec_t; union _val { @@ -48,9 +52,9 @@ struct LogVal { vec_t vector_val; } val; - LogVal(TypeTag arg_type = TYPE_ERROR, bool arg_present = true) + Value(TypeTag arg_type = TYPE_ERROR, bool arg_present = true) : type(arg_type), present(arg_present) {} - ~LogVal(); + ~Value(); // (Un-)serialize. bool Read(SerializationFormat* fmt); @@ -61,17 +65,17 @@ struct LogVal { static bool IsCompatibleType(BroType* t, bool atomic_only=false); private: - LogVal(const LogVal& other) { } + Value(const Value& other) { } }; -class LogWriter; -class RemoteSerializer; -class RotationTimer; +class WriterBackend; +class WriterFrontend; +class RotationFinishedMessage; -class LogMgr { +class Manager { public: - LogMgr(); - ~LogMgr(); + Manager(); + ~Manager(); // These correspond to the BiFs visible on the scripting layer. The // actual BiFs just forward here. @@ -86,19 +90,24 @@ public: bool Flush(EnumVal* id); // Flushes all writers.. protected: - friend class LogWriter; - friend class RemoteSerializer; - friend class RotationTimer; + friend class WriterFrontend; + friend class RotationFinishedMessage; + friend class ::RemoteSerializer; + friend class ::RotationTimer; + + // Instantiates a new WriterBackend of the given type (note that + // doing so creates a new thread!). + WriterBackend* CreateBackend(bro_int_t type); //// Function also used by the RemoteSerializer. // Takes ownership of fields. - LogWriter* CreateWriter(EnumVal* id, EnumVal* writer, string path, - int num_fields, LogField** fields); + WriterFrontend* CreateWriter(EnumVal* id, EnumVal* writer, string path, + int num_fields, Field** fields); // Takes ownership of values.. bool Write(EnumVal* id, EnumVal* writer, string path, - int num_fields, LogVal** vals); + int num_fields, Value** vals); // Announces all instantiated writers to peer. void SendAllWritersTo(RemoteSerializer::PeerID peer); @@ -106,14 +115,14 @@ protected: //// Functions safe to use by writers. // Signals that a file has been rotated. - bool FinishedRotation(LogWriter* writer, string new_name, string old_name, + bool FinishedRotation(WriterFrontend* writer, string new_name, string old_name, double open, double close, bool terminating); // Reports an error for the given writer. - void Error(LogWriter* writer, const char* msg); + void Error(WriterFrontend* writer, const char* msg); // Deletes the values as passed into Write(). - void DeleteVals(int num_fields, LogVal** vals); + void DeleteVals(int num_fields, Value** vals); private: struct Filter; @@ -123,20 +132,22 @@ private: bool TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, TableVal* include, TableVal* exclude, string path, list indices); - LogVal** RecordToFilterVals(Stream* stream, Filter* filter, + Value** RecordToFilterVals(Stream* stream, Filter* filter, RecordVal* columns); - LogVal* ValToLogVal(Val* val, BroType* ty = 0); + Value* ValToLogVal(Val* val, BroType* ty = 0); Stream* FindStream(EnumVal* id); void RemoveDisabledWriters(Stream* stream); void InstallRotationTimer(WriterInfo* winfo); void Rotate(WriterInfo* info); Filter* FindFilter(EnumVal* id, StringVal* filter); - WriterInfo* FindWriter(LogWriter* writer); + WriterInfo* FindWriter(WriterFrontend* writer); vector streams; // Indexed by stream enum. 
}; -extern LogMgr* log_mgr; +} + +extern logging::Manager* log_mgr; #endif diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc new file mode 100644 index 0000000000..095490edc4 --- /dev/null +++ b/src/logging/WriterBackend.cc @@ -0,0 +1,161 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "util.h" + +#include "WriterBackend.h" +#include "WriterFrontend.h" + +// Messages sent from backend to frontend (i.e., "OutputMessages"). + +namespace logging { + +class RotationFinishedMessage : public threading::OutputMessage +{ +public: + RotationFinishedMessage(WriterFrontend* writer, string new_name, string old_name, + double open, double close, bool terminating) + : threading::OutputMessage("RotationFinished", writer), + new_name(new_name), old_name(old_name), open(open), + close(close), terminating(terminating) { } + + virtual bool Process() + { + return log_mgr->FinishedRotation(Object(), new_name, old_name, open, close, terminating); + } + +private: + string new_name; + string old_name; + double open; + double close; + bool terminating; +}; + +class DisableMessage : public threading::OutputMessage +{ +public: + DisableMessage(WriterFrontend* writer) + : threading::OutputMessage("Disable", writer) {} + + virtual bool Process() { Object()->SetDisable(); return true; } +}; + +} + +// Backend methods. + +using namespace logging; + +WriterBackend::WriterBackend(const string& name) : MsgThread(name) + { + path = ""; + num_fields = 0; + fields = 0; + buffering = true; + } + +WriterBackend::~WriterBackend() + { + if ( fields ) + { + for(int i = 0; i < num_fields; ++i) + delete fields[i]; + + delete [] fields; + } + } + +void WriterBackend::DeleteVals(Value** vals) + { + // Note this code is duplicated in Manager::DeleteVals(). + for ( int i = 0; i < num_fields; i++ ) + delete vals[i]; + + delete [] vals; + } + +bool WriterBackend::FinishedRotation(WriterFrontend* writer, string new_name, string old_name, + double open, double close, bool terminating) + { + SendOut(new RotationFinishedMessage(writer, new_name, old_name, open, close, terminating)); + return true; + } + +bool WriterBackend::Init(string arg_path, int arg_num_fields, + const Field* const * arg_fields) + { + path = arg_path; + num_fields = arg_num_fields; + fields = arg_fields; + + if ( ! DoInit(arg_path, arg_num_fields, arg_fields) ) + return false; + + return true; + } + +bool WriterBackend::Write(int arg_num_fields, Value** vals) + { + // Double-check that the arguments match. If we get this from remote, + // something might be mixed up. + if ( num_fields != arg_num_fields ) + { + +#ifdef DEBUG + const char* msg = Fmt("Number of fields don't match in WriterBackend::Write() (%d vs. %d)", + arg_num_fields, num_fields); + Debug(DBG_LOGGING, msg); +#endif + + DeleteVals(vals); + return false; + } + + for ( int i = 0; i < num_fields; ++i ) + { + if ( vals[i]->type != fields[i]->type ) + { +#ifdef DEBUG + const char* msg = Fmt("Field type doesn't match in WriterBackend::Write() (%d vs. %d)", + vals[i]->type, fields[i]->type); + Debug(DBG_LOGGING, msg); +#endif + + DeleteVals(vals); + return false; + } + } + + bool result = DoWrite(num_fields, fields, vals); + + DeleteVals(vals); + + return result; + } + +bool WriterBackend::SetBuf(bool enabled) + { + if ( enabled == buffering ) + // No change. 
+ return true; + + buffering = enabled; + + return DoSetBuf(enabled); + } + +bool WriterBackend::Rotate(WriterFrontend* writer, string rotated_path, + double open, double close, bool terminating) + { + return DoRotate(writer, rotated_path, open, close, terminating); + } + +bool WriterBackend::Flush() + { + return DoFlush(); + } + +bool WriterBackend::Finish() + { + return DoFinish(); + } diff --git a/src/LogWriter.h b/src/logging/WriterBackend.h similarity index 70% rename from src/LogWriter.h rename to src/logging/WriterBackend.h index 1d2f9fa4b2..d1e4634e6d 100644 --- a/src/LogWriter.h +++ b/src/logging/WriterBackend.h @@ -1,32 +1,22 @@ // See the file "COPYING" in the main distribution directory for copyright. // -// Interface API for a log writer backend. The LogMgr creates a separate -// writer instance of pair of (writer type, output path). -// -// Note thay classes derived from LogWriter must be fully thread-safe and not -// use any non-thread-safe Bro functionality (which includes almost -// everything ...). In particular, do not use fmt() but LogWriter::Fmt()!. -// -// The one exception to this rule is the constructor: it is guaranteed to be -// executed inside the main thread and can thus in particular access global -// script variables. +// Bridge class between main process and writer threads. -#ifndef LOGWRITER_H -#define LOGWRITER_H +#ifndef LOGGING_WRITERBACKEND_H +#define LOGGING_WRITERBACKEND_H -#include "LogMgr.h" -#include "BroString.h" +#include "Manager.h" -class LogWriter { +#include "threading/MsgThread.h" + +namespace logging { + +// The backend runs in its own thread, separate from the main process. +class WriterBackend : public threading::MsgThread +{ public: - LogWriter(); - virtual ~LogWriter(); - - //// Interface methods to interact with the writer. Note that these - //// methods are not necessarily thread-safe and must be called only - //// from the main thread (which will typically mean only from the - //// LogMgr). In particular, they must not be called from the - //// writer's derived implementation. + WriterBackend(const string& name); + virtual ~WriterBackend(); // One-time initialization of the writer to define the logged fields. // Interpretation of "path" is left to the writer, and will be @@ -37,18 +27,18 @@ public: // // The new instance takes ownership of "fields", and will delete them // when done. - bool Init(string path, int num_fields, const LogField* const * fields); + bool Init(string path, int num_fields, const Field* const * fields); // Writes one log entry. The method takes ownership of "vals" and // will return immediately after queueing the write request, which is // potentially before output has actually been written out. // - // num_fields and the types of the LogVals must match what was passed + // num_fields and the types of the Values must match what was passed // to Init(). // // Returns false if an error occured, in which case the writer must // not be used any further. - bool Write(int num_fields, LogVal** vals); + bool Write(int num_fields, Value** vals); // Sets the buffering status for the writer, if the writer supports // that. (If not, it will be ignored). @@ -60,12 +50,12 @@ public: // Triggers rotation, if the writer supports that. (If not, it will // be ignored). - bool Rotate(string rotated_path, double open, double close, bool terminating); + bool Rotate(WriterFrontend* writer, string rotated_path, double open, double close, bool terminating); // Finishes writing to this logger regularly. 
Must not be called if // an error has been indicated earlier. After calling this, no // further writing must be performed. - void Finish(); + bool Finish(); //// Thread-safe methods that may be called from the writer //// implementation. @@ -73,24 +63,43 @@ public: // The following methods return the information as passed to Init(). const string Path() const { return path; } int NumFields() const { return num_fields; } - const LogField* const * Fields() const { return fields; } + const Field* const * Fields() const { return fields; } + + // Returns the current buffering state. + bool IsBuf() { return buffering; } + + // Signals to the log manager that a file has been rotated. + // + // writer: The frontend writer that triggered the rotation. This must + // be the value passed into DoRotate(). + // + // new_name: The filename of the rotated file. old_name: The filename + // of the origina file. + // + // open/close: The timestamps when the original file was opened and + // closed, respectively. + // + // terminating: True if rotation request occured due to the main Bro + // process shutting down. + bool FinishedRotation(WriterFrontend* writer, string new_name, string old_name, + double open, double close, bool terminating); protected: // Methods for writers to override. If any of these returs false, it // will be assumed that a fatal error has occured that prevents the // writer from further operation. It will then be disabled and - // deleted. When return false, the writer should also report the + // deleted. When returning false, the writer should also report the // error via Error(). Note that even if a writer does not support the // functionality for one these methods (like rotation), it must still // return true if that is not to be considered a fatal error. // // Called once for initialization of the writer. virtual bool DoInit(string path, int num_fields, - const LogField* const * fields) = 0; + const Field* const * fields) = 0; // Called once per log entry to record. - virtual bool DoWrite(int num_fields, const LogField* const * fields, - LogVal** vals) = 0; + virtual bool DoWrite(int num_fields, const Field* const * fields, + Value** vals) = 0; // Called when the buffering status for this writer is changed. If // buffering is disabled, the writer should attempt to write out @@ -119,6 +128,11 @@ protected: // RotationDone() to signal the log manager that potential // postprocessors can now run. // + // "writer" is the frontend writer that triggered the rotation. The + // *only* purpose of this value is to be passed into + // FinishedRotation() once done. You must not otherwise access the + // frontend, it's running in a different thread. + // // "rotate_path" reflects the path to where the rotated output is to // be moved, with specifics depending on the writer. It should // generally be interpreted in a way consistent with that of "path" @@ -135,52 +149,31 @@ protected: // // A writer may ignore rotation requests if it doesn't fit with its // semantics (but must still return true in that case). - virtual bool DoRotate(string rotated_path, double open, double close, - bool terminating) = 0; + virtual bool DoRotate(WriterFrontend* writer, string rotated_path, + double open, double close, bool terminating) = 0; // Called once on termination. Not called when any of the other // methods has previously signaled an error, i.e., executing this // method signals a regular shutdown of the writer. - virtual void DoFinish() = 0; + virtual bool DoFinish() = 0; - //// Methods for writers to use. 
These are thread-safe. - - // A thread-safe version of fmt(). - const char* Fmt(const char* format, ...); - - // Returns the current buffering state. - bool IsBuf() { return buffering; } - - // Reports an error to the user. - void Error(const char *msg); - - // Signals to the log manager that a file has been rotated. - // - // new_name: The filename of the rotated file. old_name: The filename - // of the origina file. - // - // open/close: The timestamps when the original file was opened and - // closed, respectively. - // - // terminating: True if rotation request occured due to the main Bro - // process shutting down. - bool FinishedRotation(string new_name, string old_name, double open, - double close, bool terminating); + // Triggered by regular heartbeat messages from the main process. + virtual bool DoHeartbeat(double network_time, double current_time) { return true; }; private: - friend class LogMgr; + friend class Manager; // When an error occurs, we call this method to set a flag marking - // the writer as disabled. The LogMgr will check the flag later and + // the writer as disabled. The Manager will check the flag later and // remove the writer. bool Disabled() { return disabled; } // Deletes the values as passed into Write(). - void DeleteVals(LogVal** vals); + void DeleteVals(Value** vals); string path; int num_fields; - const LogField* const * fields; + const Field* const * fields; bool buffering; bool disabled; @@ -189,4 +182,8 @@ private: unsigned int buf_len; }; + +} + #endif + diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc new file mode 100644 index 0000000000..92c93c1c56 --- /dev/null +++ b/src/logging/WriterFrontend.cc @@ -0,0 +1,175 @@ + +#include "WriterFrontend.h" +#include "WriterBackend.h" + +namespace logging { + +// Messages sent from frontend to backend (i.e., "InputMessages"). 
+ +class InitMessage : public threading::InputMessage +{ +public: + InitMessage(WriterBackend* backend, const string path, const int num_fields, const Field* const *fields) + : threading::InputMessage("Init", backend), + path(path), num_fields(num_fields), fields(fields) { } + + virtual bool Process() { return Object()->Init(path, num_fields, fields); } + +private: + const string path; + const int num_fields; + const Field * const* fields; +}; + +class RotateMessage : public threading::InputMessage +{ +public: + RotateMessage(WriterBackend* backend, WriterFrontend* frontend, const string rotated_path, const double open, + const double close, const bool terminating) + : threading::InputMessage("Rotate", backend), + frontend(frontend), + rotated_path(rotated_path), open(open), + close(close), terminating(terminating) { } + + virtual bool Process() { return Object()->Rotate(frontend, rotated_path, open, close, terminating); } + +private: + WriterFrontend* frontend; + const string rotated_path; + const double open; + const double close; + const bool terminating; +}; + +class WriteMessage : public threading::InputMessage +{ +public: + WriteMessage(WriterBackend* backend, const int num_fields, Value **vals) + : threading::InputMessage("Write", backend), + num_fields(num_fields), fields(fields), vals(vals) {} + + virtual bool Process() { return Object()->Write(num_fields, vals); } + +private: + int num_fields; + Field* const* fields; + Value **vals; +}; + +class SetBufMessage : public threading::InputMessage +{ +public: + SetBufMessage(WriterBackend* backend, const bool enabled) + : threading::InputMessage("SetBuf", backend), + enabled(enabled) { } + + virtual bool Process() { return Object()->SetBuf(enabled); } + +private: + const bool enabled; +}; + +class FlushMessage : public threading::InputMessage +{ +public: + FlushMessage(WriterBackend* backend) + : threading::InputMessage("Flush", backend) {} + + virtual bool Process() { return Object()->Flush(); } +}; + +class FinishMessage : public threading::InputMessage +{ +public: + FinishMessage(WriterBackend* backend) + : threading::InputMessage("Finish", backend) {} + + virtual bool Process() { return Object()->Finish(); } +}; + +} + +// Frontend methods. 
+ +using namespace logging; + +WriterFrontend::WriterFrontend(bro_int_t type) + { + disabled = initialized = false; + backend = log_mgr->CreateBackend(type); + + assert(backend); + backend->Start(); + } + +WriterFrontend::~WriterFrontend() + { + } + +void WriterFrontend::Stop() + { + SetDisable(); + backend->Stop(); + } + +void WriterFrontend::Init(string arg_path, int arg_num_fields, const Field* const * arg_fields) + { + if ( disabled ) + return; + + if ( initialized ) + reporter->InternalError("writer initialize twice"); + + path = arg_path; + num_fields = arg_num_fields; + fields = arg_fields; + + initialized = true; + backend->SendIn(new InitMessage(backend, arg_path, arg_num_fields, arg_fields)); + } + +void WriterFrontend::Write(int num_fields, Value** vals) + { + if ( disabled ) + return; + + backend->SendIn(new WriteMessage(backend, num_fields, vals)); + } + +void WriterFrontend::SetBuf(bool enabled) + { + if ( disabled ) + return; + + backend->SendIn(new SetBufMessage(backend, enabled)); + } + +void WriterFrontend::Flush() + { + if ( disabled ) + return; + + backend->SendIn(new FlushMessage(backend)); + } + +void WriterFrontend::Rotate(string rotated_path, double open, double close, bool terminating) + { + if ( disabled ) + return; + + backend->SendIn(new RotateMessage(backend, this, rotated_path, open, close, terminating)); + } + +void WriterFrontend::Finish() + { + if ( disabled ) + return; + + backend->SendIn(new FinishMessage(backend)); + } + + + + + + diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h new file mode 100644 index 0000000000..1998429d38 --- /dev/null +++ b/src/logging/WriterFrontend.h @@ -0,0 +1,66 @@ +// See the file "COPYING" in the main distribution directory for copyright. +// +// Bridge class between main process and writer threads. + +#ifndef LOGGING_WRITERFRONTEND_H +#define LOGGING_WRITERFRONTEND_H + +#include "Manager.h" + +#include "threading/MsgThread.h" + +namespace logging { + +class WriterBackend; + +class WriterFrontend { +public: + WriterFrontend(bro_int_t type); + virtual ~WriterFrontend(); + + // Disables the writers and stop the backend thread. + void Stop(); + + // Interface methods to interact with the writer from the main thread + // (and only from the main thread), typicalli from the log manager. + // All these methods forward (via inter-thread messaging) to the + // corresponding methods of an internally created WriterBackend. See + // there for documentation. + // + // If any of these operations fails, the writer will be automatically + // (but asynchronoulsy) disabled. + + void Init(string path, int num_fields, const Field* const * fields); + void Write(int num_fields, Value** vals); + void SetBuf(bool enabled); + void Flush(); + void Rotate(string rotated_path, double open, double close, bool terminating); + void Finish(); + + // Calling this disable the writer. All methods calls will be no-ops + // from now on. The Manager will eventually remove disabled writers. 
+ void SetDisable() { disabled = true; } + bool Disabled() { return disabled; } + + const string Path() const { return path; } + int NumFields() const { return num_fields; } + const Field* const * Fields() const { return fields; } + +protected: + friend class Manager; + + + WriterBackend* backend; + bool disabled; + bool initialized; + + string path; + int num_fields; + const Field* const * fields; +}; + +} + + + +#endif diff --git a/src/LogWriterAscii.cc b/src/logging/writers/Ascii.cc similarity index 89% rename from src/LogWriterAscii.cc rename to src/logging/writers/Ascii.cc index d2c1d91370..70f513be3b 100644 --- a/src/LogWriterAscii.cc +++ b/src/logging/writers/Ascii.cc @@ -3,10 +3,14 @@ #include #include -#include "LogWriterAscii.h" -#include "NetVar.h" +#include "../../NetVar.h" -LogWriterAscii::LogWriterAscii() +#include "Ascii.h" + +using namespace logging; +using namespace writer; + +Ascii::Ascii() : WriterBackend("Ascii") { file = 0; @@ -42,7 +46,7 @@ LogWriterAscii::LogWriterAscii() desc.AddEscapeSequence(separator, separator_len); } -LogWriterAscii::~LogWriterAscii() +Ascii::~Ascii() { if ( file ) fclose(file); @@ -54,7 +58,7 @@ LogWriterAscii::~LogWriterAscii() delete [] header_prefix; } -bool LogWriterAscii::WriteHeaderField(const string& key, const string& val) +bool Ascii::WriteHeaderField(const string& key, const string& val) { string str = string(header_prefix, header_prefix_len) + key + string(separator, separator_len) + val + "\n"; @@ -62,8 +66,8 @@ bool LogWriterAscii::WriteHeaderField(const string& key, const string& val) return (fwrite(str.c_str(), str.length(), 1, file) == 1); } -bool LogWriterAscii::DoInit(string path, int num_fields, - const LogField* const * fields) +bool Ascii::DoInit(string path, int num_fields, + const Field* const * fields) { if ( output_to_stdout ) path = "/dev/stdout"; @@ -108,7 +112,7 @@ bool LogWriterAscii::DoInit(string path, int num_fields, types += string(separator, separator_len); } - const LogField* field = fields[i]; + const Field* field = fields[i]; names += field->name; types += type_name(field->type); if ( (field->type == TYPE_TABLE) || (field->type == TYPE_VECTOR) ) @@ -131,17 +135,18 @@ write_error: return false; } -bool LogWriterAscii::DoFlush() +bool Ascii::DoFlush() { fflush(file); return true; } -void LogWriterAscii::DoFinish() +bool Ascii::DoFinish() { + return true; } -bool LogWriterAscii::DoWriteOne(ODesc* desc, LogVal* val, const LogField* field) +bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) { if ( ! val->present ) { @@ -281,8 +286,8 @@ bool LogWriterAscii::DoWriteOne(ODesc* desc, LogVal* val, const LogField* field) return true; } -bool LogWriterAscii::DoWrite(int num_fields, const LogField* const * fields, - LogVal** vals) +bool Ascii::DoWrite(int num_fields, const Field* const * fields, + Value** vals) { if ( ! file ) DoInit(Path(), NumFields(), Fields()); @@ -312,8 +317,8 @@ bool LogWriterAscii::DoWrite(int num_fields, const LogField* const * fields, return true; } -bool LogWriterAscii::DoRotate(string rotated_path, double open, - double close, bool terminating) +bool Ascii::DoRotate(WriterFrontend* writer, string rotated_path, double open, + double close, bool terminating) { // Don't rotate special files or if there's not one currently open. if ( ! file || IsSpecial(Path()) ) @@ -325,7 +330,7 @@ bool LogWriterAscii::DoRotate(string rotated_path, double open, string nname = rotated_path + "." + LogExt(); rename(fname.c_str(), nname.c_str()); - if ( ! 
FinishedRotation(nname, fname, open, close, terminating) ) + if ( ! FinishedRotation(writer, nname, fname, open, close, terminating) ) { Error(Fmt("error rotating %s to %s", fname.c_str(), nname.c_str())); return false; @@ -334,13 +339,13 @@ bool LogWriterAscii::DoRotate(string rotated_path, double open, return true; } -bool LogWriterAscii::DoSetBuf(bool enabled) +bool Ascii::DoSetBuf(bool enabled) { // Nothing to do. return true; } -string LogWriterAscii::LogExt() +string Ascii::LogExt() { const char* ext = getenv("BRO_LOG_SUFFIX"); if ( ! ext ) ext = "log"; diff --git a/src/LogWriterAscii.h b/src/logging/writers/Ascii.h similarity index 57% rename from src/LogWriterAscii.h rename to src/logging/writers/Ascii.h index 72127c8b1f..37fcfef267 100644 --- a/src/LogWriterAscii.h +++ b/src/logging/writers/Ascii.h @@ -2,33 +2,35 @@ // // Log writer for delimiter-separated ASCII logs. -#ifndef LOGWRITERASCII_H -#define LOGWRITERASCII_H +#ifndef LOGGING_WRITER_ASCII_H +#define LOGGING_WRITER_ASCII_H -#include "LogWriter.h" +#include "../WriterBackend.h" -class LogWriterAscii : public LogWriter { +namespace logging { namespace writer { + +class Ascii : public WriterBackend { public: - LogWriterAscii(); - ~LogWriterAscii(); + Ascii(); + ~Ascii(); - static LogWriter* Instantiate() { return new LogWriterAscii; } + static WriterBackend* Instantiate() { return new Ascii; } static string LogExt(); protected: virtual bool DoInit(string path, int num_fields, - const LogField* const * fields); - virtual bool DoWrite(int num_fields, const LogField* const * fields, - LogVal** vals); + const Field* const * fields); + virtual bool DoWrite(int num_fields, const Field* const * fields, + Value** vals); virtual bool DoSetBuf(bool enabled); - virtual bool DoRotate(string rotated_path, double open, double close, - bool terminating); + virtual bool DoRotate(WriterFrontend* writer, string rotated_path, + double open, double close, bool terminating); virtual bool DoFlush(); - virtual void DoFinish(); + virtual bool DoFinish(); private: bool IsSpecial(string path) { return path.find("/dev/") == 0; } - bool DoWriteOne(ODesc* desc, LogVal* val, const LogField* field); + bool DoWriteOne(ODesc* desc, Value* val, const Field* field); bool WriteHeaderField(const string& key, const string& value); FILE* file; @@ -55,4 +57,8 @@ private: int header_prefix_len; }; +} +} + + #endif diff --git a/src/logging/writers/None.cc b/src/logging/writers/None.cc new file mode 100644 index 0000000000..e419d88a6b --- /dev/null +++ b/src/logging/writers/None.cc @@ -0,0 +1,19 @@ + +#include "None.h" + +using namespace logging; +using namespace writer; + +bool None::DoRotate(WriterFrontend* writer, string rotated_path, + double open, double close, bool terminating) + { + if ( ! FinishedRotation(writer, string("/dev/null"), Path(), open, close, terminating)) + { + Error(Fmt("error rotating %s", Path().c_str())); + return false; + } + + return true; + } + + diff --git a/src/logging/writers/None.h b/src/logging/writers/None.h new file mode 100644 index 0000000000..9b2ab6c698 --- /dev/null +++ b/src/logging/writers/None.h @@ -0,0 +1,35 @@ +// See the file "COPYING" in the main distribution directory for copyright. +// +// Dummy log writer that just discards everything (but still pretends to rotate). 
+ +#ifndef LOGGING_WRITER_NONE_H +#define LOGGING_WRITER_NONE_H + +#include "../WriterBackend.h" + +namespace logging { namespace writer { + +class None : public WriterBackend { +public: + None() : WriterBackend("None") {} + ~None() {}; + + static WriterBackend* Instantiate() { return new None; } + +protected: + virtual bool DoInit(string path, int num_fields, + const Field* const * fields) { return true; } + + virtual bool DoWrite(int num_fields, const Field* const * fields, + Value** vals) { return true; } + virtual bool DoSetBuf(bool enabled) { return true; } + virtual bool DoRotate(WriterFrontend* writer, string rotated_path, + double open, double close, bool terminating); + virtual bool DoFlush() { return true; } + virtual bool DoFinish() { return true; } +}; + +} +} + +#endif diff --git a/src/main.cc b/src/main.cc index bcc0498123..58a23e6c80 100644 --- a/src/main.cc +++ b/src/main.cc @@ -29,7 +29,6 @@ extern "C" void OPENSSL_add_all_algorithms_conf(void); #include "Event.h" #include "File.h" #include "Reporter.h" -#include "LogMgr.h" #include "Net.h" #include "NetVar.h" #include "Var.h" @@ -48,7 +47,10 @@ extern "C" void OPENSSL_add_all_algorithms_conf(void); #include "DPM.h" #include "BroDoc.h" #include "Brofiler.h" -#include "LogWriterAscii.h" + +#include "threading/Manager.h" +#include "logging/Manager.h" +#include "logging/writers/Ascii.h" #include "binpac_bro.h" @@ -75,7 +77,8 @@ char* writefile = 0; name_list prefixes; DNS_Mgr* dns_mgr; TimerMgr* timer_mgr; -LogMgr* log_mgr; +logging::Manager* log_mgr = 0; +threading::Manager* thread_mgr = 0; Stmt* stmts; EventHandlerPtr net_done = 0; RuleMatcher* rule_matcher = 0; @@ -197,7 +200,7 @@ void usage() fprintf(stderr, " $BRO_PREFIXES | prefix list (%s)\n", bro_prefixes()); fprintf(stderr, " $BRO_DNS_FAKE | disable DNS lookups (%s)\n", bro_dns_fake()); fprintf(stderr, " $BRO_SEED_FILE | file to load seeds from (not set)\n"); - fprintf(stderr, " $BRO_LOG_SUFFIX | ASCII log file extension (.%s)\n", LogWriterAscii::LogExt().c_str()); + fprintf(stderr, " $BRO_LOG_SUFFIX | ASCII log file extension (.%s)\n", logging::writer::Ascii::LogExt().c_str()); fprintf(stderr, " $BRO_PROFILER_FILE | Output file for script execution statistics (not set)\n"); exit(1); @@ -287,6 +290,8 @@ void terminate_bro() if ( remote_serializer ) remote_serializer->LogStats(); + thread_mgr->Terminate(); + delete timer_mgr; delete dns_mgr; delete persistence_serializer; @@ -299,6 +304,7 @@ void terminate_bro() delete remote_serializer; delete dpm; delete log_mgr; + delete thread_mgr; delete reporter; } @@ -661,7 +667,9 @@ int main(int argc, char** argv) set_processing_status("INITIALIZING", "main"); bro_start_time = current_time(true); + reporter = new Reporter(); + thread_mgr = new threading::Manager(); #ifdef DEBUG if ( debug_streams ) @@ -727,7 +735,7 @@ int main(int argc, char** argv) persistence_serializer = new PersistenceSerializer(); remote_serializer = new RemoteSerializer(); event_registry = new EventRegistry(); - log_mgr = new LogMgr(); + log_mgr = new logging::Manager(); if ( events_file ) event_player = new EventPlayer(events_file); @@ -1001,6 +1009,8 @@ int main(int argc, char** argv) have_pending_timers = ! 
reading_traces && timer_mgr->Size() > 0; + io_sources.Register(thread_mgr, true); + if ( io_sources.Size() > 0 || have_pending_timers ) { if ( profiling_logger ) diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc new file mode 100644 index 0000000000..273a192de3 --- /dev/null +++ b/src/threading/BasicThread.cc @@ -0,0 +1,129 @@ + +#include +#include + +#include "BasicThread.h" +#include "Manager.h" + +using namespace threading; + +BasicThread::BasicThread(const string& arg_name) + { + started = false; + terminating = false; + pthread = 0; + + buf = 0; + buf_len = 1024; + + char tmp[128]; + snprintf(tmp, sizeof(tmp), "%s@%p", arg_name.c_str(), this); + name = string(tmp); + + thread_mgr->AddThread(this); + } + +BasicThread::~BasicThread() + { + } + +const char* BasicThread::Fmt(const char* format, ...) + { + if ( ! buf ) + buf = (char*) malloc(buf_len); + + va_list al; + va_start(al, format); + int n = safe_vsnprintf(buf, buf_len, format, al); + va_end(al); + + if ( (unsigned int) n >= buf_len ) + { // Not enough room, grow the buffer. + buf_len = n + 32; + buf = (char*) realloc(buf, buf_len); + + // Is it portable to restart? + va_start(al, format); + n = safe_vsnprintf(buf, buf_len, format, al); + va_end(al); + } + + return buf; + } + +void BasicThread::Start() + { + if ( sem_init(&terminate, 0, 0) != 0 ) + reporter->FatalError("Cannot create terminate semaphore for thread %s", name.c_str()); + + if ( pthread_create(&pthread, 0, BasicThread::launcher, this) != 0 ) + reporter->FatalError("Cannot create thread %s", name.c_str()); + + DBG_LOG(DBG_THREADING, "Started thread %s", name.c_str()); + + started = true; + + OnStart(); + } + +void BasicThread::Stop() + { + if ( ! started ) + return; + + if ( terminating ) + return; + + DBG_LOG(DBG_THREADING, "Signaling thread %s to terminate ...", name.c_str()); + + // Signal that it's ok for the thread to exit now. + if ( sem_post(&terminate) != 0 ) + reporter->FatalError("Failure flagging terminate condition for thread %s", name.c_str()); + + terminating = true; + + OnStop(); + } + +void BasicThread::Join() + { + if ( ! started ) + return; + + if ( ! terminating ) + Stop(); + + DBG_LOG(DBG_THREADING, "Joining thread %s ...", name.c_str()); + + if ( pthread_join(pthread, 0) != 0 ) + reporter->FatalError("Failure joining thread %s", name.c_str()); + + sem_destroy(&terminate); + + DBG_LOG(DBG_THREADING, "Done with thread %s", name.c_str()); + + pthread = 0; + } + +void* BasicThread::launcher(void *arg) + { + BasicThread* thread = (BasicThread *)arg; + + // Block signals in thread. We handle signals only in the main + // process. + sigset_t mask_set; + sigfillset(&mask_set); + int res = pthread_sigmask(SIG_BLOCK, &mask_set, 0); + assert(res == 0); // + + // Run thread's main function. + thread->Run(); + + // Wait until somebody actually wants us to terminate. + + if ( sem_wait(&thread->terminate) != 0 ) + reporter->FatalError("Failure flagging terminate condition for thread %s", thread->Name().c_str()); + + return 0; + } + diff --git a/src/threading/BasicThread.h b/src/threading/BasicThread.h new file mode 100644 index 0000000000..30a11b4505 --- /dev/null +++ b/src/threading/BasicThread.h @@ -0,0 +1,63 @@ + +#ifndef THREADING_BASICTHREAD_H +#define THREADING_BASICTHREAD_H + +#include +#include + +#include "Queue.h" +#include "util.h" + +using namespace std; + +namespace threading { + +class Manager; + +class BasicThread +{ +public: + BasicThread(const string& name); // Managed by manager, must not delete otherwise. 
+ virtual ~BasicThread(); + + const string& Name() const { return name; } + + void Start(); // Spawns the thread and enters Run(). + void Stop(); // Signals the thread to terminate. + + bool Terminating() const { return terminating; } + + // A thread-safe version of fmt(). + const char* Fmt(const char* format, ...); + +protected: + virtual void Run() = 0; + + virtual void OnStart() {} + virtual void OnStop() {} + +private: + friend class Manager; + + static void* launcher(void *arg); + + // Used from the ThreadMgr. + void Join(); // Waits until the thread has terminated and then joins it. + + bool started; // Set to to true once running. + bool terminating; // Set to to true to signal termination. + string name; + + pthread_t pthread; + sem_t terminate; + + // For implementing Fmt(). + char* buf; + unsigned int buf_len; +}; + +} + +extern threading::Manager* thread_mgr; + +#endif diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc new file mode 100644 index 0000000000..ed4d9cf623 --- /dev/null +++ b/src/threading/Manager.cc @@ -0,0 +1,104 @@ + +#include "Manager.h" + +using namespace threading; + +Manager::Manager() + { + DBG_LOG(DBG_THREADING, "Creating thread manager ..."); + + did_process = false; + next_beat = 0; + } + +Manager::~Manager() + { + if ( all_threads.size() ) + Terminate(); + } + +void Manager::Terminate() + { + DBG_LOG(DBG_THREADING, "Terminating thread manager ..."); + + // First process remaining thread output for the message threads. + do Process(); while ( did_process ); + + // Signal all to stop. + for ( all_thread_list::iterator i = all_threads.begin(); i != all_threads.end(); i++ ) + (*i)->Stop(); + + // Then join them all. + for ( all_thread_list::iterator i = all_threads.begin(); i != all_threads.end(); i++ ) + { + (*i)->Join(); + delete *i; + } + + all_threads.clear(); + msg_threads.clear(); + } + +void Manager::AddThread(BasicThread* thread) + { + DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name().c_str()); + all_threads.push_back(thread); + } + +void Manager::AddMsgThread(MsgThread* thread) + { + DBG_LOG(DBG_THREADING, "%s is a MsgThread ...", thread->Name().c_str()); + msg_threads.push_back(thread); + } + +void Manager::GetFds(int* read, int* write, int* except) + { + } + +double Manager::NextTimestamp(double* network_time) + { + if ( did_process || ! next_beat == 0 ) + // If we had something to process last time (or haven't had a + // chance to check yet), we want to check for more asap. + return timer_mgr->Time(); + + // Else we assume we don't have much to do at all and wait for the next heart beat. + return next_beat; + } + +void Manager::Process() + { + bool do_beat = (next_beat == 0 || network_time >= next_beat); + + did_process = false; + + for ( msg_thread_list::iterator i = msg_threads.begin(); i != msg_threads.end(); i++ ) + { + MsgThread* t = *i; + + if ( do_beat ) + t->Heartbeat(); + + if ( ! 
t->HasOut() ) + continue; + + Message* msg = t->RetrieveOut(); + + if ( msg->Process() ) + did_process = true; + + else + { + string s = msg->Name() + " failed, terminating thread"; + reporter->Error(s.c_str()); + t->Stop(); + } + + delete msg; + } + + if ( do_beat ) + next_beat = network_time + HEART_BEAT_INTERVAL; + } + + diff --git a/src/threading/Manager.h b/src/threading/Manager.h new file mode 100644 index 0000000000..aa7292ee81 --- /dev/null +++ b/src/threading/Manager.h @@ -0,0 +1,52 @@ + +#ifndef THREADING_MANAGER_H +#define THREADING_MANAGER_H + +#include + +#include "IOSource.h" + +#include "BasicThread.h" +#include "MsgThread.h" + +namespace threading { + +class Manager : public IOSource +{ +public: + Manager(); + ~Manager(); + + void Terminate(); + +protected: + friend class BasicThread; + friend class MsgThread; + + void AddThread(BasicThread* thread); + void AddMsgThread(MsgThread* thread); + + // IOSource interface. + virtual void GetFds(int* read, int* write, int* except); + virtual double NextTimestamp(double* network_time); + virtual void Process(); + virtual const char* Tag() { return "threading::Manager"; } + +private: + static const int HEART_BEAT_INTERVAL = 1; + + typedef std::list all_thread_list; + all_thread_list all_threads; + + typedef std::list msg_thread_list; + msg_thread_list msg_threads; + + bool did_process; + double next_beat; +}; + +} + +extern threading::Manager* thread_mgr; + +#endif diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc new file mode 100644 index 0000000000..e2d81cf47f --- /dev/null +++ b/src/threading/MsgThread.cc @@ -0,0 +1,285 @@ + +#include "DebugLogger.h" + +#include "MsgThread.h" +#include "Manager.h" + +using namespace threading; + +static void strreplace(const string& s, const string& o, const string& n) + { + string r = s; + + while ( true ) + { + size_t i = r.find(o); + + if ( i == std::string::npos ) + break; + + r.replace(i, o.size(), n); + } + } + +namespace threading { + +// Standard messages. + +class TerminateMessage : public InputMessage +{ +public: + TerminateMessage(MsgThread* thread) : InputMessage("Terminate", thread) { } + + virtual bool Process() { return true; } +}; + +class ReporterMessage : public OutputMessage +{ +public: + enum Type { + INFO, WARNING, ERROR, FATAL_ERROR, FATAL_ERROR_WITH_CORE, + INTERNAL_WARNING, INTERNAL_ERROR + }; + + ReporterMessage(Type arg_type, MsgThread* thread, const string& arg_msg) + : OutputMessage("ReporterMessage", thread) + { type = arg_type; msg = arg_msg; } + + virtual bool Process(); + +private: + string msg; + Type type; +}; + +class HeartbeatMessage : public InputMessage +{ +public: + HeartbeatMessage(MsgThread* thread, double arg_network_time, double arg_current_time) + : InputMessage("Heartbeat", thread) + { network_time = arg_network_time; current_time = arg_current_time; } + + virtual bool Process() { return Object()->DoHeartbeat(network_time, current_time); } + +private: + double network_time; + double current_time; +}; + +#ifdef DEBUG +class DebugMessage : public OutputMessage +{ +public: + DebugMessage(DebugStream arg_stream, MsgThread* thread, const string& arg_msg) + : OutputMessage("DebugMessage", thread) + { stream = arg_stream; msg = arg_msg; } + + virtual bool Process() + { + string s = Object()->Name() + ": " + msg; + strreplace(s, "%", "%%"); + debug_logger.Log(stream, s.c_str()); + return true; + } +private: + string msg; + DebugStream stream; +}; +#endif + +} + +// Methods. 
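
To make the hand-off between Manager::Process() and a MsgThread concrete, the following is a minimal, self-contained sketch of the same idea: messages carry their own Process() logic, a producer queues them, and a consumer loop drains and executes them. The ToyMessage and ToyQueue names are invented for illustration only; the real code uses the typed InputMessage/OutputMessage classes and the multi-queue Queue_ class added elsewhere in this patch, whose Get() blocks on a condition variable instead of returning null.

    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <queue>
    #include <string>
    #include <utility>

    // A message carries its own processing logic, like Message::Process().
    struct ToyMessage {
        explicit ToyMessage(std::string n) : name(std::move(n)) {}
        virtual ~ToyMessage() {}
        virtual bool Process()
            {
            printf("processing %s\n", name.c_str());
            return true;
            }
        std::string name;
    };

    // A single mutex-protected queue (the real Queue_ rotates over several
    // internal queues to reduce lock contention).
    class ToyQueue {
    public:
        void Put(std::unique_ptr<ToyMessage> m)
            {
            std::lock_guard<std::mutex> lock(mtx);
            q.push(std::move(m));
            }

        std::unique_ptr<ToyMessage> Get()
            {
            std::lock_guard<std::mutex> lock(mtx);
            if ( q.empty() )
                return nullptr;
            std::unique_ptr<ToyMessage> m = std::move(q.front());
            q.pop();
            return m;
            }

    private:
        std::mutex mtx;
        std::queue<std::unique_ptr<ToyMessage> > q;
    };

    int main()
        {
        ToyQueue out;

        // Child side: report something by queuing an output message.
        out.Put(std::unique_ptr<ToyMessage>(new ToyMessage("ReporterMessage")));
        out.Put(std::unique_ptr<ToyMessage>(new ToyMessage("DebugMessage")));

        // Manager side: drain pending messages and Process() each one,
        // stopping if Process() reports failure.
        while ( std::unique_ptr<ToyMessage> msg = out.Get() )
            {
            if ( ! msg->Process() )
                break;
            }

        return 0;
        }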
+ +Message::~Message() + { + } + +bool ReporterMessage::Process() + { + string s = Object()->Name() + ": " + msg; + strreplace(s, "%", "%%"); + + const char* cmsg = s.c_str(); + + switch ( type ) { + + case INFO: + reporter->Info(cmsg); + break; + + case WARNING: + reporter->Warning(cmsg); + break; + + case ERROR: + reporter->Error(cmsg); + break; + + case FATAL_ERROR: + reporter->FatalError(cmsg); + break; + + case FATAL_ERROR_WITH_CORE: + reporter->FatalErrorWithCore(cmsg); + break; + + case INTERNAL_WARNING: + reporter->InternalWarning(cmsg); + break; + + case INTERNAL_ERROR : + reporter->InternalError(cmsg); + break; + + default: + reporter->InternalError("unknown ReporterMessage type %d", type); + } + + return true; + } + +MsgThread::MsgThread(const string& name) : BasicThread(name) + { + cnt_sent_in = cnt_sent_out = 0; + thread_mgr->AddMsgThread(this); + } + +void MsgThread::OnStop() + { + // This is to unblock the current queue read operation. + SendIn(new TerminateMessage(this), true); + } + +void MsgThread::Heartbeat() + { + SendIn(new HeartbeatMessage(this, network_time, current_time())); + } + +void MsgThread::Info(const char* msg) + { + SendOut(new ReporterMessage(ReporterMessage::INFO, this, msg)); + } + +void MsgThread::Warning(const char* msg) + { + SendOut(new ReporterMessage(ReporterMessage::WARNING, this, msg)); + } + +void MsgThread::Error(const char* msg) + { + SendOut(new ReporterMessage(ReporterMessage::ERROR, this, msg)); + } + +void MsgThread::FatalError(const char* msg) + { + SendOut(new ReporterMessage(ReporterMessage::FATAL_ERROR, this, msg)); + } + +void MsgThread::FatalErrorWithCore(const char* msg) + { + SendOut(new ReporterMessage(ReporterMessage::FATAL_ERROR_WITH_CORE, this, msg)); + } + +void MsgThread::InternalWarning(const char* msg) + { + SendOut(new ReporterMessage(ReporterMessage::INTERNAL_WARNING, this, msg)); + } + +void MsgThread::InternalError(const char* msg) + { + SendOut(new ReporterMessage(ReporterMessage::INTERNAL_ERROR, this, msg)); + } + +#ifdef DEBUG + +void MsgThread::Debug(DebugStream stream, const char* msg) + { + SendOut(new DebugMessage(stream, this, msg)); + } + +#endif + +void MsgThread::SendIn(BasicInputMessage* msg, bool force) + { + if ( Terminating() && ! force ) + return; + + DBG_LOG(DBG_THREADING, "Sending '%s' to %s ...", msg->Name().c_str(), Name().c_str()); + + queue_in.Put(msg); + ++cnt_sent_in; + } + + +void MsgThread::SendOut(BasicOutputMessage* msg, bool force) + { + if ( Terminating() && ! force ) + return; + + queue_out.Put(msg); + ++cnt_sent_out; + } + +BasicOutputMessage* MsgThread::RetrieveOut() + { + BasicOutputMessage* msg = queue_out.Get(); + assert(msg); + +#ifdef DEBUG + if ( msg->Name() != "DebugMessage" ) // Avoid recursion. + { + string s = Fmt("Retrieved '%s' from %s", msg->Name().c_str(), Name().c_str()); + Debug(DBG_THREADING, s.c_str()); + } +#endif + + return msg; + } + +BasicInputMessage* MsgThread::RetrieveIn() + { + BasicInputMessage* msg = queue_in.Get(); + assert(msg); + +#ifdef DEBUG + string s = Fmt("Retrieved '%s' in %s", msg->Name().c_str(), Name().c_str()); + Debug(DBG_THREADING, s.c_str()); +#endif + + return msg; + } + +void MsgThread::Run() + { + while ( true ) + { + // When requested to terminate, we only do so when + // all input has been processed. + if ( Terminating() && ! queue_in.Ready() ) + break; + + BasicInputMessage* msg = RetrieveIn(); + + bool result = msg->Process(); + + if ( ! 
result ) + { + string s = msg->Name() + " failed, terminating thread"; + Error(s.c_str()); + Stop(); + break; + } + + delete msg; + } + } + +void MsgThread::GetStats(Stats* stats) + { + stats->sent_in = cnt_sent_in; + stats->sent_out = cnt_sent_out; + stats->pending_in = cnt_sent_in - queue_in.Size(); + stats->pending_out = cnt_sent_out - queue_out.Size(); + } + diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h new file mode 100644 index 0000000000..2e976c1773 --- /dev/null +++ b/src/threading/MsgThread.h @@ -0,0 +1,157 @@ + +#ifndef THREADING_MSGTHREAD_H +#define THREADING_MSGTHREAD_H + +#include + +#include "DebugLogger.h" + +#include "BasicThread.h" +#include "Queue.h" + +namespace threading { + +class BasicInputMessage; +class BasicOutputMessage; +class HeartbeatMessage; + +class MsgThread : public BasicThread +{ +public: + MsgThread(const string& name); + + void SendIn(BasicInputMessage* msg) { return SendIn(msg, false); } + void SendOut(BasicOutputMessage* msg) { return SendOut(msg, false); } + + BasicOutputMessage* RetrieveOut(); + + // Report an informational message, nothing that needs specific + // attention. + void Info(const char* msg); + + // Report a warning that may indicate a problem. + void Warning(const char* msg); + + // Report a non-fatal error. Processing proceeds normally after the error + // has been reported. + void Error(const char* msg); + + // Report a fatal error. Bro will terminate after the message has been + // reported. + void FatalError(const char* msg); + + // Report a fatal error. Bro will terminate after the message has been + // reported and always generate a core dump. + void FatalErrorWithCore(const char* msg); + + // Report about a potential internal problem. Bro will continue + // normally. + void InternalWarning(const char* msg); + + // Report an internal program error. Bro will terminate with a core + // dump after the message has been reported. + void InternalError(const char* msg); + +#ifdef DEBUG + // Records a debug message for the given stream. + void Debug(DebugStream stream, const char* msg); +#endif + + void Heartbeat(); + + struct Stats + { + uint64_t sent_in; + uint64_t sent_out; + uint64_t pending_in; + uint64_t pending_out; + }; + + void GetStats(Stats* stats); + +protected: + friend class HeartbeatMessage; + + virtual void Run(); + virtual void OnStop(); + + virtual bool DoHeartbeat(double network_time, double current_time) { return true; } + +private: + friend class Manager; + + BasicInputMessage* RetrieveIn(); + + void SendIn(BasicInputMessage* msg, bool force); + void SendOut(BasicOutputMessage* msg, bool force); + + bool HasIn() { return queue_in.Ready(); } + bool HasOut() { return queue_out.Ready(); } + + Queue_ queue_in; + Queue_ queue_out; + + uint64_t cnt_sent_in; + uint64_t cnt_sent_out; +}; + +class Message +{ +public: + virtual ~Message(); + + const string& Name() const { return name; } + + virtual bool Process() = 0; // Thread will be terminated if returngin false. 
+ +protected: + Message(const string& arg_name) { name = arg_name; } + +private: + string name; +}; + +class BasicInputMessage : public Message +{ +protected: + BasicInputMessage(const string& name) : Message(name) {} +}; + +class BasicOutputMessage : public Message +{ +protected: + BasicOutputMessage(const string& name) : Message(name) {} +}; + +template +class InputMessage : public BasicInputMessage +{ +public: + O* Object() const { return object; } + +protected: + InputMessage(const string& name, O* arg_object) : BasicInputMessage(name) + { object = arg_object; } + +private: + O* object; +}; + +template +class OutputMessage : public BasicOutputMessage +{ +public: + O* Object() const { return object; } + +protected: + OutputMessage(const string& name, O* arg_object) : BasicOutputMessage(name) + { object = arg_object; } + +private: + O* object; +}; + +} + + +#endif diff --git a/src/threading/Queue.h b/src/threading/Queue.h new file mode 100644 index 0000000000..49859dc051 --- /dev/null +++ b/src/threading/Queue.h @@ -0,0 +1,150 @@ + +#ifndef THREADING_QUEUE_H +#define THREADING_QUEUE_H + +#include +#include +#include +#include + +#include "Reporter.h" + +namespace threading { + +/** + * Just a simple threaded queue wrapper class. Uses multiple queues and reads / writes in rotary fashion in an attempt to limit contention. + * Due to locking granularity, bulk put / get is no faster than single put / get as long as FIFO guarantee is required. + */ + +template +class Queue_ +{ +public: + Queue_(); + ~Queue_(); + + T Get(); + void Put(T data); + bool Ready(); + uint64_t Size(); + +private: + static const int NUM_QUEUES = 8; + + pthread_mutex_t mutex[NUM_QUEUES]; // Mutex protected shared accesses. + pthread_cond_t has_data[NUM_QUEUES]; // Signals when data becomes available + std::queue messages[NUM_QUEUES]; // Actually holds the queued messages + + int read_ptr; // Where the next operation will read from + int write_ptr; // Where the next operation will write to + uint64_t size; +}; + +inline static void safe_lock(pthread_mutex_t* mutex) + { + if ( pthread_mutex_lock(mutex) != 0 ) + reporter->FatalErrorWithCore("cannot lock mutex"); + } + +inline static void safe_unlock(pthread_mutex_t* mutex) + { + if ( pthread_mutex_unlock(mutex) != 0 ) + reporter->FatalErrorWithCore("cannot unlock mutex"); + } + +template +inline Queue_::Queue_() + { + read_ptr = 0; + write_ptr = 0; + + for( int i = 0; i < NUM_QUEUES; ++i ) + { + if ( pthread_cond_init(&has_data[i], NULL) != 0 ) + reporter->FatalError("cannot init queue condition variable"); + + if ( pthread_mutex_init(&mutex[i], NULL) != 0 ) + reporter->FatalError("cannot init queue mutex"); + } + } + +template +inline Queue_::~Queue_() + { + for( int i = 0; i < NUM_QUEUES; ++i ) + { + pthread_cond_destroy(&has_data[i]); + pthread_mutex_destroy(&mutex[i]); + } + } + +template +inline T Queue_::Get() + { + safe_lock(&mutex[read_ptr]); + + int old_read_ptr = read_ptr; + + if ( messages[read_ptr].empty() ) + pthread_cond_wait(&has_data[read_ptr], &mutex[read_ptr]); + + T data = messages[read_ptr].front(); + messages[read_ptr].pop(); + --size; + + read_ptr = (read_ptr + 1) % NUM_QUEUES; + + safe_unlock(&mutex[old_read_ptr]); + + return data; + } + +template +inline void Queue_::Put(T data) + { + safe_lock(&mutex[write_ptr]); + + int old_write_ptr = write_ptr; + + bool need_signal = messages[write_ptr].empty(); + + messages[write_ptr].push(data); + ++size; + + if ( need_signal ) + pthread_cond_signal(&has_data[write_ptr]); + + write_ptr = (write_ptr + 1) % 
NUM_QUEUES; + + safe_unlock(&mutex[old_write_ptr]); + } + + +template +inline bool Queue_::Ready() + { + safe_lock(&mutex[read_ptr]); + + bool ret = (messages[read_ptr].size()); + + safe_unlock(&mutex[read_ptr]); + + return ret; + } + +template +inline uint64_t Queue_::Size() + { + safe_lock(&mutex[read_ptr]); + + uint64_t s = size; + + safe_unlock(&mutex[read_ptr]); + + return s; + } + +} + +#endif + From c03efbb5701b57f1cfb809de67db008a9ddee16b Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 27 Jan 2012 22:55:42 -0500 Subject: [PATCH 074/651] Small updates for the bittorrent analyzer to support 64bit types in binpac. - This branch removes the attempt at bittorrent resynchronization. I don't think that the bittorrent resynchronization would really work very well anyway. - This need to be merged after the topic/seth/64bit-type branch in binpac. --- aux/binpac | 2 +- src/BitTorrent.cc | 67 ++++++++++++++++++++----------------- src/bittorrent-analyzer.pac | 24 ++++++------- src/bittorrent-protocol.pac | 14 ++++---- 4 files changed, 56 insertions(+), 51 deletions(-) diff --git a/aux/binpac b/aux/binpac index 43308aab47..35d69ffd88 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 43308aab47a3357ca1885e1b6954154a2744d821 +Subproject commit 35d69ffd88f14820c495a7b66c103f9b94a604ae diff --git a/src/BitTorrent.cc b/src/BitTorrent.cc index c58eb4cf65..66ebe31252 100644 --- a/src/BitTorrent.cc +++ b/src/BitTorrent.cc @@ -66,39 +66,44 @@ void BitTorrent_Analyzer::DeliverStream(int len, const u_char* data, bool orig) void BitTorrent_Analyzer::Undelivered(int seq, int len, bool orig) { - uint64 entry_offset = orig ? - *interp->upflow()->next_message_offset() : - *interp->downflow()->next_message_offset(); - uint64& this_stream_len = orig ? stream_len_orig : stream_len_resp; - bool& this_stop = orig ? stop_orig : stop_resp; - TCP_ApplicationAnalyzer::Undelivered(seq, len, orig); - this_stream_len += len; - - if ( entry_offset < this_stream_len ) - { // entry point is somewhere in the gap - DeliverWeird("Stopping BitTorrent analysis: cannot recover from content gap", orig); - this_stop = true; - if ( stop_orig && stop_resp ) - ProtocolViolation("BitTorrent: content gap and/or protocol violation"); - } - else - { // fill the gap - try - { - u_char gap[len]; - memset(gap, 0, len); - interp->NewData(orig, gap, gap + len); - } - catch ( binpac::Exception const &e ) - { - DeliverWeird("Stopping BitTorrent analysis: filling content gap failed", orig); - this_stop = true; - if ( stop_orig && stop_resp ) - ProtocolViolation("BitTorrent: content gap and/or protocol violation"); - } - } + // I think that shoving data that is definitely wrong into the + // parser seems like a really bad idea. The way it's currently + // tracking the next message offset isn't compatible with + // new 64bit int support in binpac either. + + //uint64 entry_offset = orig ? + // *interp->upflow()->next_message_offset() : + // *interp->downflow()->next_message_offset(); + //uint64& this_stream_len = orig ? stream_len_orig : stream_len_resp; + //bool& this_stop = orig ? 
stop_orig : stop_resp; + // + //this_stream_len += len; + // + //if ( entry_offset < this_stream_len ) + // { // entry point is somewhere in the gap + // DeliverWeird("Stopping BitTorrent analysis: cannot recover from content gap", orig); + // this_stop = true; + // if ( stop_orig && stop_resp ) + // ProtocolViolation("BitTorrent: content gap and/or protocol violation"); + // } + //else + // { // fill the gap + // try + // { + // u_char gap[len]; + // memset(gap, 0, len); + // interp->NewData(orig, gap, gap + len); + // } + // catch ( binpac::Exception const &e ) + // { + // DeliverWeird("Stopping BitTorrent analysis: filling content gap failed", orig); + // this_stop = true; + // if ( stop_orig && stop_resp ) + // ProtocolViolation("BitTorrent: content gap and/or protocol violation"); + // } + // } } void BitTorrent_Analyzer::EndpointEOF(TCP_Reassembler* endp) diff --git a/src/bittorrent-analyzer.pac b/src/bittorrent-analyzer.pac index ee7a70ea21..3bc6d90230 100644 --- a/src/bittorrent-analyzer.pac +++ b/src/bittorrent-analyzer.pac @@ -10,25 +10,25 @@ flow BitTorrent_Flow(is_orig: bool) { %member{ bool handshake_ok; - uint64 _next_message_offset; + //uint64 _next_message_offset; %} %init{ handshake_ok = false; - _next_message_offset = 0; + //_next_message_offset = 0; %} - function next_message_offset(): uint64 - %{ - return &_next_message_offset; - %} + #function next_message_offset(): uint64 + # %{ + # return &_next_message_offset; + # %} - function increment_next_message_offset(go: bool, len: uint32): bool - %{ - if ( go ) - _next_message_offset += len; - return true; - %} + #function increment_next_message_offset(go: bool, len: uint32): bool + # %{ + # if ( go ) + # _next_message_offset += len; + # return true; + # %} function is_handshake_delivered(): bool %{ diff --git a/src/bittorrent-protocol.pac b/src/bittorrent-protocol.pac index d3a147f157..76bbafbf20 100644 --- a/src/bittorrent-protocol.pac +++ b/src/bittorrent-protocol.pac @@ -22,8 +22,8 @@ type BitTorrent_Handshake = record { } &length = 68, &let { validate: bool = $context.flow.validate_handshake(pstrlen, pstr); - incoffsetffset: bool = - $context.flow.increment_next_message_offset(true, 68); + #incoffsetffset: bool = + # $context.flow.increment_next_message_offset(true, 68); deliver: bool = $context.flow.deliver_handshake(reserved, info_hash, peer_id); }; @@ -72,8 +72,8 @@ type BitTorrent_PieceHeader(len: uint32) = record { index: uint32; begin: uint32; } &let { - incoffset: bool = - $context.flow.increment_next_message_offset(true, len + 5); + #incoffset: bool = + # $context.flow.increment_next_message_offset(true, len + 5); }; type BitTorrent_Piece(len: uint32) = record { @@ -134,9 +134,9 @@ type BitTorrent_Message = record { default -> message_id: BitTorrent_MessageID(len.len); }; } &length = 4 + len.len, &let { - incoffset: bool = $context.flow.increment_next_message_offset( - len.len == 0 || message_id.id != TYPE_PIECE, - 4 + len.len); + #incoffset: bool = $context.flow.increment_next_message_offset( + # len.len == 0 || message_id.id != TYPE_PIECE, + # 4 + len.len); }; type BitTorrent_PDU = case $context.flow.is_handshake_delivered() of { From 6cc29a78328cf463a4706e36b77d63ba256b9bd6 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 30 Jan 2012 12:12:14 -0800 Subject: [PATCH 075/651] make logging with threads compile on mac os and fix a couple of string literal warnings. 
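
The string-literal warnings mentioned above come from passing a runtime-built message directly as a printf-style format string, as in reporter->Error(cmsg): any '%' inside the message is then parsed as a conversion specifier. The patch therefore switches such calls to the constant-format form reporter->Error("%s", cmsg) and drops the '%' to '%%' escaping that is no longer needed. A small standalone illustration follows; the Error() function below is only a stand-in for a varargs reporter call, not Bro's Reporter API.

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    // Stand-in for a printf-style reporting function such as Reporter::Error().
    static void Error(const char* fmt, ...)
        {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        fputc('\n', stderr);
        }

    int main()
        {
        std::string msg = "Ascii writer: 100% of the buffer is in use";

        // Risky, and flagged by warnings such as -Wformat-security: msg
        // becomes the format string, so the stray '%' is interpreted as
        // a conversion specifier.
        // Error(msg.c_str());

        // Safe: constant format, message passed as an argument.
        Error("%s", msg.c_str());

        return 0;
        }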
--- src/net_util.cc | 2 +- src/threading/Manager.cc | 2 +- src/threading/MsgThread.cc | 20 +++++++++----------- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/src/net_util.cc b/src/net_util.cc index c0bacc98b2..ebe0392e2a 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -322,7 +322,7 @@ const uint32* mask_addr(const uint32* a, uint32 top_bits_to_keep) if ( top_bits_to_keep == 0 || top_bits_to_keep > max_bits ) { - reporter->Error("bad address mask value %s", top_bits_to_keep); + reporter->Error("bad address mask value %u", top_bits_to_keep); return addr; } diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index ed4d9cf623..d963876755 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -90,7 +90,7 @@ void Manager::Process() else { string s = msg->Name() + " failed, terminating thread"; - reporter->Error(s.c_str()); + reporter->Error("%s", s.c_str()); t->Stop(); } diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index e2d81cf47f..d78c7533a3 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -28,7 +28,7 @@ namespace threading { class TerminateMessage : public InputMessage { public: - TerminateMessage(MsgThread* thread) : InputMessage("Terminate", thread) { } + TerminateMessage(MsgThread* thread) : InputMessage("Terminate", thread) { } virtual bool Process() { return true; } }; @@ -56,7 +56,7 @@ class HeartbeatMessage : public InputMessage { public: HeartbeatMessage(MsgThread* thread, double arg_network_time, double arg_current_time) - : InputMessage("Heartbeat", thread) + : InputMessage("Heartbeat", thread) { network_time = arg_network_time; current_time = arg_current_time; } virtual bool Process() { return Object()->DoHeartbeat(network_time, current_time); } @@ -98,38 +98,36 @@ Message::~Message() bool ReporterMessage::Process() { string s = Object()->Name() + ": " + msg; - strreplace(s, "%", "%%"); - const char* cmsg = s.c_str(); switch ( type ) { case INFO: - reporter->Info(cmsg); + reporter->Info("%s", cmsg); break; case WARNING: - reporter->Warning(cmsg); + reporter->Warning("%s", cmsg); break; case ERROR: - reporter->Error(cmsg); + reporter->Error("%s", cmsg); break; case FATAL_ERROR: - reporter->FatalError(cmsg); + reporter->FatalError("%s", cmsg); break; case FATAL_ERROR_WITH_CORE: - reporter->FatalErrorWithCore(cmsg); + reporter->FatalErrorWithCore("%s", cmsg); break; case INTERNAL_WARNING: - reporter->InternalWarning(cmsg); + reporter->InternalWarning("%s", cmsg); break; case INTERNAL_ERROR : - reporter->InternalError(cmsg); + reporter->InternalError("%s", cmsg); break; default: From a428645b2a88f20c6e9f48c573e88b74c3c03398 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 31 Jan 2012 23:47:33 -0800 Subject: [PATCH 076/651] Documenting the threading/* classes. Also switching from semaphores to mutexes as the former don't seem to be fully supported on MacOS. 
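
The switch from semaphores to mutexes described above works because the terminate handle is only ever used as a binary semaphore: Start() locks the mutex before spawning the thread, launcher() blocks on it once Run() has returned, and Stop() unlocks it so the thread can fall through and exit. Below is a minimal pthreads sketch of that hand-off; it is illustrative only, with the error checking and reporter calls of the real code left out (compile with -pthread).

    #include <cstdio>
    #include <pthread.h>
    #include <unistd.h>

    // Mutex used as a binary semaphore: held by main until the child may exit.
    static pthread_mutex_t terminate = PTHREAD_MUTEX_INITIALIZER;

    static void* launcher(void*)
        {
        printf("child: Run() would execute here\n");

        // Wait until the main thread signals that terminating is ok now,
        // by blocking on the mutex the main thread still holds.
        pthread_mutex_lock(&terminate);
        printf("child: exiting\n");
        return 0;
        }

    int main()
        {
        // Acquire the mutex up front, as BasicThread::Start() does.
        pthread_mutex_lock(&terminate);

        pthread_t child;
        pthread_create(&child, 0, launcher, 0);

        sleep(1); // stand-in for Bro's normal processing

        // Unlocking plays the role of sem_post() in the old code: the
        // child's final lock succeeds and it terminates (BasicThread::Stop()).
        pthread_mutex_unlock(&terminate);

        pthread_join(child, 0);
        printf("main: joined child\n");
        return 0;
        }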
--- src/threading/BasicThread.cc | 29 ++-- src/threading/BasicThread.h | 119 ++++++++++++--- src/threading/Manager.h | 63 +++++++- src/threading/MsgThread.cc | 56 +++---- src/threading/MsgThread.h | 282 +++++++++++++++++++++++++++++++---- src/threading/Queue.h | 56 +++++-- 6 files changed, 499 insertions(+), 106 deletions(-) diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index 273a192de3..f7bd2afbcd 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -7,6 +7,8 @@ using namespace threading; +uint64_t BasicThread::thread_counter = 0; + BasicThread::BasicThread(const string& arg_name) { started = false; @@ -16,9 +18,7 @@ BasicThread::BasicThread(const string& arg_name) buf = 0; buf_len = 1024; - char tmp[128]; - snprintf(tmp, sizeof(tmp), "%s@%p", arg_name.c_str(), this); - name = string(tmp); + name = Fmt("%s@%d", arg_name.c_str(), ++thread_counter); thread_mgr->AddThread(this); } @@ -53,8 +53,15 @@ const char* BasicThread::Fmt(const char* format, ...) void BasicThread::Start() { - if ( sem_init(&terminate, 0, 0) != 0 ) - reporter->FatalError("Cannot create terminate semaphore for thread %s", name.c_str()); + if ( started ) + return; + + if ( pthread_mutex_init(&terminate, 0) != 0 ) + reporter->FatalError("Cannot create terminate mutex for thread %s", name.c_str()); + + // We use this like a binary semaphore and acquire it immediately. + if ( pthread_mutex_lock(&terminate) != 0 ) + reporter->FatalError("Cannot aquire terminate mutex for thread %s", name.c_str()); if ( pthread_create(&pthread, 0, BasicThread::launcher, this) != 0 ) reporter->FatalError("Cannot create thread %s", name.c_str()); @@ -76,8 +83,9 @@ void BasicThread::Stop() DBG_LOG(DBG_THREADING, "Signaling thread %s to terminate ...", name.c_str()); - // Signal that it's ok for the thread to exit now. - if ( sem_post(&terminate) != 0 ) + // Signal that it's ok for the thread to exit now by unlocking the + // mutex. + if ( pthread_mutex_unlock(&terminate) != 0 ) reporter->FatalError("Failure flagging terminate condition for thread %s", name.c_str()); terminating = true; @@ -98,7 +106,7 @@ void BasicThread::Join() if ( pthread_join(pthread, 0) != 0 ) reporter->FatalError("Failure joining thread %s", name.c_str()); - sem_destroy(&terminate); + pthread_mutex_destroy(&terminate); DBG_LOG(DBG_THREADING, "Done with thread %s", name.c_str()); @@ -120,9 +128,8 @@ void* BasicThread::launcher(void *arg) thread->Run(); // Wait until somebody actually wants us to terminate. - - if ( sem_wait(&thread->terminate) != 0 ) - reporter->FatalError("Failure flagging terminate condition for thread %s", thread->Name().c_str()); + if ( pthread_mutex_lock(&thread->terminate) != 0 ) + reporter->FatalError("Failure acquiring terminate mutex at end of thread %s", thread->Name().c_str()); return 0; } diff --git a/src/threading/BasicThread.h b/src/threading/BasicThread.h index 30a11b4505..df5665c464 100644 --- a/src/threading/BasicThread.h +++ b/src/threading/BasicThread.h @@ -14,50 +14,133 @@ namespace threading { class Manager; +/** + * Base class for all threads. + * + * This class encapsulates all the OS-level thread handling. All thread + * instances are automatically added to the threading::Manager for management. The + * manager also takes care of deleting them (which must not be done + * manually). + */ class BasicThread { public: + /** + * Creates a new thread object. Instantiating the object does however + * not yet start the actual OS thread, that requires calling Start(). 
+ * + * Only Bro's main thread may create new thread instances. + * + * @param name A descriptive name for thread the thread. This may + * show up in messages to the user. + */ BasicThread(const string& name); // Managed by manager, must not delete otherwise. - virtual ~BasicThread(); + /** + * Returns a descriptive name for the thread. This is the name passed + * into the constructor. + * + * This method is safe to call from any thread. + */ const string& Name() const { return name; } - void Start(); // Spawns the thread and enters Run(). - void Stop(); // Signals the thread to terminate. + /** + * Starts the thread. Calling this methods will spawn a new OS thread + * executing Run(). Note that one can't restart a thread after a + * Stop(), doing so will be ignored. + * + * Only Bro's main thread must call this method. + */ + void Start(); + /** + * Signals the thread to stop. The method lets Terminating() now + * return true. It does however not force the thread to terminate. + * It's up to the Run() method to to query Terminating() and exit + * eventually. + * + * Calling this method has no effect if Start() hasn't been executed + * yet. + * + * Only Bro's main thread must call this method. + */ + void Stop(); + + /** + * Returns true if Terminate() has been called. + * + * This method is safe to call from any thread. + */ bool Terminating() const { return terminating; } - // A thread-safe version of fmt(). + /** + * A version of fmt() that the thread can safely use. + * + * This is safe to call from Run() but must not be used from any + * other thread than the current one. + */ const char* Fmt(const char* format, ...); protected: - virtual void Run() = 0; - - virtual void OnStart() {} - virtual void OnStop() {} - -private: friend class Manager; + /** + * Entry point for the thread. This must be overridden by derived + * classes and will execute in a separate thread once Start() is + * called. The thread will not terminate before this method finishes. + * An implementation should regularly check Terminating() to see if + * exiting has been requested. + */ + virtual void Run() = 0; + + /** + * Executed with Start(). This is a hook into starting the thread. It + * will be called from Bro's main thread after the OS thread has been + * started. + */ + virtual void OnStart() {} + + /** + * Executed with Stop(). This is a hook into stopping the thread. It + * will be called from Bro's main thread after the thread has been + * signaled to stop. + */ + virtual void OnStop() {} + + /** + * Destructor. This will be called by the manager. + * + * Only Bro's main thread may delete thread instances. + * + */ + virtual ~BasicThread(); + + /** + * Waits until the thread's Run() method has finished and then joins + * it. This is called from the threading::Manager. + */ + void Join(); + +private: + // pthread entry function. static void* launcher(void *arg); - // Used from the ThreadMgr. - void Join(); // Waits until the thread has terminated and then joins it. - + string name; + pthread_t pthread; bool started; // Set to to true once running. bool terminating; // Set to to true to signal termination. - string name; - pthread_t pthread; - sem_t terminate; + // Used as a semaphore to tell the pthread thread when it may + // terminate. + pthread_mutex_t terminate; // For implementing Fmt(). 
char* buf; unsigned int buf_len; + + static uint64_t thread_counter; }; } -extern threading::Manager* thread_mgr; - #endif diff --git a/src/threading/Manager.h b/src/threading/Manager.h index aa7292ee81..2c4f88fa1e 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -11,25 +11,78 @@ namespace threading { +/** + * The thread manager coordinates all child threads. Once a BasicThread is + * instantitated, it gets addedd to the manager, which will delete it later + * once it has terminated. + * + * In addition to basic threads, the manager also provides additional + * functionality specific to MsgThread instances. In particular, it polls + * their outgoing message queue on a regular basis and feeds data sent into + * the rest of Bro. It also triggers the regular heartbeats. + */ class Manager : public IOSource { public: + /** + * Constructor. Only a single instance of the manager must be + * created. + */ Manager(); + + /** + * Destructir. + */ ~Manager(); + /** + * Terminates the manager's processor. The method signals all threads + * to terminates and wait for them to do so. It then joins them and + * returns to the caller. Afterwards, no more thread instances may be + * created. + */ void Terminate(); protected: friend class BasicThread; friend class MsgThread; + /** + * Registers a new basic thread with the manager. This is + * automatically called by the thread's constructor. + * + * @param thread The thread. + */ void AddThread(BasicThread* thread); + + /** + * Registers a new message thread with the manager. This is + * automatically called by the thread's constructor. This must be + * called \a in \a addition to AddThread(BasicThread* thread). The + * MsgThread constructor makes sure to do so. + * + * @param thread The thread. + */ void AddMsgThread(MsgThread* thread); - // IOSource interface. + /** + * Part of the IOSource interface. + */ virtual void GetFds(int* read, int* write, int* except); + + /** + * Part of the IOSource interface. + */ virtual double NextTimestamp(double* network_time); + + /** + * Part of the IOSource interface. + */ virtual void Process(); + + /** + * Part of the IOSource interface. + */ virtual const char* Tag() { return "threading::Manager"; } private: @@ -41,12 +94,16 @@ private: typedef std::list msg_thread_list; msg_thread_list msg_threads; - bool did_process; - double next_beat; + bool did_process; // True if the last Process() found some work to do. + double next_beat; // Timestamp when the next heartbeat will be sent. }; } +/** + * A singleton instance of the thread manager. All methods must only be + * called from Bro's main thread. + */ extern threading::Manager* thread_mgr; #endif diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index d78c7533a3..455c177df6 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -6,25 +6,13 @@ using namespace threading; -static void strreplace(const string& s, const string& o, const string& n) - { - string r = s; - - while ( true ) - { - size_t i = r.find(o); - - if ( i == std::string::npos ) - break; - - r.replace(i, o.size(), n); - } - } - namespace threading { -// Standard messages. +////// Messages. +// Signals child thread to terminate. This is actually a no-op; its only +// purpose is unblock the current read operation so that the child's Run() +// methods can check the termination status. 
class TerminateMessage : public InputMessage { public: @@ -33,6 +21,22 @@ public: virtual bool Process() { return true; } }; +/// Sends a heartbeat to the child thread. +class HeartbeatMessage : public InputMessage +{ +public: + HeartbeatMessage(MsgThread* thread, double arg_network_time, double arg_current_time) + : InputMessage("Heartbeat", thread) + { network_time = arg_network_time; current_time = arg_current_time; } + + virtual bool Process() { return Object()->DoHeartbeat(network_time, current_time); } + +private: + double network_time; + double current_time; +}; + +// A message from the child to be passed on to the Reporter. class ReporterMessage : public OutputMessage { public: @@ -52,21 +56,8 @@ private: Type type; }; -class HeartbeatMessage : public InputMessage -{ -public: - HeartbeatMessage(MsgThread* thread, double arg_network_time, double arg_current_time) - : InputMessage("Heartbeat", thread) - { network_time = arg_network_time; current_time = arg_current_time; } - - virtual bool Process() { return Object()->DoHeartbeat(network_time, current_time); } - -private: - double network_time; - double current_time; -}; - #ifdef DEBUG +// A debug message from the child to be passed on to the DebugLogger. class DebugMessage : public OutputMessage { public: @@ -77,8 +68,7 @@ public: virtual bool Process() { string s = Object()->Name() + ": " + msg; - strreplace(s, "%", "%%"); - debug_logger.Log(stream, s.c_str()); + debug_logger.Log(stream, "%s", s.c_str()); return true; } private: @@ -89,7 +79,7 @@ private: } -// Methods. +////// Methods. Message::~Message() { diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 2e976c1773..8f37041bb6 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -15,121 +15,332 @@ class BasicInputMessage; class BasicOutputMessage; class HeartbeatMessage; +/** + * A specialized thread that provides bi-directional message passing between + * Bro's main thread and the child thread. Messages are instances of + * BasicInputMessage and BasicOutputMessage for message sent \a to the child + * thread and received \a from the child thread, respectively. + * + * The thread's Run() method implements main loop that processes incoming + * messages until Terminating() indicates that execution should stop. Once + * that happens, the thread stops accepting any new messages, finishes + * processes all remaining ones still in the queue, and then exits. + */ class MsgThread : public BasicThread { public: + /** + * Constructor. It automatically registers the thread with the + * threading::Manager. + * + * Only Bro's main thread may instantiate a new thread. + * + * @param name A descriptive name. This is passed on to BasicThread(). + */ MsgThread(const string& name); + /** + * Sends a message to the child thread. The message will be proceesed + * once the thread has retrieved it from its incoming queue. + * + * Only the main thread may call this method. + * + * @param msg The message. + */ void SendIn(BasicInputMessage* msg) { return SendIn(msg, false); } + + /** + * Sends a message from the child thread to the main thread. + * + * Only the child thread may call this method. + * + * @param msg The mesasge. + */ void SendOut(BasicOutputMessage* msg) { return SendOut(msg, false); } - BasicOutputMessage* RetrieveOut(); - - // Report an informational message, nothing that needs specific - // attention. + /** + * Reports an informational message from the child thread. The main + * thread will pass this to the Reporter once received. 
+ * + * Only the child thread may call this method. + * + * @param msg The message. It will be prefixed with the thread's name. + */ void Info(const char* msg); - // Report a warning that may indicate a problem. + /** + * Reports a warning from the child thread that may indicate a + * problem. The main thread will pass this to the Reporter once + * received. + * + * Only the child thread may call this method. + * + * @param msg The message. It will be prefixed with the thread's name. + */ void Warning(const char* msg); - // Report a non-fatal error. Processing proceeds normally after the error - // has been reported. + /** + * Reports a non-fatal error from the child thread. The main thread + * will pass this to the Reporter once received. Processing proceeds + * normally after the error has been reported. + * + * Only the child thread may call this method. + * + * @param msg The message. It will be prefixed with the thread's name. + */ void Error(const char* msg); - // Report a fatal error. Bro will terminate after the message has been - // reported. + /** + * Reports a fatal error from the child thread. The main thread will + * pass this to the Reporter once received. Bro will terminate after + * the message has been reported. + * + * Only the child thread may call this method. + * + * @param msg The message. It will be prefixed with the thread's name. + */ void FatalError(const char* msg); - // Report a fatal error. Bro will terminate after the message has been - // reported and always generate a core dump. + /** + * Reports a fatal error from the child thread. The main thread will + * pass this to the Reporter once received. Bro will terminate with a + * core dump after the message has been reported. + * + * Only the child thread may call this method. + * + * @param msg The message. It will be prefixed with the thread's name. + */ void FatalErrorWithCore(const char* msg); - // Report about a potential internal problem. Bro will continue - // normally. + /** + * Reports a potential internal problem from the child thread. The + * main thread will pass this to the Reporter once received. Bro will + * continue normally. + * + * Only the child thread may call this method. + * + * @param msg The message. It will be prefixed with the thread's name. + */ void InternalWarning(const char* msg); - // Report an internal program error. Bro will terminate with a core - // dump after the message has been reported. + /** + * Reports an internal program error from the child thread. The main + * thread will pass this to the Reporter once received. Bro will + * terminate with a core dump after the message has been reported. + * + * Only the child thread may call this method. + * + * @param msg The message. It will be prefixed with the thread's name. + */ void InternalError(const char* msg); #ifdef DEBUG - // Records a debug message for the given stream. + /** + * Records a debug message for the given stream from the child + * thread. The main thread will pass this to the DebugLogger once + * received. + * + * Only the child thread may call this method. + * + * @param msg The message. It will be prefixed with the thread's name. + */ void Debug(DebugStream stream, const char* msg); #endif - void Heartbeat(); - + /** + * Statistics about inter-thread communication. + */ struct Stats { - uint64_t sent_in; - uint64_t sent_out; - uint64_t pending_in; - uint64_t pending_out; + uint64_t sent_in; //! Number of messages sent to the child thread. + uint64_t sent_out; //! 
Number of messages sent from the child thread to the main thread + uint64_t pending_in; //! Number of messages sent to the child but not yet processed. + uint64_t pending_out; //! Number of messages sent from the child but not yet processed by the main thread. }; + /** + * Returns statistics about the inter-thread communication. + * + * @param stats A pointer to a structure that will be filled with + * current numbers. + */ void GetStats(Stats* stats); protected: + friend class Manager; friend class HeartbeatMessage; + /** + * Pops a message sent by the child from the child-to-main queue. + * + * This is method is called regularly by the threading::Manager. + * + * @return The message, wth ownership passed to caller. Returns null + * if the queue is empty. + */ + BasicOutputMessage* RetrieveOut(); + + /** + * Triggers a heartbeat message being sent to the client thread. + * + * This is method is called regularly by the threading::Manager. + */ + void Heartbeat(); + + /** + * Overriden from BasicThread. + * + */ virtual void Run(); virtual void OnStop(); virtual bool DoHeartbeat(double network_time, double current_time) { return true; } private: - friend class Manager; - + /** + * Pops a message sent by the main thread from the main-to-chold + * queue. + * + * Must only be called by the child thread. + * + * @return The message, wth ownership passed to caller. Returns null + * if the queue is empty. + */ BasicInputMessage* RetrieveIn(); + /** + * Queues a message for the child. + * + * Must only be called by the main thread. + * + * @param msg The message. + * + * @param force: If true, the message will be queued even when we're already + * Terminating(). Normally, the message would be discarded in that + * case. + */ void SendIn(BasicInputMessage* msg, bool force); + + /** + * Queues a message for the main thread. + * + * Must only be called by the child thread. + * + * @param msg The message. + * + * @param force: If true, the message will be queued even when we're already + * Terminating(). Normally, the message would be discarded in that + * case. + */ void SendOut(BasicOutputMessage* msg, bool force); + /** + * Returns true if there's at least one message pending for the child + * thread. + */ bool HasIn() { return queue_in.Ready(); } + + /** + * Returns true if there's at least one message pending for the main + * thread. + */ bool HasOut() { return queue_out.Ready(); } - Queue_ queue_in; - Queue_ queue_out; + Queue queue_in; + Queue queue_out; - uint64_t cnt_sent_in; - uint64_t cnt_sent_out; + uint64_t cnt_sent_in; // Counts message sent to child. + uint64_t cnt_sent_out; // Counts message sent by child. }; +/** + * Base class for all message between Bro's main process and a MsgThread. + */ class Message { public: + /** + * Destructor. + */ virtual ~Message(); + /** + * Returns a descriptive name for the message's general type. This is + * what's passed into the constructor and used mainly for debugging + * purposes. + */ const string& Name() const { return name; } + /** + * Callback that must be overriden for processing a message. + */ virtual bool Process() = 0; // Thread will be terminated if returngin false. protected: + /** + * Constructor. + * + * @param arg_name A descriptive name for the type of message. Used + * mainly for debugging purposes. + */ Message(const string& arg_name) { name = arg_name; } private: string name; }; +/** + * Base class for messages sent from Bro's main thread to a child MsgThread. 
+ */ class BasicInputMessage : public Message { protected: + /** + * Constructor. + * + * @param name A descriptive name for the type of message. Used + * mainly for debugging purposes. + */ BasicInputMessage(const string& name) : Message(name) {} }; +/** + * Base class for messages sent from a child MsgThread to Bro's main thread. + */ class BasicOutputMessage : public Message { protected: + /** + * Constructor. + * + * @param name A descriptive name for the type of message. Used + * mainly for debugging purposes. + */ BasicOutputMessage(const string& name) : Message(name) {} }; +/** + * A paremeterized InputMessage that stores a pointer to an argument object. + * Normally, the objects will be used from the Process() callback. + */ template class InputMessage : public BasicInputMessage { public: + /** + * Returns the objects passed to the constructor. + */ O* Object() const { return object; } protected: + /** + * Constructor. + * + * @param name: A descriptive name for the type of message. Used + * mainly for debugging purposes. + * + * @param arg_object: An object to store with the message. + */ InputMessage(const string& name, O* arg_object) : BasicInputMessage(name) { object = arg_object; } @@ -137,13 +348,28 @@ private: O* object; }; +/** + * A paremeterized OututMessage that stores a pointer to an argument object. + * Normally, the objects will be used from the Process() callback. + */ template class OutputMessage : public BasicOutputMessage { public: + /** + * Returns the objects passed to the constructor. + */ O* Object() const { return object; } protected: + /** + * Constructor. + * + * @param name A descriptive name for the type of message. Used + * mainly for debugging purposes. + * + * @param arg_object An object to store with the message. + */ OutputMessage(const string& name, O* arg_object) : BasicOutputMessage(name) { object = arg_object; } diff --git a/src/threading/Queue.h b/src/threading/Queue.h index 49859dc051..add7019f9c 100644 --- a/src/threading/Queue.h +++ b/src/threading/Queue.h @@ -9,23 +9,53 @@ #include "Reporter.h" +#undef Queue // Defined elsewhere unfortunately. + namespace threading { /** - * Just a simple threaded queue wrapper class. Uses multiple queues and reads / writes in rotary fashion in an attempt to limit contention. - * Due to locking granularity, bulk put / get is no faster than single put / get as long as FIFO guarantee is required. + * A thread-safe single-reader single-writer queue. + * + * The implementation uses multiple queues and reads/writes in rotary fashion + * in an attempt to limit contention. + * + * All Queue instances must be instantiated by Bro's main thread. + * + * TODO: Unclear how critical performance is for this qeueue. We could like;y + * optimize it further if helpful. */ - template -class Queue_ +class Queue { public: - Queue_(); - ~Queue_(); + /** + * Constructor. + */ + Queue(); + /** + * Destructor. + */ + ~Queue(); + + /** + * Retrieves one elment. + */ T Get(); + + /** + * Queues one element. + */ void Put(T data); + + /** + * Returns true if the next Get() operation will succeed. + */ bool Ready(); + + /** + * Returns the number of queued items not yet retrieved. + */ uint64_t Size(); private: @@ -37,7 +67,7 @@ private: int read_ptr; // Where the next operation will read from int write_ptr; // Where the next operation will write to - uint64_t size; + uint64_t size; // Current queue size. 
}; inline static void safe_lock(pthread_mutex_t* mutex) @@ -53,7 +83,7 @@ inline static void safe_unlock(pthread_mutex_t* mutex) } template -inline Queue_::Queue_() +inline Queue::Queue() { read_ptr = 0; write_ptr = 0; @@ -69,7 +99,7 @@ inline Queue_::Queue_() } template -inline Queue_::~Queue_() +inline Queue::~Queue() { for( int i = 0; i < NUM_QUEUES; ++i ) { @@ -79,7 +109,7 @@ inline Queue_::~Queue_() } template -inline T Queue_::Get() +inline T Queue::Get() { safe_lock(&mutex[read_ptr]); @@ -100,7 +130,7 @@ inline T Queue_::Get() } template -inline void Queue_::Put(T data) +inline void Queue::Put(T data) { safe_lock(&mutex[write_ptr]); @@ -121,7 +151,7 @@ inline void Queue_::Put(T data) template -inline bool Queue_::Ready() +inline bool Queue::Ready() { safe_lock(&mutex[read_ptr]); @@ -133,7 +163,7 @@ inline bool Queue_::Ready() } template -inline uint64_t Queue_::Size() +inline uint64_t Queue::Size() { safe_lock(&mutex[read_ptr]); From 4f0fc571eff7e6fc3db5c06f8097159c765ee69d Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 1 Feb 2012 00:34:18 -0800 Subject: [PATCH 077/651] Doing bulkd writes instead of individual writes now. Also slight change to Writer API, going back to how the rotate methods were before. --- src/logging/WriterBackend.cc | 86 +++++++++++++++++++++++------------ src/logging/WriterBackend.h | 31 +++++++------ src/logging/WriterFrontend.cc | 60 ++++++++++++++++++++---- src/logging/WriterFrontend.h | 11 +++-- src/logging/writers/Ascii.cc | 5 +- src/logging/writers/Ascii.h | 4 +- src/logging/writers/None.cc | 5 +- src/logging/writers/None.h | 4 +- src/threading/MsgThread.h | 16 ++++++- 9 files changed, 155 insertions(+), 67 deletions(-) diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 095490edc4..7c9c1d10ca 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -31,13 +31,13 @@ private: bool terminating; }; -class DisableMessage : public threading::OutputMessage +class FlushWriteBufferMessage : public threading::OutputMessage { public: - DisableMessage(WriterFrontend* writer) - : threading::OutputMessage("Disable", writer) {} + FlushWriteBufferMessage(WriterFrontend* writer) + : threading::OutputMessage("FlushWriteBuffer", writer) {} - virtual bool Process() { Object()->SetDisable(); return true; } + virtual bool Process() { Object()->FlushWriteBuffer(); return true; } }; } @@ -65,25 +65,31 @@ WriterBackend::~WriterBackend() } } -void WriterBackend::DeleteVals(Value** vals) +void WriterBackend::DeleteVals(int num_writes, Value*** vals) { - // Note this code is duplicated in Manager::DeleteVals(). - for ( int i = 0; i < num_fields; i++ ) - delete vals[i]; + for ( int j = 0; j < num_writes; ++j ) + { + // Note this code is duplicated in Manager::DeleteVals(). 
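	// Layout note: "vals" is vals[write][field] -- num_writes rows, each an
	// array of num_fields Value pointers. Write() passed ownership of all of
	// it to us, so every Value, every per-write row, and the outer array
	// itself are freed here.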
+ for ( int i = 0; i < num_fields; i++ ) + delete vals[j][i]; + + delete [] vals[j]; + } delete [] vals; } -bool WriterBackend::FinishedRotation(WriterFrontend* writer, string new_name, string old_name, +bool WriterBackend::FinishedRotation(string new_name, string old_name, double open, double close, bool terminating) { - SendOut(new RotationFinishedMessage(writer, new_name, old_name, open, close, terminating)); + SendOut(new RotationFinishedMessage(frontend, new_name, old_name, open, close, terminating)); return true; } -bool WriterBackend::Init(string arg_path, int arg_num_fields, - const Field* const * arg_fields) +bool WriterBackend::Init(WriterFrontend* arg_frontend, string arg_path, int arg_num_fields, + const Field* const * arg_fields) { + frontend = arg_frontend; path = arg_path; num_fields = arg_num_fields; fields = arg_fields; @@ -94,7 +100,7 @@ bool WriterBackend::Init(string arg_path, int arg_num_fields, return true; } -bool WriterBackend::Write(int arg_num_fields, Value** vals) +bool WriterBackend::Write(int arg_num_fields, int num_writes, Value*** vals) { // Double-check that the arguments match. If we get this from remote, // something might be mixed up. @@ -107,30 +113,42 @@ bool WriterBackend::Write(int arg_num_fields, Value** vals) Debug(DBG_LOGGING, msg); #endif - DeleteVals(vals); + DeleteVals(num_writes, vals); return false; } - for ( int i = 0; i < num_fields; ++i ) - { - if ( vals[i]->type != fields[i]->type ) - { #ifdef DEBUG - const char* msg = Fmt("Field type doesn't match in WriterBackend::Write() (%d vs. %d)", - vals[i]->type, fields[i]->type); - Debug(DBG_LOGGING, msg); -#endif + // Double-check all the types match. + for ( int j = 0; j < num_writes; j++ ) + { + for ( int i = 0; i < num_fields; ++i ) + { + if ( vals[j][i]->type != fields[i]->type ) + { + const char* msg = Fmt("Field type doesn't match in WriterBackend::Write() (%d vs. %d)", + vals[j][i]->type, fields[i]->type); + Debug(DBG_LOGGING, msg); - DeleteVals(vals); - return false; + DeleteVals(num_writes, vals); + return false; + } } } +#endif - bool result = DoWrite(num_fields, fields, vals); + bool success = true; - DeleteVals(vals); + for ( int j = 0; j < num_writes; j++ ) + { + success = DoWrite(num_fields, fields, vals[j]); - return result; + if ( ! success ) + break; + } + + DeleteVals(num_writes, vals); + + return success; } bool WriterBackend::SetBuf(bool enabled) @@ -144,10 +162,10 @@ bool WriterBackend::SetBuf(bool enabled) return DoSetBuf(enabled); } -bool WriterBackend::Rotate(WriterFrontend* writer, string rotated_path, - double open, double close, bool terminating) +bool WriterBackend::Rotate(string rotated_path, double open, + double close, bool terminating) { - return DoRotate(writer, rotated_path, open, close, terminating); + return DoRotate(rotated_path, open, close, terminating); } bool WriterBackend::Flush() @@ -159,3 +177,11 @@ bool WriterBackend::Finish() { return DoFinish(); } + +bool WriterBackend::DoHeartbeat(double network_time, double current_time) + { + SendOut(new FlushWriteBufferMessage(frontend)); + return true; + } + + diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index d1e4634e6d..27f4fe45a5 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -19,6 +19,12 @@ public: virtual ~WriterBackend(); // One-time initialization of the writer to define the logged fields. + // + // "frontend" is the frontend writer that created this backend. 
The + // *only* purpose of this value is to be passed back via messages as + // a argument to callbacks. One must not otherwise access the + // frontend, it's running in a different thread. + // // Interpretation of "path" is left to the writer, and will be // corresponding the value configured on the script-level. // @@ -27,7 +33,7 @@ public: // // The new instance takes ownership of "fields", and will delete them // when done. - bool Init(string path, int num_fields, const Field* const * fields); + bool Init(WriterFrontend* frontend, string path, int num_fields, const Field* const * fields); // Writes one log entry. The method takes ownership of "vals" and // will return immediately after queueing the write request, which is @@ -38,7 +44,7 @@ public: // // Returns false if an error occured, in which case the writer must // not be used any further. - bool Write(int num_fields, Value** vals); + bool Write(int num_fields, int num_writes, Value*** vals); // Sets the buffering status for the writer, if the writer supports // that. (If not, it will be ignored). @@ -50,7 +56,7 @@ public: // Triggers rotation, if the writer supports that. (If not, it will // be ignored). - bool Rotate(WriterFrontend* writer, string rotated_path, double open, double close, bool terminating); + bool Rotate(string rotated_path, double open, double close, bool terminating); // Finishes writing to this logger regularly. Must not be called if // an error has been indicated earlier. After calling this, no @@ -81,9 +87,10 @@ public: // // terminating: True if rotation request occured due to the main Bro // process shutting down. - bool FinishedRotation(WriterFrontend* writer, string new_name, string old_name, + bool FinishedRotation(string new_name, string old_name, double open, double close, bool terminating); + protected: // Methods for writers to override. If any of these returs false, it // will be assumed that a fatal error has occured that prevents the @@ -128,11 +135,6 @@ protected: // RotationDone() to signal the log manager that potential // postprocessors can now run. // - // "writer" is the frontend writer that triggered the rotation. The - // *only* purpose of this value is to be passed into - // FinishedRotation() once done. You must not otherwise access the - // frontend, it's running in a different thread. - // // "rotate_path" reflects the path to where the rotated output is to // be moved, with specifics depending on the writer. It should // generally be interpreted in a way consistent with that of "path" @@ -149,8 +151,8 @@ protected: // // A writer may ignore rotation requests if it doesn't fit with its // semantics (but must still return true in that case). - virtual bool DoRotate(WriterFrontend* writer, string rotated_path, - double open, double close, bool terminating) = 0; + virtual bool DoRotate(string rotated_path, double open, double close, + bool terminating) = 0; // Called once on termination. Not called when any of the other // methods has previously signaled an error, i.e., executing this @@ -158,7 +160,9 @@ protected: virtual bool DoFinish() = 0; // Triggered by regular heartbeat messages from the main process. - virtual bool DoHeartbeat(double network_time, double current_time) { return true; }; + // + // Note when overriding, you must call WriterBackend::DoHeartbeat(). + virtual bool DoHeartbeat(double network_time, double current_time); private: friend class Manager; @@ -169,8 +173,9 @@ private: bool Disabled() { return disabled; } // Deletes the values as passed into Write(). 
- void DeleteVals(Value** vals); + void DeleteVals(int num_writes, Value*** vals); + WriterFrontend* frontend; string path; int num_fields; const Field* const * fields; diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 92c93c1c56..2f7c1d6e7e 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -9,13 +9,14 @@ namespace logging { class InitMessage : public threading::InputMessage { public: - InitMessage(WriterBackend* backend, const string path, const int num_fields, const Field* const *fields) + InitMessage(WriterBackend* backend, WriterFrontend* frontend, const string path, const int num_fields, const Field* const *fields) : threading::InputMessage("Init", backend), path(path), num_fields(num_fields), fields(fields) { } - virtual bool Process() { return Object()->Init(path, num_fields, fields); } + virtual bool Process() { return Object()->Init(frontend, path, num_fields, fields); } private: + WriterFrontend* frontend; const string path; const int num_fields; const Field * const* fields; @@ -31,7 +32,7 @@ public: rotated_path(rotated_path), open(open), close(close), terminating(terminating) { } - virtual bool Process() { return Object()->Rotate(frontend, rotated_path, open, close, terminating); } + virtual bool Process() { return Object()->Rotate(rotated_path, open, close, terminating); } private: WriterFrontend* frontend; @@ -44,16 +45,16 @@ private: class WriteMessage : public threading::InputMessage { public: - WriteMessage(WriterBackend* backend, const int num_fields, Value **vals) + WriteMessage(WriterBackend* backend, int num_fields, int num_writes, Value*** vals) : threading::InputMessage("Write", backend), - num_fields(num_fields), fields(fields), vals(vals) {} + num_fields(num_fields), vals(vals) {} - virtual bool Process() { return Object()->Write(num_fields, vals); } + virtual bool Process() { return Object()->Write(num_fields, num_writes, vals); } private: int num_fields; - Field* const* fields; - Value **vals; + int num_writes; + Value ***vals; }; class SetBufMessage : public threading::InputMessage @@ -96,6 +97,8 @@ using namespace logging; WriterFrontend::WriterFrontend(bro_int_t type) { disabled = initialized = false; + buf = true; + write_buffer_pos = 0; backend = log_mgr->CreateBackend(type); assert(backend); @@ -108,6 +111,7 @@ WriterFrontend::~WriterFrontend() void WriterFrontend::Stop() { + FlushWriteBuffer(); SetDisable(); backend->Stop(); } @@ -125,7 +129,7 @@ void WriterFrontend::Init(string arg_path, int arg_num_fields, const Field* cons fields = arg_fields; initialized = true; - backend->SendIn(new InitMessage(backend, arg_path, arg_num_fields, arg_fields)); + backend->SendIn(new InitMessage(backend, this, arg_path, arg_num_fields, arg_fields)); } void WriterFrontend::Write(int num_fields, Value** vals) @@ -133,7 +137,34 @@ void WriterFrontend::Write(int num_fields, Value** vals) if ( disabled ) return; - backend->SendIn(new WriteMessage(backend, num_fields, vals)); + if ( ! write_buffer ) + { + // Need new buffer. + write_buffer = new Value**[WRITER_BUFFER_SIZE]; + write_buffer_pos = 0; + } + + if ( write_buffer_pos >= WRITER_BUFFER_SIZE ) + // Buffer full. + FlushWriteBuffer(); + + write_buffer[write_buffer_pos++] = vals; + + if ( ! buf ) + // Send out immediately if we don't want buffering. + FlushWriteBuffer(); + } + +void WriterFrontend::FlushWriteBuffer() + { + if ( ! write_buffer_pos ) + // Nothing to do. 
+ return; + + backend->SendIn(new WriteMessage(backend, num_fields, write_buffer_pos, write_buffer)); + + // Clear buffer (no delete, we pass ownership to child thread.) + write_buffer = 0; } void WriterFrontend::SetBuf(bool enabled) @@ -141,7 +172,13 @@ void WriterFrontend::SetBuf(bool enabled) if ( disabled ) return; + buf = enabled; + backend->SendIn(new SetBufMessage(backend, enabled)); + + if ( ! buf ) + // Make sure no longer buffer any still queued data. + FlushWriteBuffer(); } void WriterFrontend::Flush() @@ -149,6 +186,7 @@ void WriterFrontend::Flush() if ( disabled ) return; + FlushWriteBuffer(); backend->SendIn(new FlushMessage(backend)); } @@ -157,6 +195,7 @@ void WriterFrontend::Rotate(string rotated_path, double open, double close, bool if ( disabled ) return; + FlushWriteBuffer(); backend->SendIn(new RotateMessage(backend, this, rotated_path, open, close, terminating)); } @@ -165,6 +204,7 @@ void WriterFrontend::Finish() if ( disabled ) return; + FlushWriteBuffer(); backend->SendIn(new FinishMessage(backend)); } diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index 1998429d38..ed1a674842 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -34,6 +34,7 @@ public: void Write(int num_fields, Value** vals); void SetBuf(bool enabled); void Flush(); + void FlushWriteBuffer(); void Rotate(string rotated_path, double open, double close, bool terminating); void Finish(); @@ -49,18 +50,22 @@ public: protected: friend class Manager; - WriterBackend* backend; bool disabled; bool initialized; + bool buf; string path; int num_fields; const Field* const * fields; + + // Buffer for bulk writes. + static const int WRITER_BUFFER_SIZE = 50; + + int write_buffer_pos; + Value*** write_buffer; }; } - - #endif diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 70f513be3b..a1ceb6e217 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -317,8 +317,7 @@ bool Ascii::DoWrite(int num_fields, const Field* const * fields, return true; } -bool Ascii::DoRotate(WriterFrontend* writer, string rotated_path, double open, - double close, bool terminating) +bool Ascii::DoRotate(string rotated_path, double open, double close, bool terminating) { // Don't rotate special files or if there's not one currently open. if ( ! file || IsSpecial(Path()) ) @@ -330,7 +329,7 @@ bool Ascii::DoRotate(WriterFrontend* writer, string rotated_path, double open, string nname = rotated_path + "." + LogExt(); rename(fname.c_str(), nname.c_str()); - if ( ! FinishedRotation(writer, nname, fname, open, close, terminating) ) + if ( ! 
FinishedRotation(nname, fname, open, close, terminating) ) { Error(Fmt("error rotating %s to %s", fname.c_str(), nname.c_str())); return false; diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/Ascii.h index 37fcfef267..0c627c68e9 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/Ascii.h @@ -23,8 +23,8 @@ protected: virtual bool DoWrite(int num_fields, const Field* const * fields, Value** vals); virtual bool DoSetBuf(bool enabled); - virtual bool DoRotate(WriterFrontend* writer, string rotated_path, - double open, double close, bool terminating); + virtual bool DoRotate(string rotated_path, double open, + double close, bool terminating); virtual bool DoFlush(); virtual bool DoFinish(); diff --git a/src/logging/writers/None.cc b/src/logging/writers/None.cc index e419d88a6b..a9a7872f85 100644 --- a/src/logging/writers/None.cc +++ b/src/logging/writers/None.cc @@ -4,10 +4,9 @@ using namespace logging; using namespace writer; -bool None::DoRotate(WriterFrontend* writer, string rotated_path, - double open, double close, bool terminating) +bool None::DoRotate(string rotated_path, double open, double close, bool terminating) { - if ( ! FinishedRotation(writer, string("/dev/null"), Path(), open, close, terminating)) + if ( ! FinishedRotation(string("/dev/null"), Path(), open, close, terminating)) { Error(Fmt("error rotating %s", Path().c_str())); return false; diff --git a/src/logging/writers/None.h b/src/logging/writers/None.h index 9b2ab6c698..9360ef44f6 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/None.h @@ -23,8 +23,8 @@ protected: virtual bool DoWrite(int num_fields, const Field* const * fields, Value** vals) { return true; } virtual bool DoSetBuf(bool enabled) { return true; } - virtual bool DoRotate(WriterFrontend* writer, string rotated_path, - double open, double close, bool terminating); + virtual bool DoRotate(string rotated_path, double open, + double close, bool terminating); virtual bool DoFlush() { return true; } virtual bool DoFinish() { return true; } }; diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 8f37041bb6..ec249e90ad 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -184,8 +184,11 @@ protected: * Triggers a heartbeat message being sent to the client thread. * * This is method is called regularly by the threading::Manager. + * + * Can be overriden in derived classed to hook into the heart beat, + * but must call the parent implementation. */ - void Heartbeat(); + virtual void Heartbeat(); /** * Overriden from BasicThread. @@ -194,6 +197,17 @@ protected: virtual void Run(); virtual void OnStop(); + /** + * Regulatly triggered for execution in the child thread. + * + * When overriding, one must call the parent class' implementation. + * + * network_time: The network_time when the heartbeat was trigger by + * the main thread. + * + * current_time: Wall clock when the heartbeat was trigger by the + * main thread. 
+ */ virtual bool DoHeartbeat(double network_time, double current_time) { return true; } private: From 1058e11ffbe9c6e659f963a87a9c696965207e58 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 1 Feb 2012 04:40:01 -0800 Subject: [PATCH 078/651] Adding thread statistics to prof.log --- src/Stats.cc | 15 ++++++++++++++- src/threading/Manager.cc | 17 +++++++++++++++++ src/threading/Manager.h | 21 +++++++++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) diff --git a/src/Stats.cc b/src/Stats.cc index 55835613e9..05ce33daed 100644 --- a/src/Stats.cc +++ b/src/Stats.cc @@ -9,7 +9,7 @@ #include "ConnCompressor.h" #include "DNS_Mgr.h" #include "Trigger.h" - +#include "threading/Manager.h" int killed_by_inactivity = 0; @@ -217,6 +217,19 @@ void ProfileLogger::Log() current_timers[i])); } + file->Write(fmt("%0.6f Threads: current=%d\n", network_time, thread_mgr->NumThreads())); + + const threading::Manager::msg_stats_list& thread_stats = thread_mgr->GetMsgThreadStats(); + for ( threading::Manager::msg_stats_list::const_iterator i = thread_stats.begin(); + i != thread_stats.end(); ++i ) + { + threading::MsgThread::Stats s = i->second; + file->Write(fmt(" %20s in=%" PRIu64 " out=%" PRIu64 " pending=%" PRIu64 "/%" PRIu64 "\n", + i->first.c_str(), + s.sent_in, s.sent_out, + s.pending_in, s.pending_out)); + } + // Script-level state. unsigned int size, mem = 0; PDict(ID)* globals = global_scope()->Vars(); diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index d963876755..2e8f6eb1fc 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -101,4 +101,21 @@ void Manager::Process() next_beat = network_time + HEART_BEAT_INTERVAL; } +const threading::Manager::msg_stats_list& threading::Manager::GetMsgThreadStats() + { + stats.clear(); + + for ( msg_thread_list::iterator i = msg_threads.begin(); i != msg_threads.end(); i++ ) + { + MsgThread* t = *i; + + MsgThread::Stats s; + t->GetStats(&s); + + stats.push_back(std::make_pair(t->Name(),s)); + } + + return stats; + } + diff --git a/src/threading/Manager.h b/src/threading/Manager.h index 2c4f88fa1e..d2f97209c9 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -43,6 +43,25 @@ public: */ void Terminate(); + typedef std::list > msg_stats_list; + + /** + * Returns statistics from all current MsgThread instances. + * + * @return A list of statistics, with one entry for each MsgThread. + * Each entry is a tuple of thread name and statistics. The list + * reference remains valid until the next call to this method (or + * termination of the manager). + */ + const msg_stats_list& GetMsgThreadStats(); + + /** + * Returns the number of currently active threads. This counts all + * threads that are not yet joined, includingt any potentially in + * Terminating() state. + */ + int NumThreads() const { return all_threads.size(); } + protected: friend class BasicThread; friend class MsgThread; @@ -96,6 +115,8 @@ private: bool did_process; // True if the last Process() found some work to do. double next_beat; // Timestamp when the next heartbeat will be sent. + + msg_stats_list stats; }; } From 29fc56105ddabcc3f29c9b3457c4b9d53f0bca5e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 1 Feb 2012 07:16:24 -0800 Subject: [PATCH 079/651] Documenting logging API. 
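As a reading aid, here is a minimal sketch of what the documented backend
interface expects from a writer implementation. The "Foo" writer and its
trivial method bodies are hypothetical; only the overridden signatures come
from WriterBackend.h as of this patch (the constructor and factory plumbing
change again in a later commit):

    #include "WriterBackend.h"   // include path may differ per writer

    namespace logging { namespace writer {

    class Foo : public WriterBackend {
    public:
        Foo() : WriterBackend("Foo") {}

    protected:
        virtual bool DoInit(string path, int num_fields,
                            const Field* const* fields)   { return true; }

        // Called once per buffered log entry; returning false disables the writer.
        virtual bool DoWrite(int num_fields, const Field* const* fields,
                             Value** vals)   { return true; }

        virtual bool DoSetBuf(bool enabled)   { return true; }
        virtual bool DoFlush()   { return true; }

        // Even a no-op rotation must report back via FinishedRotation()
        // (stand-in file names for this sketch).
        virtual bool DoRotate(string rotated_path, double open, double close,
                              bool terminating)
            { return FinishedRotation(rotated_path, Path(), open, close, terminating); }

        virtual bool DoFinish()   { return true; }

        // Heartbeat overrides must chain to the parent implementation.
        virtual bool DoHeartbeat(double network_time, double current_time)
            { return WriterBackend::DoHeartbeat(network_time, current_time); }
    };

    } }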
--- src/logging/Manager.cc | 2 +- src/logging/Manager.h | 204 ++++++++++++++++--- src/logging/WriterBackend.cc | 56 +++++- src/logging/WriterBackend.h | 357 ++++++++++++++++++++++------------ src/logging/WriterFrontend.cc | 2 +- src/logging/WriterFrontend.h | 178 ++++++++++++++--- src/logging/writers/Ascii.h | 4 +- src/logging/writers/None.h | 2 +- 8 files changed, 625 insertions(+), 180 deletions(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 09c5030fdc..0876b10eb2 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -1279,7 +1279,7 @@ Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, } WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, - int num_fields, Field** fields) + int num_fields, const Field* const* fields) { Stream* stream = FindStream(id); diff --git a/src/logging/Manager.h b/src/logging/Manager.h index 7fa2c271db..1267a19ca7 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -15,32 +15,63 @@ class RotationTimer; namespace logging { -// Description of a log field. +/** + * Definition of a log file, i.e., one column of a log stream. + */ struct Field { - string name; - TypeTag type; - // inner type of sets - TypeTag subtype; + string name; //! Name of the field. + TypeTag type; //! Type of the field. + TypeTag subtype; //! Inner type for sets. + /** + * Constructor. + */ Field() { subtype = TYPE_VOID; } + + /** + * Copy constructor. + */ Field(const Field& other) : name(other.name), type(other.type), subtype(other.subtype) { } - // (Un-)serialize. + /** + * Unserializes a field. + * + * @param fmt The serialization format to use. The format handles + * low-level I/O. + * + * @return False if an error occured. + */ bool Read(SerializationFormat* fmt); + + /** + * Serializes a field. + * + * @param fmt The serialization format to use. The format handles + * low-level I/O. + * + * @return False if an error occured. + */ bool Write(SerializationFormat* fmt) const; }; -// Values as logged by a writer. +/** + * Definition of a log value, i.e., a entry logged by a stream. + * + * This struct essentialy represents a serialization of a Val instance (for + * those Vals supported). + */ struct Value { - TypeTag type; - bool present; // False for unset fields. + TypeTag type; //! The type of the value. + bool present; //! False for optional record fields that are not set. - // The following union is a subset of BroValUnion, including only the - // types we can log directly. struct set_t { bro_int_t size; Value** vals; }; typedef set_t vec_t; + /** + * This union is a subset of BroValUnion, including only the types we + * can log directly. See IsCompatibleType(). + */ union _val { bro_int_t int_val; bro_uint_t uint_val; @@ -52,42 +83,173 @@ struct Value { vec_t vector_val; } val; + /** + * Constructor. + * + * arg_type: The type of the value. + * + * arg_present: False if the value represents an optional record field + * that is not set. + */ Value(TypeTag arg_type = TYPE_ERROR, bool arg_present = true) : type(arg_type), present(arg_present) {} + + /** + * Destructor. + */ ~Value(); - // (Un-)serialize. + /** + * Unserializes a value. + * + * @param fmt The serialization format to use. The format handles low-level I/O. + * + * @return False if an error occured. + */ bool Read(SerializationFormat* fmt); + + /** + * Serializes a value. + * + * @param fmt The serialization format to use. The format handles + * low-level I/O. + * + * @return False if an error occured. 
+ */ bool Write(SerializationFormat* fmt) const; - // Returns true if the type can be logged the framework. If - // `atomic_only` is true, will not permit composite types. + /** + * Returns true if the type can be represented by a Value. If + * `atomic_only` is true, will not permit composite types. + */ static bool IsCompatibleType(BroType* t, bool atomic_only=false); private: - Value(const Value& other) { } + Value(const Value& other) { } // Disabled. }; class WriterBackend; class WriterFrontend; class RotationFinishedMessage; +/** + * Singleton class for managing log streams. + */ class Manager { public: + /** + * Constructor. + */ Manager(); + + /** + * Destructor. + */ ~Manager(); - // These correspond to the BiFs visible on the scripting layer. The - // actual BiFs just forward here. + /** + * Creates a new log stream. + * + * @param id The enum value corresponding the log stream. + * + * @param stream A record of script type \c Log::Stream. + * + * This method corresponds directly to the internal BiF defined in + * logging.bif, which just forwards here. + */ bool CreateStream(EnumVal* id, RecordVal* stream); + + /** + * Enables a log log stream. + * + * @param id The enum value corresponding the log stream. + * + * This method corresponds directly to the internal BiF defined in + * logging.bif, which just forwards here. + */ bool EnableStream(EnumVal* id); + + /** + * Disables a log stream. + * + * @param id The enum value corresponding the log stream. + * + * This methods corresponds directly to the internal BiF defined in + * logging.bif, which just forwards here. + */ bool DisableStream(EnumVal* id); + + /** + * Adds a filter to a log stream. + * + * @param id The enum value corresponding the log stream. + * + * @param filter A record of script type \c Log::Filter. + * + * This methods corresponds directly to the internal BiF defined in + * logging.bif, which just forwards here. + */ bool AddFilter(EnumVal* id, RecordVal* filter); + + /** + * Removes a filter from a log stream. + * + * @param id The enum value corresponding the log stream. + * + * @param name The name of the filter to remove. + * + * This methods corresponds directly to the internal BiF defined in + * logging.bif, which just forwards here. + */ bool RemoveFilter(EnumVal* id, StringVal* name); + + /** + * Removes a filter from a log stream. + * + * @param id The enum value corresponding the log stream. + * + * @param name The name of the filter to remove. + * + * This methods corresponds directly to the internal BiF defined in + * logging.bif, which just forwards here. + */ bool RemoveFilter(EnumVal* id, string name); + + /** + * Write a record to a log stream. + * + * @param id The enum value corresponding the log stream. + * + * @param colums A record of the type defined for the stream's + * columns. + * + * This methods corresponds directly to the internal BiF defined in + * logging.bif, which just forwards here. + */ bool Write(EnumVal* id, RecordVal* columns); - bool SetBuf(EnumVal* id, bool enabled); // Adjusts all writers. - bool Flush(EnumVal* id); // Flushes all writers.. + + /** + * Sets log streams buffering state. This adjusts all associated + * writers to the new state. + * + * @param id The enum value corresponding the log stream. + * + * @param enabled False to disable buffering (default is enabled). + * + * This methods corresponds directly to the internal BiF defined in + * logging.bif, which just forwards here. + */ + bool SetBuf(EnumVal* id, bool enabled); + + /** + * Flushes a log stream. 
This flushed all associated writers. + * + * @param id The enum value corresponding the log stream. + * + * This methods corresponds directly to the internal BiF defined in + * logging.bif, which just forwards here. + */ + bool Flush(EnumVal* id); protected: friend class WriterFrontend; @@ -103,7 +265,7 @@ protected: // Takes ownership of fields. WriterFrontend* CreateWriter(EnumVal* id, EnumVal* writer, string path, - int num_fields, Field** fields); + int num_fields, const Field* const* fields); // Takes ownership of values.. bool Write(EnumVal* id, EnumVal* writer, string path, @@ -112,8 +274,6 @@ protected: // Announces all instantiated writers to peer. void SendAllWritersTo(RemoteSerializer::PeerID peer); - //// Functions safe to use by writers. - // Signals that a file has been rotated. bool FinishedRotation(WriterFrontend* writer, string new_name, string old_name, double open, double close, bool terminating); diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 7c9c1d10ca..e361ca69d3 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -40,6 +40,15 @@ public: virtual bool Process() { Object()->FlushWriteBuffer(); return true; } }; +class DisableMessage : public threading::OutputMessage +{ +public: + DisableMessage(WriterFrontend* writer) + : threading::OutputMessage("Disable", writer) {} + + virtual bool Process() { Object()->SetDisable(); return true; } +}; + } // Backend methods. @@ -86,8 +95,13 @@ bool WriterBackend::FinishedRotation(string new_name, string old_name, return true; } +void WriterBackend::DisableFrontend() + { + SendOut(new DisableMessage(frontend)); + } + bool WriterBackend::Init(WriterFrontend* arg_frontend, string arg_path, int arg_num_fields, - const Field* const * arg_fields) + const Field* const* arg_fields) { frontend = arg_frontend; path = arg_path; @@ -95,7 +109,10 @@ bool WriterBackend::Init(WriterFrontend* arg_frontend, string arg_path, int arg_ fields = arg_fields; if ( ! DoInit(arg_path, arg_num_fields, arg_fields) ) + { + DisableFrontend(); return false; + } return true; } @@ -114,6 +131,7 @@ bool WriterBackend::Write(int arg_num_fields, int num_writes, Value*** vals) #endif DeleteVals(num_writes, vals); + DisableFrontend(); return false; } @@ -129,6 +147,7 @@ bool WriterBackend::Write(int arg_num_fields, int num_writes, Value*** vals) vals[j][i]->type, fields[i]->type); Debug(DBG_LOGGING, msg); + DisableFrontend(); DeleteVals(num_writes, vals); return false; } @@ -148,6 +167,9 @@ bool WriterBackend::Write(int arg_num_fields, int num_writes, Value*** vals) DeleteVals(num_writes, vals); + if ( ! success ) + DisableFrontend(); + return success; } @@ -159,23 +181,47 @@ bool WriterBackend::SetBuf(bool enabled) buffering = enabled; - return DoSetBuf(enabled); + if ( ! DoSetBuf(enabled) ) + { + DisableFrontend(); + return false; + } + + return true; } bool WriterBackend::Rotate(string rotated_path, double open, double close, bool terminating) { - return DoRotate(rotated_path, open, close, terminating); + if ( ! DoRotate(rotated_path, open, close, terminating) ) + { + DisableFrontend(); + return false; + } + + return true; } bool WriterBackend::Flush() { - return DoFlush(); + if ( ! DoFlush() ) + { + DisableFrontend(); + return false; + } + + return true; } bool WriterBackend::Finish() { - return DoFinish(); + if ( ! 
DoFlush() ) + { + DisableFrontend(); + return false; + } + + return true; } bool WriterBackend::DoHeartbeat(double network_time, double current_time) diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 27f4fe45a5..b5d313a480 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -11,180 +11,291 @@ namespace logging { -// The backend runs in its own thread, separate from the main process. +/** + * Base class for writer implementation. When the logging::Manager creates a + * new logging filter, it instantiates a WriterFrontend. That then in turn + * creates a WriterBackend of the right type. The frontend then forwards + * message over the backend as its methods are called. + * + * All of this methods must be called only from the corresponding child + * thread (the constructor is the one exception.) + */ class WriterBackend : public threading::MsgThread { public: + /** + * Constructor. + * + * @param name A descriptive name for writer's type (e.g., \c Ascii). + */ WriterBackend(const string& name); + + /** + * Destructor. + */ virtual ~WriterBackend(); - // One-time initialization of the writer to define the logged fields. - // - // "frontend" is the frontend writer that created this backend. The - // *only* purpose of this value is to be passed back via messages as - // a argument to callbacks. One must not otherwise access the - // frontend, it's running in a different thread. - // - // Interpretation of "path" is left to the writer, and will be - // corresponding the value configured on the script-level. - // - // Returns false if an error occured, in which case the writer must - // not be used further. - // - // The new instance takes ownership of "fields", and will delete them - // when done. - bool Init(WriterFrontend* frontend, string path, int num_fields, const Field* const * fields); + /** + * One-time initialization of the writer to define the logged fields. + * + * @param frontend The frontend writer that created this backend. The + * *only* purpose of this value is to be passed back via messages as + * a argument to callbacks. One must not otherwise access the + * frontend, it's running in a different thread. + * + * @param path A string left to the interpretation of the writer + * implementation; it corresponds to the value configured on the + * script-level for the logging filter. + * + * @param num_fields The number of log fields for the stream. + * + * @param fields An array of size \a num_fields with the log fields. + * The methods takes ownership of the array. + * + * @return False if an error occured. + */ + bool Init(WriterFrontend* frontend, string path, int num_fields, const Field* const* fields); - // Writes one log entry. The method takes ownership of "vals" and - // will return immediately after queueing the write request, which is - // potentially before output has actually been written out. - // - // num_fields and the types of the Values must match what was passed - // to Init(). - // - // Returns false if an error occured, in which case the writer must - // not be used any further. + /** + * Writes one log entry. + * + * @param num_fields: The number of log fields for this stream. The + * value must match what was passed to Init(). + * + * @param An array of size \a num_fields with the log values. Their + * types musst match with the field passed to Init(). The method + * takes ownership of \a vals.. + * + * Returns false if an error occured, in which case the writer must + * not be used any further. 
+ * + * @return False if an error occured. + */ bool Write(int num_fields, int num_writes, Value*** vals); - // Sets the buffering status for the writer, if the writer supports - // that. (If not, it will be ignored). + /** + * Sets the buffering status for the writer, assuming the writer + * supports that. (If not, it will be ignored). + * + * @param enabled False if buffering is to be disabled (by default + * it's on). + * + * @return False if an error occured. + */ bool SetBuf(bool enabled); - // Flushes any currently buffered output, if the writer supports - // that. (If not, it will be ignored). + /** + * Flushes any currently buffered output, assuming the writer + * supports that. (If not, it will be ignored). + * + * @return False if an error occured. + */ bool Flush(); - // Triggers rotation, if the writer supports that. (If not, it will - // be ignored). + /** + * Triggers rotation, if the writer supports that. (If not, it will + * be ignored). + * + * @return False if an error occured. + */ bool Rotate(string rotated_path, double open, double close, bool terminating); - // Finishes writing to this logger regularly. Must not be called if - // an error has been indicated earlier. After calling this, no - // further writing must be performed. + /** + * Finishes writing to this logger in a regularl fashion. Must not be + * called if an error has been indicated earlier. After calling this, + * no further writing must be performed. + * + * @return False if an error occured. + */ bool Finish(); - //// Thread-safe methods that may be called from the writer - //// implementation. + /** + * Disables the frontend that has instantiated this backend. Once + * disabled,the frontend will not send any further message over. + */ + void DisableFrontend(); - // The following methods return the information as passed to Init(). + /** + * Returns the log path as passed into the constructor. + */ const string Path() const { return path; } + + /** + * Returns the number of log fields as passed into the constructor. + */ int NumFields() const { return num_fields; } + + /** + * Returns the log fields as passed into the constructor. + */ const Field* const * Fields() const { return fields; } - // Returns the current buffering state. + /** + * Returns the current buffering state. + * + * @return True if buffering is enabled. + */ bool IsBuf() { return buffering; } - // Signals to the log manager that a file has been rotated. - // - // writer: The frontend writer that triggered the rotation. This must - // be the value passed into DoRotate(). - // - // new_name: The filename of the rotated file. old_name: The filename - // of the origina file. - // - // open/close: The timestamps when the original file was opened and - // closed, respectively. - // - // terminating: True if rotation request occured due to the main Bro - // process shutting down. + /** + * Signals that a file has been rotated. This must be called by a + * writer's implementation of DoRotate() once rotation has finished. + * + * Most of the parameters should be passed through from DoRotate(). + * + * @param new_name The filename of the rotated file. + * + * @param old_name The filename of the original file. + * + * @param open: The timestamp when the original file was opened. + * + * @param close: The timestamp when the origina file was closed. + * + * @param terminating: True if the original rotation request occured + * due to the main Bro process shutting down. 
+ */ bool FinishedRotation(string new_name, string old_name, double open, double close, bool terminating); protected: - // Methods for writers to override. If any of these returs false, it - // will be assumed that a fatal error has occured that prevents the - // writer from further operation. It will then be disabled and - // deleted. When returning false, the writer should also report the - // error via Error(). Note that even if a writer does not support the - // functionality for one these methods (like rotation), it must still - // return true if that is not to be considered a fatal error. - // - // Called once for initialization of the writer. + /** + * Writer-specific intialization method. + * + * A writer implementation must override this method. If it returns + * false, it will be assumed that a fatal error has occured that + * prevents the writer from further operation; it will then be + * disabled and eventually deleted. When returning false, an + * implementation should also call Error() to indicate what happened. + */ virtual bool DoInit(string path, int num_fields, - const Field* const * fields) = 0; + const Field* const* fields) = 0; - // Called once per log entry to record. - virtual bool DoWrite(int num_fields, const Field* const * fields, + /** + * Writer-specific output method implementing recording of fone log + * entry. + * + * A writer implementation must override this method. If it returns + * false, it will be assumed that a fatal error has occured that + * prevents the writer from further operation; it will then be + * disabled and eventually deleted. When returning false, an + * implementation should also call Error() to indicate what happened. + */ + virtual bool DoWrite(int num_fields, const Field* const* fields, Value** vals) = 0; - // Called when the buffering status for this writer is changed. If - // buffering is disabled, the writer should attempt to write out - // information as quickly as possible even if doing so may have a - // performance impact. If enabled (which is the default), it may - // buffer data as helpful and write it out later in a way optimized - // for performance. The current buffering state can be queried via - // IsBuf(). - // - // A writer may ignore buffering changes if it doesn't fit with its - // semantics (but must still return true in that case). + /** + * Writer-specific method implementing a change of fthe buffering + * state. If buffering is disabled, the writer should attempt to + * write out information as quickly as possible even if doing so may + * have a performance impact. If enabled (which is the default), it + * may buffer data as helpful and write it out later in a way + * optimized for performance. The current buffering state can be + * queried via IsBuf(). + * + * A writer implementation must override this method but it can just + * ignore calls if buffering doesn't align with its semantics. + * + * If the method returns false, it will be assumed that a fatal error + * has occured that prevents the writer from further operation; it + * will then be disabled and eventually deleted. When returning + * false, an implementation should also call Error() to indicate what + * happened. + */ virtual bool DoSetBuf(bool enabled) = 0; - // Called to flush any currently buffered output. - // - // A writer may ignore flush requests if it doesn't fit with its - // semantics (but must still return true in that case). + /** + * Writer-specific method implementing flushing of its output. 
+ * + * A writer implementation must override this method but it can just + * ignore calls if flushing doesn't align with its semantics. + * + * If the method returns false, it will be assumed that a fatal error + * has occured that prevents the writer from further operation; it + * will then be disabled and eventually deleted. When returning + * false, an implementation should also call Error() to indicate what + * happened. + */ virtual bool DoFlush() = 0; - // Called when a log output is to be rotated. Most directly this only - // applies to writers writing into files, which should then close the - // current file and open a new one. However, a writer may also - // trigger other apppropiate actions if semantics are similar. - // - // Once rotation has finished, the implementation should call - // RotationDone() to signal the log manager that potential - // postprocessors can now run. - // - // "rotate_path" reflects the path to where the rotated output is to - // be moved, with specifics depending on the writer. It should - // generally be interpreted in a way consistent with that of "path" - // as passed into DoInit(). As an example, for file-based output, - // "rotate_path" could be the original filename extended with a - // timestamp indicating the time of the rotation. - // - // "open" and "close" are the network time's when the *current* file - // was opened and closed, respectively. - // - // "terminating" indicated whether the rotation request occurs due - // the main Bro prcoess terminating (and not because we've reach a - // regularly scheduled time for rotation). - // - // A writer may ignore rotation requests if it doesn't fit with its - // semantics (but must still return true in that case). + /** + * Writer-specific method implementing log rotation. Most directly + * this only applies to writers writing into files, which should then + * close the current file and open a new one. However, a writer may + * also trigger other apppropiate actions if semantics are similar. * + * Once rotation has finished, the implementation must call + * FinishedRotation() to signal the log manager that potential + * postprocessors can now run. + * + * A writer implementation must override this method but it can just + * ignore calls if flushing doesn't align with its semantics. It + * still needs to call FinishedRotation() though. + * + * If the method returns false, it will be assumed that a fatal error + * has occured that prevents the writer from further operation; it + * will then be disabled and eventually deleted. When returning + * false, an implementation should also call Error() to indicate what + * happened. + * + * @param rotate_path Reflects the path to where the rotated output + * is to be moved, with specifics depending on the writer. It should + * generally be interpreted in a way consistent with that of \c path + * as passed into DoInit(). As an example, for file-based output, \c + * rotate_path could be the original filename extended with a + * timestamp indicating the time of the rotation. + * + * @param open The network time when the *current* file was opened. + * + * @param close The network time when the *current* file was closed. + * + * @param terminating Indicates whether the rotation request occurs + * due the main Bro prcoess terminating (and not because we've + * reached a regularly scheduled time for rotation). + */ virtual bool DoRotate(string rotated_path, double open, double close, bool terminating) = 0; - // Called once on termination. 
Not called when any of the other - // methods has previously signaled an error, i.e., executing this - // method signals a regular shutdown of the writer. + /** + * Writer-specific method implementing log output finalization at + * termination. Not called when any of the other methods has + * previously signaled an error, i.e., executing this method signals + * a regular shutdown of the writer. + * + * A writer implementation must override this method but it can just + * ignore calls if flushing doesn't align with its semantics. + * + * If the method returns false, it will be assumed that a fatal error + * has occured that prevents the writer from further operation; it + * will then be disabled and eventually deleted. When returning + * false, an implementation should also call Error() to indicate what + * happened. + */ virtual bool DoFinish() = 0; - // Triggered by regular heartbeat messages from the main process. - // - // Note when overriding, you must call WriterBackend::DoHeartbeat(). + /** + * Triggered by regular heartbeat messages from the main thread. + * + * This method can be overridden but once must call + * WriterBackend::DoHeartbeat(). + */ virtual bool DoHeartbeat(double network_time, double current_time); private: friend class Manager; - // When an error occurs, we call this method to set a flag marking - // the writer as disabled. The Manager will check the flag later and - // remove the writer. - bool Disabled() { return disabled; } - - // Deletes the values as passed into Write(). + /** + * Deletes the values as passed into Write(). + */ void DeleteVals(int num_writes, Value*** vals); + // Frontend that instantiated us. This object must not be access from + // this class, it's running in a different thread! WriterFrontend* frontend; - string path; - int num_fields; - const Field* const * fields; - bool buffering; - bool disabled; - // For implementing Fmt(). - char* buf; - unsigned int buf_len; + string path; // Log path. + int num_fields; // Number of log fields. + const Field* const* fields; // Log fields. + bool buffering; // True if buffering is enabled. }; diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 2f7c1d6e7e..137cdf90ec 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -9,7 +9,7 @@ namespace logging { class InitMessage : public threading::InputMessage { public: - InitMessage(WriterBackend* backend, WriterFrontend* frontend, const string path, const int num_fields, const Field* const *fields) + InitMessage(WriterBackend* backend, WriterFrontend* frontend, const string path, const int num_fields, const Field* const* fields) : threading::InputMessage("Init", backend), path(path), num_fields(num_fields), fields(fields) { } diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index ed1a674842..6f1bb4ea1b 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -1,6 +1,4 @@ // See the file "COPYING" in the main distribution directory for copyright. -// -// Bridge class between main process and writer threads. #ifndef LOGGING_WRITERFRONTEND_H #define LOGGING_WRITERFRONTEND_H @@ -13,57 +11,187 @@ namespace logging { class WriterBackend; +/** + * Bridge class between the logging::Manager and backend writer threads. The + * Manager instantiates one \a WriterFrontend for each open logging filter. + * Each frontend in turns instantiates a WriterBackend-derived class + * internally that's specific to the particular output format. 
That backend + * spawns a new thread, and it receives messages from the frontend that + * correspond to method called by the manager. + * + */ class WriterFrontend { public: + /** + * Constructor. + * + * type: The backend writer type, with the value corresponding to the + * script-level \c Log::Writer enum (e.g., \a WRITER_ASCII). The + * frontend will internally instantiate a WriterBackend of the + * corresponding type. + * + * Frontends must only be instantiated by the main thread. + */ WriterFrontend(bro_int_t type); + + /** + * Destructor. + * + * Frontends must only be destroyed by the main thread. + */ virtual ~WriterFrontend(); - // Disables the writers and stop the backend thread. + /** + * Stops all output to this writer. Calling this methods disables all + * message forwarding to the backend and stops the backend thread. + * + * This method must only be called from the main thread. + */ void Stop(); - // Interface methods to interact with the writer from the main thread - // (and only from the main thread), typicalli from the log manager. - // All these methods forward (via inter-thread messaging) to the - // corresponding methods of an internally created WriterBackend. See - // there for documentation. - // - // If any of these operations fails, the writer will be automatically - // (but asynchronoulsy) disabled. + /** + * Initializes the writer. + * + * This method generates a message to the backend writer and triggers + * the corresponding message there. If the backend method fails, it + * sends a message back that will asynchronously call Disable(). + * + * See WriterBackend::Init() for arguments. The method takes + * ownership of \a fields. + * + * This method must only be called from the main thread. + */ + void Init(string path, int num_fields, const Field* const* fields); - void Init(string path, int num_fields, const Field* const * fields); + /** + * Write out a record. + * + * This method generates a message to the backend writer and triggers + * the corresponding message there. If the backend method fails, it + * sends a message back that will asynchronously call Disable(). + * + * As an optimization, if buffering is enabled (which is the default) + * this method may buffer several writes and send them over to the + * backend in bulk with a single message. An explicit bulk write of + * all currently buffered data can be triggered with + * FlushWriteBuffer(). The backend writer triggers this with a + * message at every heartbeat. + * + * See WriterBackend::Writer() for arguments (except that this method + * takes only a single record, not an array). The method takes + * ownership of \a vals. + * + * This method must only be called from the main thread. + */ void Write(int num_fields, Value** vals); + + /** + * Sets the buffering state. + * + * This method generates a message to the backend writer and triggers + * the corresponding message there. If the backend method fails, it + * sends a message back that will asynchronously call Disable(). + * + * See WriterBackend::SetBuf() for arguments. + * + * This method must only be called from the main thread. + */ void SetBuf(bool enabled); + + /** + * Flushes the output. + * + * This method generates a message to the backend writer and triggers + * the corresponding message there. In addition, it also triggers + * FlushWriteBuffer(). If the backend method fails, it sends a + * message back that will asynchronously call Disable(). + * + * This method must only be called from the main thread. 
+ */ void Flush(); - void FlushWriteBuffer(); + + /** + * Triggers log rotation. + * + * This method generates a message to the backend writer and triggers + * the corresponding message there. If the backend method fails, it + * sends a message back that will asynchronously call Disable(). + * + * See WriterBackend::Rotate() for arguments. + * + * This method must only be called from the main thread. + */ void Rotate(string rotated_path, double open, double close, bool terminating); + + /** + * Finalizes writing to this tream. + * + * This method generates a message to the backend writer and triggers + * the corresponding message there. If the backend method fails, it + * sends a message back that will asynchronously call Disable(). + * + * This method must only be called from the main thread. + */ void Finish(); - // Calling this disable the writer. All methods calls will be no-ops - // from now on. The Manager will eventually remove disabled writers. + /** + * Explicitly triggers a transfer of all potentially buffered Write() + * operations over to the backend. + * + * This method must only be called from the main thread. + */ + void FlushWriteBuffer(); + + /** + * Disables the writer frontend. From now on, all method calls that + * would normally send message over to the backend, turn into no-ops. + * Note though that it does not stop the backend itself, use Stop() + * to do thast as well (this method is primarily for use as callback + * when the backend wants to disable the frontend). + * + * Disabled frontend will eventually be discarded by the + * logging::Manager. + * + * This method must only be called from the main thread. + */ void SetDisable() { disabled = true; } + + /** + * Returns true if the writer frontend has been disabled with SetDisable(). + */ bool Disabled() { return disabled; } + /** + * Returns the log path as passed into the constructor. + */ const string Path() const { return path; } + + /** + * Returns the number of log fields as passed into the constructor. + */ int NumFields() const { return num_fields; } + + /** + * Returns the log fields as passed into the constructor. + */ const Field* const * Fields() const { return fields; } protected: friend class Manager; - WriterBackend* backend; - bool disabled; - bool initialized; - bool buf; + WriterBackend* backend; // The backend we have instanatiated. + bool disabled; // True if disabled. + bool initialized; // True if initialized. + bool buf; // True if buffering is enabled (default). - string path; - int num_fields; - const Field* const * fields; + string path; // The log path. + int num_fields; // The number of log fields. + const Field* const* fields; // The log fields. // Buffer for bulk writes. static const int WRITER_BUFFER_SIZE = 50; - - int write_buffer_pos; - Value*** write_buffer; + int write_buffer_pos; // Position of next write in buffer. + Value*** write_buffer; // Buffer of size WRITER_BUFFER_SIZE. 
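	// Illustration of the buffering scheme (example numbers only): with
	// buffering enabled, Write() collects up to WRITER_BUFFER_SIZE entries
	// in write_buffer and hands them to the backend as one bulk
	// WriteMessage. Partially filled buffers are pushed out by
	// FlushWriteBuffer(), which runs on Flush(), Rotate(), Finish(), Stop(),
	// and on every heartbeat (the backend sends a FlushWriteBufferMessage
	// back). So, e.g., 120 consecutive Write() calls normally turn into
	// three messages to the writer thread: two full batches of 50 and one
	// partial batch on the next flush or heartbeat.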
}; } diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/Ascii.h index 0c627c68e9..4a9dea4950 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/Ascii.h @@ -19,8 +19,8 @@ public: protected: virtual bool DoInit(string path, int num_fields, - const Field* const * fields); - virtual bool DoWrite(int num_fields, const Field* const * fields, + const Field* const* fields); + virtual bool DoWrite(int num_fields, const Field* const* fields, Value** vals); virtual bool DoSetBuf(bool enabled); virtual bool DoRotate(string rotated_path, double open, diff --git a/src/logging/writers/None.h b/src/logging/writers/None.h index 9360ef44f6..b25bb09348 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/None.h @@ -20,7 +20,7 @@ protected: virtual bool DoInit(string path, int num_fields, const Field* const * fields) { return true; } - virtual bool DoWrite(int num_fields, const Field* const * fields, + virtual bool DoWrite(int num_fields, const Field* const* fields, Value** vals) { return true; } virtual bool DoSetBuf(bool enabled) { return true; } virtual bool DoRotate(string rotated_path, double open, From ffb4094d365c61a9df8fea0d50ae375f06cc56b4 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 3 Feb 2012 02:41:10 -0800 Subject: [PATCH 080/651] Bugfixes --- src/logging/Manager.cc | 8 ++++---- src/logging/Manager.h | 2 +- src/logging/WriterBackend.cc | 8 ++++---- src/logging/WriterBackend.h | 15 ++++++++------- src/logging/WriterFrontend.cc | 22 ++++++++++------------ src/logging/writers/Ascii.cc | 2 +- src/logging/writers/Ascii.h | 5 +++-- src/logging/writers/None.h | 5 +++-- src/threading/MsgThread.cc | 8 +------- 9 files changed, 35 insertions(+), 40 deletions(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 0876b10eb2..63d4c60a5c 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -21,7 +21,7 @@ struct WriterDefinition { bro_int_t type; // The type. const char *name; // Descriptive name for error messages. bool (*init)(); // An optional one-time initialization function. - WriterBackend* (*factory)(); // A factory function creating instances. + WriterBackend* (*factory)(WriterFrontend* frontend); // A factory function creating instances. }; // Static table defining all availabel log writers. @@ -30,7 +30,7 @@ WriterDefinition log_writers[] = { { BifEnum::Log::WRITER_ASCII, "Ascii", 0, writer::Ascii::Instantiate }, // End marker, don't touch. - { BifEnum::Log::WRITER_DEFAULT, "None", 0, (WriterBackend* (*)())0 } + { BifEnum::Log::WRITER_DEFAULT, "None", 0, (WriterBackend* (*)(WriterFrontend* frontend))0 } }; struct Manager::Filter { @@ -436,7 +436,7 @@ Manager::~Manager() delete *s; } -WriterBackend* Manager::CreateBackend(bro_int_t type) +WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) { WriterDefinition* ld = log_writers; @@ -478,7 +478,7 @@ WriterBackend* Manager::CreateBackend(bro_int_t type) assert(ld->factory); - WriterBackend* backend = (*ld->factory)(); + WriterBackend* backend = (*ld->factory)(frontend); assert(backend); return backend; } diff --git a/src/logging/Manager.h b/src/logging/Manager.h index 1267a19ca7..f6829b3554 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -259,7 +259,7 @@ protected: // Instantiates a new WriterBackend of the given type (note that // doing so creates a new thread!). - WriterBackend* CreateBackend(bro_int_t type); + WriterBackend* CreateBackend(WriterFrontend* frontend, bro_int_t type); //// Function also used by the RemoteSerializer. 
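With the factory now receiving the frontend, hooking a new writer into the
manager looks roughly like the sketch below. "Foo" and WRITER_FOO are made-up
names; only the WriterDefinition layout and the
WriterBackend(WriterFrontend*, const string&) constructor are taken from this
patch:

    // In the writer's header (e.g. src/logging/writers/Foo.h):
    class Foo : public WriterBackend {
    public:
        Foo(WriterFrontend* frontend) : WriterBackend(frontend, "Foo") {}

        static WriterBackend* Instantiate(WriterFrontend* frontend)
            { return new Foo(frontend); }

        // ... DoInit()/DoWrite()/... overrides as before ...
    };

    // In Manager.cc's log_writers[] table, before the end marker:
    { BifEnum::Log::WRITER_FOO, "Foo", 0, writer::Foo::Instantiate },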
diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index e361ca69d3..fe3a6ef560 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -55,12 +55,13 @@ public: using namespace logging; -WriterBackend::WriterBackend(const string& name) : MsgThread(name) +WriterBackend::WriterBackend(WriterFrontend* arg_frontend, const string& name) : MsgThread(name) { path = ""; num_fields = 0; fields = 0; buffering = true; + frontend = arg_frontend; } WriterBackend::~WriterBackend() @@ -100,10 +101,8 @@ void WriterBackend::DisableFrontend() SendOut(new DisableMessage(frontend)); } -bool WriterBackend::Init(WriterFrontend* arg_frontend, string arg_path, int arg_num_fields, - const Field* const* arg_fields) +bool WriterBackend::Init(string arg_path, int arg_num_fields, const Field* const* arg_fields) { - frontend = arg_frontend; path = arg_path; num_fields = arg_num_fields; fields = arg_fields; @@ -227,6 +226,7 @@ bool WriterBackend::Finish() bool WriterBackend::DoHeartbeat(double network_time, double current_time) { SendOut(new FlushWriteBufferMessage(frontend)); + return true; } diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index b5d313a480..21dcd41ff7 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -26,9 +26,15 @@ public: /** * Constructor. * + * @param frontend The frontend writer that created this backend. The + * *only* purpose of this value is to be passed back via messages as + * a argument to callbacks. One must not otherwise access the + * frontend, it's running in a different thread. + * * @param name A descriptive name for writer's type (e.g., \c Ascii). + * */ - WriterBackend(const string& name); + WriterBackend(WriterFrontend* frontend, const string& name); /** * Destructor. @@ -38,11 +44,6 @@ public: /** * One-time initialization of the writer to define the logged fields. * - * @param frontend The frontend writer that created this backend. The - * *only* purpose of this value is to be passed back via messages as - * a argument to callbacks. One must not otherwise access the - * frontend, it's running in a different thread. - * * @param path A string left to the interpretation of the writer * implementation; it corresponds to the value configured on the * script-level for the logging filter. @@ -54,7 +55,7 @@ public: * * @return False if an error occured. */ - bool Init(WriterFrontend* frontend, string path, int num_fields, const Field* const* fields); + bool Init(string path, int num_fields, const Field* const* fields); /** * Writes one log entry. 
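Seen from the frontend, the initialization flow changes roughly as follows (condensed from the WriterFrontend.cc hunks below; error handling omitted):

    // Before: the backend was created without knowing its frontend, which
    // therefore had to pass itself along with the InitMessage.
    backend = log_mgr->CreateBackend(type);
    backend->SendIn(new InitMessage(backend, this, arg_path, arg_num_fields, arg_fields));

    // After: the frontend is bound at construction time, and Init() only
    // describes the log layout.
    backend = log_mgr->CreateBackend(this, type);
    backend->SendIn(new InitMessage(backend, arg_path, arg_num_fields, arg_fields));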
diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 137cdf90ec..79c180b749 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -9,14 +9,13 @@ namespace logging { class InitMessage : public threading::InputMessage { public: - InitMessage(WriterBackend* backend, WriterFrontend* frontend, const string path, const int num_fields, const Field* const* fields) + InitMessage(WriterBackend* backend, const string path, const int num_fields, const Field* const* fields) : threading::InputMessage("Init", backend), path(path), num_fields(num_fields), fields(fields) { } - virtual bool Process() { return Object()->Init(frontend, path, num_fields, fields); } + virtual bool Process() { return Object()->Init(path, num_fields, fields); } private: - WriterFrontend* frontend; const string path; const int num_fields; const Field * const* fields; @@ -47,7 +46,7 @@ class WriteMessage : public threading::InputMessage public: WriteMessage(WriterBackend* backend, int num_fields, int num_writes, Value*** vals) : threading::InputMessage("Write", backend), - num_fields(num_fields), vals(vals) {} + num_fields(num_fields), num_writes(num_writes), vals(vals) {} virtual bool Process() { return Object()->Write(num_fields, num_writes, vals); } @@ -98,8 +97,9 @@ WriterFrontend::WriterFrontend(bro_int_t type) { disabled = initialized = false; buf = true; + write_buffer = 0; write_buffer_pos = 0; - backend = log_mgr->CreateBackend(type); + backend = log_mgr->CreateBackend(this, type); assert(backend); backend->Start(); @@ -129,7 +129,7 @@ void WriterFrontend::Init(string arg_path, int arg_num_fields, const Field* cons fields = arg_fields; initialized = true; - backend->SendIn(new InitMessage(backend, this, arg_path, arg_num_fields, arg_fields)); + backend->SendIn(new InitMessage(backend, arg_path, arg_num_fields, arg_fields)); } void WriterFrontend::Write(int num_fields, Value** vals) @@ -144,15 +144,12 @@ void WriterFrontend::Write(int num_fields, Value** vals) write_buffer_pos = 0; } - if ( write_buffer_pos >= WRITER_BUFFER_SIZE ) - // Buffer full. - FlushWriteBuffer(); - write_buffer[write_buffer_pos++] = vals; - if ( ! buf ) - // Send out immediately if we don't want buffering. + if ( write_buffer_pos >= WRITER_BUFFER_SIZE || ! buf ) + // Buffer full (or no bufferin desired). FlushWriteBuffer(); + } void WriterFrontend::FlushWriteBuffer() @@ -165,6 +162,7 @@ void WriterFrontend::FlushWriteBuffer() // Clear buffer (no delete, we pass ownership to child thread.) 
write_buffer = 0; + write_buffer_pos = 0; } void WriterFrontend::SetBuf(bool enabled) diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index a1ceb6e217..0e522dde1d 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -10,7 +10,7 @@ using namespace logging; using namespace writer; -Ascii::Ascii() : WriterBackend("Ascii") +Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend, "Ascii") { file = 0; diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/Ascii.h index 4a9dea4950..4a24aad9b7 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/Ascii.h @@ -11,10 +11,11 @@ namespace logging { namespace writer { class Ascii : public WriterBackend { public: - Ascii(); + Ascii(WriterFrontend* frontend); ~Ascii(); - static WriterBackend* Instantiate() { return new Ascii; } + static WriterBackend* Instantiate(WriterFrontend* frontend) + { return new Ascii(frontend); } static string LogExt(); protected: diff --git a/src/logging/writers/None.h b/src/logging/writers/None.h index b25bb09348..19dc685ecb 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/None.h @@ -11,10 +11,11 @@ namespace logging { namespace writer { class None : public WriterBackend { public: - None() : WriterBackend("None") {} + None(WriterFrontend* frontend) : WriterBackend(frontend, "None") {} ~None() {}; - static WriterBackend* Instantiate() { return new None; } + static WriterBackend* Instantiate(WriterFrontend* frontend) + { return new None(frontend); } protected: virtual bool DoInit(string path, int num_fields, diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 455c177df6..52da7c7400 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -214,13 +214,7 @@ BasicOutputMessage* MsgThread::RetrieveOut() BasicOutputMessage* msg = queue_out.Get(); assert(msg); -#ifdef DEBUG - if ( msg->Name() != "DebugMessage" ) // Avoid recursion. - { - string s = Fmt("Retrieved '%s' from %s", msg->Name().c_str(), Name().c_str()); - Debug(DBG_THREADING, s.c_str()); - } -#endif + DBG_LOG(DBG_THREADING, "Retrieved '%s' from %s", msg->Name().c_str(), Name().c_str()); return msg; } From 4879cb7b0ddcba2111deebb688d18a5c9cd9c8af Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 3 Feb 2012 03:03:38 -0800 Subject: [PATCH 081/651] Improved signal handling. Sending SIGTERM triggers a normal shutdown of all threads that waits until they have processed their remaining data. However, sending a 2nd SIGTERM while waiting for them to finish will immediately kill them all. --- src/main.cc | 7 +++++++ src/threading/BasicThread.cc | 11 +++++++++++ src/threading/BasicThread.h | 9 +++++++++ src/threading/Manager.cc | 13 +++++++++++++ src/threading/Manager.h | 16 ++++++++++++++++ 5 files changed, 56 insertions(+) diff --git a/src/main.cc b/src/main.cc index 58a23e6c80..e224910db4 100644 --- a/src/main.cc +++ b/src/main.cc @@ -333,6 +333,13 @@ RETSIGTYPE sig_handler(int signo) { set_processing_status("TERMINATING", "sig_handler"); signal_val = signo; + + if ( thread_mgr->Terminating() && (signal_val == SIGTERM || signal_val == SIGINT) ) + // If the thread manager is already terminating (i.e., + // waiting for child threads to exit), another term signal + // will send the threads a kill. 
+ thread_mgr->KillThreads(); + return RETSIGVAL; } diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index f7bd2afbcd..73dc562b31 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -113,6 +113,17 @@ void BasicThread::Join() pthread = 0; } +void BasicThread::Kill() + { + if ( ! (started && pthread) ) + return; + + // I believe this is safe to call from a signal handler ... Not error + // checking so that killing doesn't bail out if we have already + // terminated. + pthread_kill(pthread, SIGKILL); + } + void* BasicThread::launcher(void *arg) { BasicThread* thread = (BasicThread *)arg; diff --git a/src/threading/BasicThread.h b/src/threading/BasicThread.h index df5665c464..aeafc61c52 100644 --- a/src/threading/BasicThread.h +++ b/src/threading/BasicThread.h @@ -121,6 +121,15 @@ protected: */ void Join(); + /** + * Kills the thread immediately. One still needs to call Join() + * afterwards. + * + * This is called from the threading::Manager and safe to execute + * during a signal handler. + */ + void Kill(); + private: // pthread entry function. static void* launcher(void *arg); diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 2e8f6eb1fc..d07311bbe8 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -9,6 +9,7 @@ Manager::Manager() did_process = false; next_beat = 0; + terminating = false; } Manager::~Manager() @@ -21,6 +22,8 @@ void Manager::Terminate() { DBG_LOG(DBG_THREADING, "Terminating thread manager ..."); + terminating = true; + // First process remaining thread output for the message threads. do Process(); while ( did_process ); @@ -37,6 +40,16 @@ void Manager::Terminate() all_threads.clear(); msg_threads.clear(); + + terminating = false; + } + +void Manager::KillThreads() + { + DBG_LOG(DBG_THREADING, "Killing threads ..."); + + for ( all_thread_list::iterator i = all_threads.begin(); i != all_threads.end(); i++ ) + (*i)->Kill(); } void Manager::AddThread(BasicThread* thread) diff --git a/src/threading/Manager.h b/src/threading/Manager.h index d2f97209c9..7d9ba766d4 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -43,6 +43,21 @@ public: */ void Terminate(); + /** + * Returns True if we are currently in Terminate() waiting for + * threads to exit. + */ + bool Terminating() const { return terminating; } + + /** + * Immediately kills all child threads. It does however not yet join + * them, one still needs to call Terminate() for that. + * + * This method is safe to call from a signal handler, and can in fact + * be called while Terminate() is already in progress. + */ + void KillThreads(); + typedef std::list > msg_stats_list; /** @@ -115,6 +130,7 @@ private: bool did_process; // True if the last Process() found some work to do. double next_beat; // Timestamp when the next heartbeat will be sent. + bool terminating; // True if we are in Terminate(). msg_stats_list stats; }; From cf6a346b865fc95275eee83d25656fe95f7bafe3 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 3 Feb 2012 03:27:25 -0800 Subject: [PATCH 082/651] Fixing prof.log output. The queue Size() method was not yet atomic. 
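The race behind this fix: Put() and Get() run in different threads and each locks only its own sub-queue's mutex, so the shared size counter that both of them updated was unsynchronized and the pending counts reported in prof.log could be bogus. The corrected Size() drops the counter and instead sums the per-queue sizes while holding every sub-queue lock, roughly:

    // Sketch of the corrected Size(); see the Queue.h hunk below.
    template<typename T>
    inline uint64_t Queue<T>::Size()
        {
        for ( int i = 0; i < NUM_QUEUES; i++ )      // lock all sub-queues first
            safe_lock(&mutex[i]);

        uint64_t size = 0;
        for ( int i = 0; i < NUM_QUEUES; i++ )
            size += messages[i].size();

        for ( int i = 0; i < NUM_QUEUES; i++ )
            safe_unlock(&mutex[i]);

        return size;
        }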
--- src/Stats.cc | 3 ++- src/threading/MsgThread.cc | 1 + src/threading/Queue.h | 17 ++++++++++------- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/src/Stats.cc b/src/Stats.cc index 05ce33daed..27b433c9ee 100644 --- a/src/Stats.cc +++ b/src/Stats.cc @@ -224,7 +224,8 @@ void ProfileLogger::Log() i != thread_stats.end(); ++i ) { threading::MsgThread::Stats s = i->second; - file->Write(fmt(" %20s in=%" PRIu64 " out=%" PRIu64 " pending=%" PRIu64 "/%" PRIu64 "\n", + file->Write(fmt("%0.6f %-15s in=%" PRIu64 " out=%" PRIu64 " pending=%" PRIu64 "/%" PRIu64 "\n", + network_time, i->first.c_str(), s.sent_in, s.sent_out, s.pending_in, s.pending_out)); diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 52da7c7400..1bda8943da 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -206,6 +206,7 @@ void MsgThread::SendOut(BasicOutputMessage* msg, bool force) return; queue_out.Put(msg); + ++cnt_sent_out; } diff --git a/src/threading/Queue.h b/src/threading/Queue.h index add7019f9c..a25f897d23 100644 --- a/src/threading/Queue.h +++ b/src/threading/Queue.h @@ -67,7 +67,6 @@ private: int read_ptr; // Where the next operation will read from int write_ptr; // Where the next operation will write to - uint64_t size; // Current queue size. }; inline static void safe_lock(pthread_mutex_t* mutex) @@ -120,7 +119,6 @@ inline T Queue::Get() T data = messages[read_ptr].front(); messages[read_ptr].pop(); - --size; read_ptr = (read_ptr + 1) % NUM_QUEUES; @@ -139,7 +137,6 @@ inline void Queue::Put(T data) bool need_signal = messages[write_ptr].empty(); messages[write_ptr].push(data); - ++size; if ( need_signal ) pthread_cond_signal(&has_data[write_ptr]); @@ -165,13 +162,19 @@ inline bool Queue::Ready() template inline uint64_t Queue::Size() { - safe_lock(&mutex[read_ptr]); + // Need to lock all queues. + for ( int i = 0; i < NUM_QUEUES; i++ ) + safe_lock(&mutex[i]); - uint64_t s = size; + uint64_t size = 0; - safe_unlock(&mutex[read_ptr]); + for ( int i = 0; i < NUM_QUEUES; i++ ) + size += messages[i].size(); - return s; + for ( int i = 0; i < NUM_QUEUES; i++ ) + safe_unlock(&mutex[i]); + + return size; } } From 70fe7876a176e15bd0c8046dab939ad0f7fbde90 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 3 Feb 2012 04:01:24 -0800 Subject: [PATCH 083/651] Updating thread naming. Also includes experimental code to adapt the thread name as shown by top, but it's untested. 
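The "name as shown by top" part relies on the non-portable pthread_setname_np() family that BasicThread::SetOSName() wraps below. A hypothetical standalone illustration of the Linux variant (not part of the patch): on Linux the name must fit into 16 bytes including the terminating NUL, and longer names make the call fail with ERANGE, which is worth keeping in mind since the names built in MsgThread::Heartbeat() can easily exceed that.

    // Hypothetical standalone example of the Linux call used by SetOSName().
    #define _GNU_SOURCE
    #include <pthread.h>

    static void set_current_thread_name(const char* name)
        {
        // Fails with ERANGE on Linux if the name is longer than 15 characters.
        pthread_setname_np(pthread_self(), name);
        }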
--- src/Stats.cc | 2 +- src/logging/Manager.cc | 2 ++ src/logging/WriterBackend.cc | 6 +++++- src/logging/WriterBackend.h | 2 +- src/logging/WriterFrontend.cc | 9 +++++++++ src/logging/WriterFrontend.h | 12 ++++++++++++ src/logging/writers/Ascii.cc | 2 +- src/logging/writers/None.h | 2 +- src/threading/BasicThread.cc | 25 +++++++++++++++++++++++-- src/threading/BasicThread.h | 21 ++++++++++++++++++--- src/threading/MsgThread.cc | 8 +++++++- src/threading/MsgThread.h | 4 +--- 12 files changed, 81 insertions(+), 14 deletions(-) diff --git a/src/Stats.cc b/src/Stats.cc index 27b433c9ee..f418e9cccc 100644 --- a/src/Stats.cc +++ b/src/Stats.cc @@ -224,7 +224,7 @@ void ProfileLogger::Log() i != thread_stats.end(); ++i ) { threading::MsgThread::Stats s = i->second; - file->Write(fmt("%0.6f %-15s in=%" PRIu64 " out=%" PRIu64 " pending=%" PRIu64 "/%" PRIu64 "\n", + file->Write(fmt("%0.6f %-25s in=%" PRIu64 " out=%" PRIu64 " pending=%" PRIu64 "/%" PRIu64 "\n", network_time, i->first.c_str(), s.sent_in, s.sent_out, diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 63d4c60a5c..6d53ea363f 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -480,6 +480,8 @@ WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) WriterBackend* backend = (*ld->factory)(frontend); assert(backend); + + frontend->ty_name = ld->name; return backend; } diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index fe3a6ef560..4d2e497b14 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -55,13 +55,15 @@ public: using namespace logging; -WriterBackend::WriterBackend(WriterFrontend* arg_frontend, const string& name) : MsgThread(name) +WriterBackend::WriterBackend(WriterFrontend* arg_frontend) : MsgThread() { path = ""; num_fields = 0; fields = 0; buffering = true; frontend = arg_frontend; + + SetName(frontend->Name()); } WriterBackend::~WriterBackend() @@ -107,6 +109,8 @@ bool WriterBackend::Init(string arg_path, int arg_num_fields, const Field* const num_fields = arg_num_fields; fields = arg_fields; + SetName(frontend->Name()); + if ( ! DoInit(arg_path, arg_num_fields, arg_fields) ) { DisableFrontend(); diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 21dcd41ff7..33271e43f9 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -34,7 +34,7 @@ public: * @param name A descriptive name for writer's type (e.g., \c Ascii). * */ - WriterBackend(WriterFrontend* frontend, const string& name); + WriterBackend(WriterFrontend* frontend); /** * Destructor. diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 79c180b749..79278870f9 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -99,6 +99,7 @@ WriterFrontend::WriterFrontend(bro_int_t type) buf = true; write_buffer = 0; write_buffer_pos = 0; + ty_name = ""; backend = log_mgr->CreateBackend(this, type); assert(backend); @@ -109,6 +110,14 @@ WriterFrontend::~WriterFrontend() { } +string WriterFrontend::Name() const + { + if ( path.size() ) + return ty_name; + + return ty_name + "/" + path; + } + void WriterFrontend::Stop() { FlushWriteBuffer(); diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index 6f1bb4ea1b..e0bc590dfc 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -30,6 +30,9 @@ public: * frontend will internally instantiate a WriterBackend of the * corresponding type. 
* + * name: A descriptive name for the backend wroter type (e.g., \c + * Ascii). + * * Frontends must only be instantiated by the main thread. */ WriterFrontend(bro_int_t type); @@ -171,6 +174,14 @@ public: */ int NumFields() const { return num_fields; } + /** + * Returns a descriptive name for the writer, including the type of + * the backend and the path used. + * + * This method is safe to call from any thread. + */ + string Name() const; + /** * Returns the log fields as passed into the constructor. */ @@ -184,6 +195,7 @@ protected: bool initialized; // True if initialized. bool buf; // True if buffering is enabled (default). + string ty_name; // Name of the backend type. Set by the manager. string path; // The log path. int num_fields; // The number of log fields. const Field* const* fields; // The log fields. diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 0e522dde1d..7cc8459e68 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -10,7 +10,7 @@ using namespace logging; using namespace writer; -Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend, "Ascii") +Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) { file = 0; diff --git a/src/logging/writers/None.h b/src/logging/writers/None.h index 19dc685ecb..6a62161f49 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/None.h @@ -11,7 +11,7 @@ namespace logging { namespace writer { class None : public WriterBackend { public: - None(WriterFrontend* frontend) : WriterBackend(frontend, "None") {} + None(WriterFrontend* frontend) : WriterBackend(frontend) {} ~None() {}; static WriterBackend* Instantiate(WriterFrontend* frontend) diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index 73dc562b31..4d51c3c4e4 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -9,7 +9,7 @@ using namespace threading; uint64_t BasicThread::thread_counter = 0; -BasicThread::BasicThread(const string& arg_name) +BasicThread::BasicThread() { started = false; terminating = false; @@ -18,7 +18,7 @@ BasicThread::BasicThread(const string& arg_name) buf = 0; buf_len = 1024; - name = Fmt("%s@%d", arg_name.c_str(), ++thread_counter); + name = Fmt("thread-%d", ++thread_counter); thread_mgr->AddThread(this); } @@ -27,6 +27,27 @@ BasicThread::~BasicThread() { } +void BasicThread::SetName(const string& arg_name) + { + // Slight race condition here with reader threads, but shouldn't matter. + name = arg_name; + } + +void BasicThread::SetOSName(const string& name) + { +#ifdef LINUX + pthread_setname_np(pthread_self(), name.c_str()); +#endif + +#ifdef __APPLE__ + pthread_setname_np(name.c_str()); +#endif + +#ifdef FREEBSD + pthread_set_name_np(pthread_self(), name, name.c_str()); +#endif + } + const char* BasicThread::Fmt(const char* format, ...) { if ( ! buf ) diff --git a/src/threading/BasicThread.h b/src/threading/BasicThread.h index aeafc61c52..6d2f739620 100644 --- a/src/threading/BasicThread.h +++ b/src/threading/BasicThread.h @@ -34,16 +34,31 @@ public: * @param name A descriptive name for thread the thread. This may * show up in messages to the user. */ - BasicThread(const string& name); // Managed by manager, must not delete otherwise. + BasicThread(); /** - * Returns a descriptive name for the thread. This is the name passed - * into the constructor. + * Returns a descriptive name for the thread. If not set via + * SetName(). If not set, a default name is choosen automatically. 
* * This method is safe to call from any thread. */ const string& Name() const { return name; } + /** + * Sets a descriptive name for the thread. This should be a string + * that's useful in output presented to the user and uniquely + * identifies the thread. + * + * This method must be called only from the thread itself. + */ + void SetName(const string& name); + + /** + * Set the name shown by the OS as the thread's description. Not + * supported on all OSs. + */ + void SetOSName(const string& name); + /** * Starts the thread. Calling this methods will spawn a new OS thread * executing Run(). Note that one can't restart a thread after a diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 1bda8943da..f41b20ddf9 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -127,7 +127,7 @@ bool ReporterMessage::Process() return true; } -MsgThread::MsgThread(const string& name) : BasicThread(name) +MsgThread::MsgThread() : BasicThread() { cnt_sent_in = cnt_sent_out = 0; thread_mgr->AddMsgThread(this); @@ -142,6 +142,12 @@ void MsgThread::OnStop() void MsgThread::Heartbeat() { SendIn(new HeartbeatMessage(this, network_time, current_time())); + + string name = Fmt("%s (%d/%d)", name.c_str(), + cnt_sent_in - queue_in.Size(), + cnt_sent_out - queue_out.Size()); + + SetOSName(name.c_str()); } void MsgThread::Info(const char* msg) diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index ec249e90ad..459ac6c603 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -34,10 +34,8 @@ public: * threading::Manager. * * Only Bro's main thread may instantiate a new thread. - * - * @param name A descriptive name. This is passed on to BasicThread(). */ - MsgThread(const string& name); + MsgThread(); /** * Sends a message to the child thread. The message will be proceesed From a0487ecb30e365956525aa6eb7cd1703d680b138 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 3 Feb 2012 14:12:29 -0800 Subject: [PATCH 084/651] move Value and Field from the logging namespace to the threading namespace, because other modules using threading will need them. --- src/Attr.cc | 4 +- src/CMakeLists.txt | 1 + src/RemoteSerializer.cc | 21 +- src/RemoteSerializer.h | 10 +- src/logging/Manager.cc | 328 +--------------------------- src/logging/Manager.h | 124 +---------- src/logging/WriterBackend.cc | 4 + src/logging/WriterBackend.h | 16 +- src/logging/WriterFrontend.cc | 4 + src/logging/WriterFrontend.h | 10 +- src/logging/writers/Ascii.cc | 3 + src/logging/writers/Ascii.h | 8 +- src/logging/writers/None.h | 6 +- src/threading/SerializationTypes.cc | 319 +++++++++++++++++++++++++++ src/threading/SerializationTypes.h | 126 +++++++++++ 15 files changed, 512 insertions(+), 472 deletions(-) create mode 100644 src/threading/SerializationTypes.cc create mode 100644 src/threading/SerializationTypes.h diff --git a/src/Attr.cc b/src/Attr.cc index b877250f52..40c6c1a75c 100644 --- a/src/Attr.cc +++ b/src/Attr.cc @@ -5,7 +5,7 @@ #include "Attr.h" #include "Expr.h" #include "Serializer.h" -#include "logging/Manager.h" +#include "threading/SerializationTypes.h" const char* attr_name(attr_tag t) { @@ -416,7 +416,7 @@ void Attributes::CheckAttr(Attr* a) break; case ATTR_LOG: - if ( ! logging::Value::IsCompatibleType(type) ) + if ( ! 
threading::Value::IsCompatibleType(type) ) Error("&log applied to a type that cannot be logged"); break; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 61a4847b70..7a3cc4babf 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -411,6 +411,7 @@ set(bro_SRCS threading/BasicThread.cc threading/Manager.cc threading/MsgThread.cc + threading/SerializationTypes.cc logging/Manager.cc logging/WriterBackend.cc diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index a75812b42b..ba2598c018 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -184,6 +184,7 @@ #include "File.h" #include "Conn.h" #include "Reporter.h" +#include "threading/SerializationTypes.h" #include "logging/Manager.h" extern "C" { @@ -2476,7 +2477,7 @@ bool RemoteSerializer::ProcessRemotePrint() return true; } -bool RemoteSerializer::SendLogCreateWriter(EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Field* const * fields) +bool RemoteSerializer::SendLogCreateWriter(EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Field* const * fields) { loop_over_list(peers, i) { @@ -2486,7 +2487,7 @@ bool RemoteSerializer::SendLogCreateWriter(EnumVal* id, EnumVal* writer, string return true; } -bool RemoteSerializer::SendLogCreateWriter(PeerID peer_id, EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Field* const * fields) +bool RemoteSerializer::SendLogCreateWriter(PeerID peer_id, EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Field* const * fields) { SetErrorDescr("logging"); @@ -2540,7 +2541,7 @@ error: return false; } -bool RemoteSerializer::SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Value* const * vals) +bool RemoteSerializer::SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Value* const * vals) { loop_over_list(peers, i) { @@ -2550,7 +2551,7 @@ bool RemoteSerializer::SendLogWrite(EnumVal* id, EnumVal* writer, string path, i return true; } -bool RemoteSerializer::SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Value* const * vals) +bool RemoteSerializer::SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Value* const * vals) { if ( peer->phase != Peer::HANDSHAKE && peer->phase != Peer::RUNNING ) return false; @@ -2641,7 +2642,7 @@ bool RemoteSerializer::ProcessLogCreateWriter() EnumVal* id_val = 0; EnumVal* writer_val = 0; - logging::Field** fields = 0; + threading::Field** fields = 0; BinarySerializationFormat fmt; fmt.StartRead(current_args->data, current_args->len); @@ -2658,11 +2659,11 @@ bool RemoteSerializer::ProcessLogCreateWriter() if ( ! success ) goto error; - fields = new logging::Field* [num_fields]; + fields = new threading::Field* [num_fields]; for ( int i = 0; i < num_fields; i++ ) { - fields[i] = new logging::Field; + fields[i] = new threading::Field; if ( ! fields[i]->Read(&fmt) ) goto error; } @@ -2703,7 +2704,7 @@ bool RemoteSerializer::ProcessLogWrite() // Unserialize one entry. EnumVal* id_val = 0; EnumVal* writer_val = 0; - logging::Value** vals = 0; + threading::Value** vals = 0; int id, writer; string path; @@ -2717,11 +2718,11 @@ bool RemoteSerializer::ProcessLogWrite() if ( ! 
success ) goto error; - vals = new logging::Value* [num_fields]; + vals = new threading::Value* [num_fields]; for ( int i = 0; i < num_fields; i++ ) { - vals[i] = new logging::Value; + vals[i] = new threading::Value; if ( ! vals[i]->Read(&fmt) ) goto error; } diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index ba0bde7d41..571fa72d39 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -15,7 +15,7 @@ class IncrementalSendTimer; -namespace logging { +namespace threading { class Field; class Value; } @@ -102,13 +102,13 @@ public: bool SendPrintHookEvent(BroFile* f, const char* txt, size_t len); // Send a request to create a writer on a remote side. - bool SendLogCreateWriter(PeerID peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Field* const * fields); + bool SendLogCreateWriter(PeerID peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Field* const * fields); // Broadcasts a request to create a writer. - bool SendLogCreateWriter(EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Field* const * fields); + bool SendLogCreateWriter(EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Field* const * fields); // Broadcast a log entry to everybody interested. - bool SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Value* const * vals); + bool SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Value* const * vals); // Synchronzizes time with all connected peers. Returns number of // current sync-point, or -1 on error. @@ -303,7 +303,7 @@ protected: bool SendID(SerialInfo* info, Peer* peer, const ID& id); bool SendCapabilities(Peer* peer); bool SendPacket(SerialInfo* info, Peer* peer, const Packet& p); - bool SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const logging::Value* const * vals); + bool SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Value* const * vals); void UnregisterHandlers(Peer* peer); void RaiseEvent(EventHandlerPtr event, Peer* peer, const char* arg = 0); diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 6d53ea363f..2333d6c612 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -14,7 +14,11 @@ #include "writers/Ascii.h" #include "writers/None.h" +#include "threading/SerializationTypes.h" + using namespace logging; +using threading::Value; +using threading::Field; // Structure describing a log writer type. 
struct WriterDefinition { @@ -83,316 +87,6 @@ struct Manager::Stream { ~Stream(); }; -bool Field::Read(SerializationFormat* fmt) - { - int t; - int st; - - bool success = (fmt->Read(&name, "name") && fmt->Read(&t, "type") && fmt->Read(&st, "subtype") ); - type = (TypeTag) t; - subtype = (TypeTag) st; - - return success; - } - -bool Field::Write(SerializationFormat* fmt) const - { - return (fmt->Write(name, "name") && fmt->Write((int)type, "type") && fmt->Write((int)subtype, "subtype")); - } - -Value::~Value() - { - if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE || type == TYPE_FUNC) - && present ) - delete val.string_val; - - if ( type == TYPE_TABLE && present ) - { - for ( int i = 0; i < val.set_val.size; i++ ) - delete val.set_val.vals[i]; - - delete [] val.set_val.vals; - } - - if ( type == TYPE_VECTOR && present ) - { - for ( int i = 0; i < val.vector_val.size; i++ ) - delete val.vector_val.vals[i]; - - delete [] val.vector_val.vals; - } - } - -bool Value::IsCompatibleType(BroType* t, bool atomic_only) - { - if ( ! t ) - return false; - - switch ( t->Tag() ) { - case TYPE_BOOL: - case TYPE_INT: - case TYPE_COUNT: - case TYPE_COUNTER: - case TYPE_PORT: - case TYPE_SUBNET: - case TYPE_ADDR: - case TYPE_DOUBLE: - case TYPE_TIME: - case TYPE_INTERVAL: - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_FUNC: - return true; - - case TYPE_RECORD: - return ! atomic_only; - - case TYPE_TABLE: - { - if ( atomic_only ) - return false; - - if ( ! t->IsSet() ) - return false; - - return IsCompatibleType(t->AsSetType()->Indices()->PureType(), true); - } - - case TYPE_VECTOR: - { - if ( atomic_only ) - return false; - - return IsCompatibleType(t->AsVectorType()->YieldType(), true); - } - - default: - return false; - } - - return false; - } - -bool Value::Read(SerializationFormat* fmt) - { - int ty; - - if ( ! (fmt->Read(&ty, "type") && fmt->Read(&present, "present")) ) - return false; - - type = (TypeTag)(ty); - - if ( ! present ) - return true; - - switch ( type ) { - case TYPE_BOOL: - case TYPE_INT: - return fmt->Read(&val.int_val, "int"); - - case TYPE_COUNT: - case TYPE_COUNTER: - case TYPE_PORT: - return fmt->Read(&val.uint_val, "uint"); - - case TYPE_SUBNET: - { - uint32 net[4]; - if ( ! (fmt->Read(&net[0], "net0") && - fmt->Read(&net[1], "net1") && - fmt->Read(&net[2], "net2") && - fmt->Read(&net[3], "net3") && - fmt->Read(&val.subnet_val.width, "width")) ) - return false; - -#ifdef BROv6 - val.subnet_val.net[0] = net[0]; - val.subnet_val.net[1] = net[1]; - val.subnet_val.net[2] = net[2]; - val.subnet_val.net[3] = net[3]; -#else - val.subnet_val.net = net[0]; -#endif - return true; - } - - case TYPE_ADDR: - { - uint32 addr[4]; - if ( ! (fmt->Read(&addr[0], "addr0") && - fmt->Read(&addr[1], "addr1") && - fmt->Read(&addr[2], "addr2") && - fmt->Read(&addr[3], "addr3")) ) - return false; - - val.addr_val[0] = addr[0]; -#ifdef BROv6 - val.addr_val[1] = addr[1]; - val.addr_val[2] = addr[2]; - val.addr_val[3] = addr[3]; -#endif - return true; - } - - case TYPE_DOUBLE: - case TYPE_TIME: - case TYPE_INTERVAL: - return fmt->Read(&val.double_val, "double"); - - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_FUNC: - { - val.string_val = new string; - return fmt->Read(val.string_val, "string"); - } - - case TYPE_TABLE: - { - if ( ! fmt->Read(&val.set_val.size, "set_size") ) - return false; - - val.set_val.vals = new Value* [val.set_val.size]; - - for ( int i = 0; i < val.set_val.size; ++i ) - { - val.set_val.vals[i] = new Value; - - if ( ! 
val.set_val.vals[i]->Read(fmt) ) - return false; - } - - return true; - } - - case TYPE_VECTOR: - { - if ( ! fmt->Read(&val.vector_val.size, "vector_size") ) - return false; - - val.vector_val.vals = new Value* [val.vector_val.size]; - - for ( int i = 0; i < val.vector_val.size; ++i ) - { - val.vector_val.vals[i] = new Value; - - if ( ! val.vector_val.vals[i]->Read(fmt) ) - return false; - } - - return true; - } - - default: - reporter->InternalError("unsupported type %s in Value::Write", type_name(type)); - } - - return false; - } - -bool Value::Write(SerializationFormat* fmt) const - { - if ( ! (fmt->Write((int)type, "type") && - fmt->Write(present, "present")) ) - return false; - - if ( ! present ) - return true; - - switch ( type ) { - case TYPE_BOOL: - case TYPE_INT: - return fmt->Write(val.int_val, "int"); - - case TYPE_COUNT: - case TYPE_COUNTER: - case TYPE_PORT: - return fmt->Write(val.uint_val, "uint"); - - case TYPE_SUBNET: - { - uint32 net[4]; -#ifdef BROv6 - net[0] = val.subnet_val.net[0]; - net[1] = val.subnet_val.net[1]; - net[2] = val.subnet_val.net[2]; - net[3] = val.subnet_val.net[3]; -#else - net[0] = val.subnet_val.net; - net[1] = net[2] = net[3] = 0; -#endif - return fmt->Write(net[0], "net0") && - fmt->Write(net[1], "net1") && - fmt->Write(net[2], "net2") && - fmt->Write(net[3], "net3") && - fmt->Write(val.subnet_val.width, "width"); - } - - case TYPE_ADDR: - { - uint32 addr[4]; - addr[0] = val.addr_val[0]; -#ifdef BROv6 - addr[1] = val.addr_val[1]; - addr[2] = val.addr_val[2]; - addr[3] = val.addr_val[3]; -#else - addr[1] = addr[2] = addr[3] = 0; -#endif - return fmt->Write(addr[0], "addr0") && - fmt->Write(addr[1], "addr1") && - fmt->Write(addr[2], "addr2") && - fmt->Write(addr[3], "addr3"); - } - - case TYPE_DOUBLE: - case TYPE_TIME: - case TYPE_INTERVAL: - return fmt->Write(val.double_val, "double"); - - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_FUNC: - return fmt->Write(*val.string_val, "string"); - - case TYPE_TABLE: - { - if ( ! fmt->Write(val.set_val.size, "set_size") ) - return false; - - for ( int i = 0; i < val.set_val.size; ++i ) - { - if ( ! val.set_val.vals[i]->Write(fmt) ) - return false; - } - - return true; - } - - case TYPE_VECTOR: - { - if ( ! fmt->Write(val.vector_val.size, "vector_size") ) - return false; - - for ( int i = 0; i < val.vector_val.size; ++i ) - { - if ( ! val.vector_val.vals[i]->Write(fmt) ) - return false; - } - - return true; - } - - default: - reporter->InternalError("unsupported type %s in Value::REad", type_name(type)); - } - - return false; - } - Manager::Filter::~Filter() { for ( int i = 0; i < num_fields; ++i ) @@ -552,7 +246,7 @@ bool Manager::CreateStream(EnumVal* id, RecordVal* sval) if ( ! (columns->FieldDecl(i)->FindAttr(ATTR_LOG)) ) continue; - if ( ! Value::IsCompatibleType(columns->FieldType(i)) ) + if ( ! threading::Value::IsCompatibleType(columns->FieldType(i)) ) { reporter->Error("type of field '%s' is not support for logging output", columns->FieldName(i)); @@ -1089,7 +783,7 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) if ( filter->local || filter->remote ) { - Value** vals = RecordToFilterVals(stream, filter, columns); + threading::Value** vals = RecordToFilterVals(stream, filter, columns); if ( filter->remote ) remote_serializer->SendLogWrite(stream->id, @@ -1125,15 +819,15 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) return true; } -Value* Manager::ValToLogVal(Val* val, BroType* ty) +threading::Value* Manager::ValToLogVal(Val* val, BroType* ty) { if ( ! 
ty ) ty = val->Type(); if ( ! val ) - return new Value(ty->Tag(), false); + return new threading::Value(ty->Tag(), false); - Value* lval = new Value(ty->Tag()); + threading::Value* lval = new threading::Value(ty->Tag()); switch ( lval->type ) { case TYPE_BOOL: @@ -1213,7 +907,7 @@ Value* Manager::ValToLogVal(Val* val, BroType* ty) set = new ListVal(TYPE_INT); lval->val.set_val.size = set->Length(); - lval->val.set_val.vals = new Value* [lval->val.set_val.size]; + lval->val.set_val.vals = new threading::Value* [lval->val.set_val.size]; for ( int i = 0; i < lval->val.set_val.size; i++ ) lval->val.set_val.vals[i] = ValToLogVal(set->Index(i)); @@ -1227,7 +921,7 @@ Value* Manager::ValToLogVal(Val* val, BroType* ty) VectorVal* vec = val->AsVectorVal(); lval->val.vector_val.size = vec->Size(); lval->val.vector_val.vals = - new Value* [lval->val.vector_val.size]; + new threading::Value* [lval->val.vector_val.size]; for ( int i = 0; i < lval->val.vector_val.size; i++ ) { diff --git a/src/logging/Manager.h b/src/logging/Manager.h index f6829b3554..c5d1a9fc2d 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -15,118 +15,6 @@ class RotationTimer; namespace logging { -/** - * Definition of a log file, i.e., one column of a log stream. - */ -struct Field { - string name; //! Name of the field. - TypeTag type; //! Type of the field. - TypeTag subtype; //! Inner type for sets. - - /** - * Constructor. - */ - Field() { subtype = TYPE_VOID; } - - /** - * Copy constructor. - */ - Field(const Field& other) - : name(other.name), type(other.type), subtype(other.subtype) { } - - /** - * Unserializes a field. - * - * @param fmt The serialization format to use. The format handles - * low-level I/O. - * - * @return False if an error occured. - */ - bool Read(SerializationFormat* fmt); - - /** - * Serializes a field. - * - * @param fmt The serialization format to use. The format handles - * low-level I/O. - * - * @return False if an error occured. - */ - bool Write(SerializationFormat* fmt) const; -}; - -/** - * Definition of a log value, i.e., a entry logged by a stream. - * - * This struct essentialy represents a serialization of a Val instance (for - * those Vals supported). - */ -struct Value { - TypeTag type; //! The type of the value. - bool present; //! False for optional record fields that are not set. - - struct set_t { bro_int_t size; Value** vals; }; - typedef set_t vec_t; - - /** - * This union is a subset of BroValUnion, including only the types we - * can log directly. See IsCompatibleType(). - */ - union _val { - bro_int_t int_val; - bro_uint_t uint_val; - uint32 addr_val[NUM_ADDR_WORDS]; - subnet_type subnet_val; - double double_val; - string* string_val; - set_t set_val; - vec_t vector_val; - } val; - - /** - * Constructor. - * - * arg_type: The type of the value. - * - * arg_present: False if the value represents an optional record field - * that is not set. - */ - Value(TypeTag arg_type = TYPE_ERROR, bool arg_present = true) - : type(arg_type), present(arg_present) {} - - /** - * Destructor. - */ - ~Value(); - - /** - * Unserializes a value. - * - * @param fmt The serialization format to use. The format handles low-level I/O. - * - * @return False if an error occured. - */ - bool Read(SerializationFormat* fmt); - - /** - * Serializes a value. - * - * @param fmt The serialization format to use. The format handles - * low-level I/O. - * - * @return False if an error occured. 
- */ - bool Write(SerializationFormat* fmt) const; - - /** - * Returns true if the type can be represented by a Value. If - * `atomic_only` is true, will not permit composite types. - */ - static bool IsCompatibleType(BroType* t, bool atomic_only=false); - -private: - Value(const Value& other) { } // Disabled. -}; class WriterBackend; class WriterFrontend; @@ -168,7 +56,7 @@ public: * logging.bif, which just forwards here. */ bool EnableStream(EnumVal* id); - + /** * Disables a log stream. * @@ -265,11 +153,11 @@ protected: // Takes ownership of fields. WriterFrontend* CreateWriter(EnumVal* id, EnumVal* writer, string path, - int num_fields, const Field* const* fields); + int num_fields, const threading::Field* const* fields); // Takes ownership of values.. bool Write(EnumVal* id, EnumVal* writer, string path, - int num_fields, Value** vals); + int num_fields, threading::Value** vals); // Announces all instantiated writers to peer. void SendAllWritersTo(RemoteSerializer::PeerID peer); @@ -282,7 +170,7 @@ protected: void Error(WriterFrontend* writer, const char* msg); // Deletes the values as passed into Write(). - void DeleteVals(int num_fields, Value** vals); + void DeleteVals(int num_fields, threading::Value** vals); private: struct Filter; @@ -292,10 +180,10 @@ private: bool TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, TableVal* include, TableVal* exclude, string path, list indices); - Value** RecordToFilterVals(Stream* stream, Filter* filter, + threading::Value** RecordToFilterVals(Stream* stream, Filter* filter, RecordVal* columns); - Value* ValToLogVal(Val* val, BroType* ty = 0); + threading::Value* ValToLogVal(Val* val, BroType* ty = 0); Stream* FindStream(EnumVal* id); void RemoveDisabledWriters(Stream* stream); void InstallRotationTimer(WriterInfo* winfo); diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 4d2e497b14..3ecc54e240 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -4,9 +4,13 @@ #include "WriterBackend.h" #include "WriterFrontend.h" +#include "../threading/SerializationTypes.h" // Messages sent from backend to frontend (i.e., "OutputMessages"). +using threading::Value; +using threading::Field; + namespace logging { class RotationFinishedMessage : public threading::OutputMessage diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 33271e43f9..9ffa26d0c8 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -55,7 +55,7 @@ public: * * @return False if an error occured. */ - bool Init(string path, int num_fields, const Field* const* fields); + bool Init(string path, int num_fields, const threading::Field* const* fields); /** * Writes one log entry. @@ -72,7 +72,7 @@ public: * * @return False if an error occured. */ - bool Write(int num_fields, int num_writes, Value*** vals); + bool Write(int num_fields, int num_writes, threading::Value*** vals); /** * Sets the buffering status for the writer, assuming the writer @@ -129,7 +129,7 @@ public: /** * Returns the log fields as passed into the constructor. */ - const Field* const * Fields() const { return fields; } + const threading::Field* const * Fields() const { return fields; } /** * Returns the current buffering state. @@ -170,7 +170,7 @@ protected: * implementation should also call Error() to indicate what happened. 
*/ virtual bool DoInit(string path, int num_fields, - const Field* const* fields) = 0; + const threading::Field* const* fields) = 0; /** * Writer-specific output method implementing recording of fone log @@ -182,8 +182,8 @@ protected: * disabled and eventually deleted. When returning false, an * implementation should also call Error() to indicate what happened. */ - virtual bool DoWrite(int num_fields, const Field* const* fields, - Value** vals) = 0; + virtual bool DoWrite(int num_fields, const threading::Field* const* fields, + threading::Value** vals) = 0; /** * Writer-specific method implementing a change of fthe buffering @@ -287,7 +287,7 @@ private: /** * Deletes the values as passed into Write(). */ - void DeleteVals(int num_writes, Value*** vals); + void DeleteVals(int num_writes, threading::Value*** vals); // Frontend that instantiated us. This object must not be access from // this class, it's running in a different thread! @@ -295,7 +295,7 @@ private: string path; // Log path. int num_fields; // Number of log fields. - const Field* const* fields; // Log fields. + const threading::Field* const* fields; // Log fields. bool buffering; // True if buffering is enabled. }; diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 79278870f9..1f7af5a53d 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -1,6 +1,10 @@ #include "WriterFrontend.h" #include "WriterBackend.h" +#include "../threading/SerializationTypes.h" + +using threading::Value; +using threading::Field; namespace logging { diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index e0bc590dfc..56c8885cf9 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -64,7 +64,7 @@ public: * * This method must only be called from the main thread. */ - void Init(string path, int num_fields, const Field* const* fields); + void Init(string path, int num_fields, const threading::Field* const* fields); /** * Write out a record. @@ -86,7 +86,7 @@ public: * * This method must only be called from the main thread. */ - void Write(int num_fields, Value** vals); + void Write(int num_fields, threading::Value** vals); /** * Sets the buffering state. @@ -185,7 +185,7 @@ public: /** * Returns the log fields as passed into the constructor. */ - const Field* const * Fields() const { return fields; } + const threading::Field* const * Fields() const { return fields; } protected: friend class Manager; @@ -198,12 +198,12 @@ protected: string ty_name; // Name of the backend type. Set by the manager. string path; // The log path. int num_fields; // The number of log fields. - const Field* const* fields; // The log fields. + const threading::Field* const* fields; // The log fields. // Buffer for bulk writes. static const int WRITER_BUFFER_SIZE = 50; int write_buffer_pos; // Position of next write in buffer. - Value*** write_buffer; // Buffer of size WRITER_BUFFER_SIZE. + threading::Value*** write_buffer; // Buffer of size WRITER_BUFFER_SIZE. 
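	// Note on the buffering scheme: WriterFrontend::Write() appends each
	// record to write_buffer, and FlushWriteBuffer() ships the accumulated
	// batch to the backend as a single WriteMessage once WRITER_BUFFER_SIZE
	// entries have been collected or buffering is disabled (see the
	// WriterFrontend.cc changes earlier in this series); ownership of the
	// values passes to the backend thread on flush.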
}; } diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 7cc8459e68..fc6832afea 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -6,9 +6,12 @@ #include "../../NetVar.h" #include "Ascii.h" +#include "../../threading/SerializationTypes.h" using namespace logging; using namespace writer; +using threading::Value; +using threading::Field; Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) { diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/Ascii.h index 4a24aad9b7..6f507aff01 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/Ascii.h @@ -20,9 +20,9 @@ public: protected: virtual bool DoInit(string path, int num_fields, - const Field* const* fields); - virtual bool DoWrite(int num_fields, const Field* const* fields, - Value** vals); + const threading::Field* const* fields); + virtual bool DoWrite(int num_fields, const threading::Field* const* fields, + threading::Value** vals); virtual bool DoSetBuf(bool enabled); virtual bool DoRotate(string rotated_path, double open, double close, bool terminating); @@ -31,7 +31,7 @@ protected: private: bool IsSpecial(string path) { return path.find("/dev/") == 0; } - bool DoWriteOne(ODesc* desc, Value* val, const Field* field); + bool DoWriteOne(ODesc* desc, threading::Value* val, const threading::Field* field); bool WriteHeaderField(const string& key, const string& value); FILE* file; diff --git a/src/logging/writers/None.h b/src/logging/writers/None.h index 6a62161f49..cce48953d1 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/None.h @@ -19,10 +19,10 @@ public: protected: virtual bool DoInit(string path, int num_fields, - const Field* const * fields) { return true; } + const threading::Field* const * fields) { return true; } - virtual bool DoWrite(int num_fields, const Field* const* fields, - Value** vals) { return true; } + virtual bool DoWrite(int num_fields, const threading::Field* const* fields, + threading::Value** vals) { return true; } virtual bool DoSetBuf(bool enabled) { return true; } virtual bool DoRotate(string rotated_path, double open, double close, bool terminating); diff --git a/src/threading/SerializationTypes.cc b/src/threading/SerializationTypes.cc new file mode 100644 index 0000000000..01f0ac84ce --- /dev/null +++ b/src/threading/SerializationTypes.cc @@ -0,0 +1,319 @@ +// See the file "COPYING" in the main distribution directory for copyright. + + +#include "SerializationTypes.h" +#include "../RemoteSerializer.h" + + +using namespace threading; + +bool Field::Read(SerializationFormat* fmt) + { + int t; + int st; + + bool success = (fmt->Read(&name, "name") && fmt->Read(&t, "type") && fmt->Read(&st, "subtype") ); + type = (TypeTag) t; + subtype = (TypeTag) st; + + return success; + } + +bool Field::Write(SerializationFormat* fmt) const + { + return (fmt->Write(name, "name") && fmt->Write((int)type, "type") && fmt->Write((int)subtype, "subtype")); + } + +Value::~Value() + { + if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE || type == TYPE_FUNC) + && present ) + delete val.string_val; + + if ( type == TYPE_TABLE && present ) + { + for ( int i = 0; i < val.set_val.size; i++ ) + delete val.set_val.vals[i]; + + delete [] val.set_val.vals; + } + + if ( type == TYPE_VECTOR && present ) + { + for ( int i = 0; i < val.vector_val.size; i++ ) + delete val.vector_val.vals[i]; + + delete [] val.vector_val.vals; + } + } + +bool Value::IsCompatibleType(BroType* t, bool atomic_only) + { + if ( ! 
t ) + return false; + + switch ( t->Tag() ) { + case TYPE_BOOL: + case TYPE_INT: + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + case TYPE_SUBNET: + case TYPE_ADDR: + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_FUNC: + return true; + + case TYPE_RECORD: + return ! atomic_only; + + case TYPE_TABLE: + { + if ( atomic_only ) + return false; + + if ( ! t->IsSet() ) + return false; + + return IsCompatibleType(t->AsSetType()->Indices()->PureType(), true); + } + + case TYPE_VECTOR: + { + if ( atomic_only ) + return false; + + return IsCompatibleType(t->AsVectorType()->YieldType(), true); + } + + default: + return false; + } + + return false; + } + +bool Value::Read(SerializationFormat* fmt) + { + int ty; + + if ( ! (fmt->Read(&ty, "type") && fmt->Read(&present, "present")) ) + return false; + + type = (TypeTag)(ty); + + if ( ! present ) + return true; + + switch ( type ) { + case TYPE_BOOL: + case TYPE_INT: + return fmt->Read(&val.int_val, "int"); + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + return fmt->Read(&val.uint_val, "uint"); + + case TYPE_SUBNET: + { + uint32 net[4]; + if ( ! (fmt->Read(&net[0], "net0") && + fmt->Read(&net[1], "net1") && + fmt->Read(&net[2], "net2") && + fmt->Read(&net[3], "net3") && + fmt->Read(&val.subnet_val.width, "width")) ) + return false; + +#ifdef BROv6 + val.subnet_val.net[0] = net[0]; + val.subnet_val.net[1] = net[1]; + val.subnet_val.net[2] = net[2]; + val.subnet_val.net[3] = net[3]; +#else + val.subnet_val.net = net[0]; +#endif + return true; + } + + case TYPE_ADDR: + { + uint32 addr[4]; + if ( ! (fmt->Read(&addr[0], "addr0") && + fmt->Read(&addr[1], "addr1") && + fmt->Read(&addr[2], "addr2") && + fmt->Read(&addr[3], "addr3")) ) + return false; + + val.addr_val[0] = addr[0]; +#ifdef BROv6 + val.addr_val[1] = addr[1]; + val.addr_val[2] = addr[2]; + val.addr_val[3] = addr[3]; +#endif + return true; + } + + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + return fmt->Read(&val.double_val, "double"); + + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_FUNC: + { + val.string_val = new string; + return fmt->Read(val.string_val, "string"); + } + + case TYPE_TABLE: + { + if ( ! fmt->Read(&val.set_val.size, "set_size") ) + return false; + + val.set_val.vals = new Value* [val.set_val.size]; + + for ( int i = 0; i < val.set_val.size; ++i ) + { + val.set_val.vals[i] = new Value; + + if ( ! val.set_val.vals[i]->Read(fmt) ) + return false; + } + + return true; + } + + case TYPE_VECTOR: + { + if ( ! fmt->Read(&val.vector_val.size, "vector_size") ) + return false; + + val.vector_val.vals = new Value* [val.vector_val.size]; + + for ( int i = 0; i < val.vector_val.size; ++i ) + { + val.vector_val.vals[i] = new Value; + + if ( ! val.vector_val.vals[i]->Read(fmt) ) + return false; + } + + return true; + } + + default: + reporter->InternalError("unsupported type %s in Value::Write", type_name(type)); + } + + return false; + } + +bool Value::Write(SerializationFormat* fmt) const + { + if ( ! (fmt->Write((int)type, "type") && + fmt->Write(present, "present")) ) + return false; + + if ( ! 
present ) + return true; + + switch ( type ) { + case TYPE_BOOL: + case TYPE_INT: + return fmt->Write(val.int_val, "int"); + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + return fmt->Write(val.uint_val, "uint"); + + case TYPE_SUBNET: + { + uint32 net[4]; +#ifdef BROv6 + net[0] = val.subnet_val.net[0]; + net[1] = val.subnet_val.net[1]; + net[2] = val.subnet_val.net[2]; + net[3] = val.subnet_val.net[3]; +#else + net[0] = val.subnet_val.net; + net[1] = net[2] = net[3] = 0; +#endif + return fmt->Write(net[0], "net0") && + fmt->Write(net[1], "net1") && + fmt->Write(net[2], "net2") && + fmt->Write(net[3], "net3") && + fmt->Write(val.subnet_val.width, "width"); + } + + case TYPE_ADDR: + { + uint32 addr[4]; + addr[0] = val.addr_val[0]; +#ifdef BROv6 + addr[1] = val.addr_val[1]; + addr[2] = val.addr_val[2]; + addr[3] = val.addr_val[3]; +#else + addr[1] = addr[2] = addr[3] = 0; +#endif + return fmt->Write(addr[0], "addr0") && + fmt->Write(addr[1], "addr1") && + fmt->Write(addr[2], "addr2") && + fmt->Write(addr[3], "addr3"); + } + + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + return fmt->Write(val.double_val, "double"); + + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_FUNC: + return fmt->Write(*val.string_val, "string"); + + case TYPE_TABLE: + { + if ( ! fmt->Write(val.set_val.size, "set_size") ) + return false; + + for ( int i = 0; i < val.set_val.size; ++i ) + { + if ( ! val.set_val.vals[i]->Write(fmt) ) + return false; + } + + return true; + } + + case TYPE_VECTOR: + { + if ( ! fmt->Write(val.vector_val.size, "vector_size") ) + return false; + + for ( int i = 0; i < val.vector_val.size; ++i ) + { + if ( ! val.vector_val.vals[i]->Write(fmt) ) + return false; + } + + return true; + } + + default: + reporter->InternalError("unsupported type %s in Value::REad", type_name(type)); + } + + return false; + } + diff --git a/src/threading/SerializationTypes.h b/src/threading/SerializationTypes.h new file mode 100644 index 0000000000..8cae99c117 --- /dev/null +++ b/src/threading/SerializationTypes.h @@ -0,0 +1,126 @@ + +#ifndef THREADING_SERIALIZATIONTYPES_H +#define THREADING_SERIALIZATIONTZPES_H + +#include "../RemoteSerializer.h" + +using namespace std; + +namespace threading { + +/** + * Definition of a log file, i.e., one column of a log stream. + */ +struct Field { + string name; //! Name of the field. + TypeTag type; //! Type of the field. + TypeTag subtype; //! Inner type for sets. + + /** + * Constructor. + */ + Field() { subtype = TYPE_VOID; } + + /** + * Copy constructor. + */ + Field(const Field& other) + : name(other.name), type(other.type), subtype(other.subtype) { } + + /** + * Unserializes a field. + * + * @param fmt The serialization format to use. The format handles + * low-level I/O. + * + * @return False if an error occured. + */ + bool Read(SerializationFormat* fmt); + + /** + * Serializes a field. + * + * @param fmt The serialization format to use. The format handles + * low-level I/O. + * + * @return False if an error occured. + */ + bool Write(SerializationFormat* fmt) const; +}; + +/** + * Definition of a log value, i.e., a entry logged by a stream. + * + * This struct essentialy represents a serialization of a Val instance (for + * those Vals supported). + */ +struct Value { + TypeTag type; //! The type of the value. + bool present; //! False for optional record fields that are not set. 
+ + struct set_t { bro_int_t size; Value** vals; }; + typedef set_t vec_t; + + /** + * This union is a subset of BroValUnion, including only the types we + * can log directly. See IsCompatibleType(). + */ + union _val { + bro_int_t int_val; + bro_uint_t uint_val; + uint32 addr_val[NUM_ADDR_WORDS]; + subnet_type subnet_val; + double double_val; + string* string_val; + set_t set_val; + vec_t vector_val; + } val; + + /** + * Constructor. + * + * arg_type: The type of the value. + * + * arg_present: False if the value represents an optional record field + * that is not set. + */ + Value(TypeTag arg_type = TYPE_ERROR, bool arg_present = true) + : type(arg_type), present(arg_present) {} + + /** + * Destructor. + */ + ~Value(); + + /** + * Unserializes a value. + * + * @param fmt The serialization format to use. The format handles low-level I/O. + * + * @return False if an error occured. + */ + bool Read(SerializationFormat* fmt); + + /** + * Serializes a value. + * + * @param fmt The serialization format to use. The format handles + * low-level I/O. + * + * @return False if an error occured. + */ + bool Write(SerializationFormat* fmt) const; + + /** + * Returns true if the type can be represented by a Value. If + * `atomic_only` is true, will not permit composite types. + */ + static bool IsCompatibleType(BroType* t, bool atomic_only=false); + +private: + Value(const Value& other) { } // Disabled. +}; + +} + +#endif /* THREADING_SERIALIZATIONTZPES_H */ From 23b2c95644f3a48a3a154843041ae1b016f1dfc1 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 6 Feb 2012 10:57:07 -0800 Subject: [PATCH 085/651] backend does not need friend access to manager --- src/logging/WriterBackend.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 9ffa26d0c8..aa4448f5e2 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -282,8 +282,6 @@ protected: virtual bool DoHeartbeat(double network_time, double current_time); private: - friend class Manager; - /** * Deletes the values as passed into Write(). */ From f76bbf01a4e02fabd55ed38fba28e3acb7ab9036 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 6 Feb 2012 13:15:01 -0800 Subject: [PATCH 086/651] fix CreateBackend function - the way that the right backend was chosen & backends were initialized did not make sense... --- src/logging/Manager.cc | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index add10b3f10..e8d732d84a 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -142,12 +142,12 @@ WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) return 0; } - if ( ld->type == type ) - break; - - if ( ! ld->factory ) - // Oops, we can't instantiate this guy. - return 0; + if ( ld->type != type ) + { + // no, didn't find the right one... + ++ld; + continue; + } // If the writer has an init function, call it. if ( ld->init ) @@ -157,17 +157,24 @@ WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) // call it again later. ld->init = 0; else + { // Init failed, disable by deleting factory // function. ld->factory = 0; - DBG_LOG(DBG_LOGGING, "failed to init writer class %s", - ld->name); + DBG_LOG(DBG_LOGGING, "failed to init writer class %s", + ld->name); - return false; + return false; + } } - ++ld; + if ( ! ld->factory ) + // Oops, we can't instantiate this guy. + return 0; + + // all done. break. 
+ break; } assert(ld->factory); From e22d396229343b4956d93b14ad9629cde4690dbf Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 6 Feb 2012 13:53:33 -0800 Subject: [PATCH 087/651] typo --- src/threading/SerializationTypes.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/threading/SerializationTypes.h b/src/threading/SerializationTypes.h index 11ceda929c..8f36402637 100644 --- a/src/threading/SerializationTypes.h +++ b/src/threading/SerializationTypes.h @@ -1,6 +1,6 @@ #ifndef THREADING_SERIALIZATIONTYPES_H -#define THREADING_SERIALIZATIONTZPES_H +#define THREADING_SERIALIZATIONTYPES_H #include "../RemoteSerializer.h" From 833e7244000c1a1d13b67c9087ada12686bf4b98 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 6 Feb 2012 16:14:39 -0800 Subject: [PATCH 088/651] way less compile errors. --- src/input/Manager.cc | 191 +++++++++++++++------------- src/input/Manager.h | 6 +- src/input/ReaderBackend.cc | 95 +++++++++----- src/input/ReaderBackend.h | 23 ++-- src/input/ReaderFrontend.h | 54 ++++++++ src/input/readers/Ascii.h | 29 +++-- src/logging/Manager.cc | 2 +- src/threading/SerializationTypes.cc | 6 +- src/threading/SerializationTypes.h | 2 + 9 files changed, 257 insertions(+), 151 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 24bc464daf..a97286162d 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -3,18 +3,20 @@ #include #include "Manager.h" +#include "ReaderFrontend.h" +#include "ReaderBackend.h" +#include "readers/Ascii.h" + #include "Event.h" #include "EventHandler.h" #include "NetVar.h" #include "Net.h" -#include "InputReader.h" - -#include "InputReaderAscii.h" - #include "CompHash.h" +#include "../threading/SerializationTypes.h" + using namespace input; using threading::Value; using threading::Field; @@ -99,7 +101,7 @@ Manager::TableFilter::~TableFilter() { struct Manager::ReaderInfo { EnumVal* id; EnumVal* type; - InputReader* reader; + ReaderFrontend* reader; //list events; // events we fire when "something" happens map filters; // filters that can prevent our actions @@ -132,38 +134,27 @@ bool Manager::ReaderInfo::HasFilter(int id) { } -struct InputReaderDefinition { +struct ReaderDefinition { bro_int_t type; // the type const char *name; // descriptive name for error messages bool (*init)(); // optional one-time inifializing function - InputReader* (*factory)(); // factory function for creating instances + ReaderBackend* (*factory)(ReaderFrontend* frontend); // factory function for creating instances }; -InputReaderDefinition input_readers[] = { - { BifEnum::Input::READER_ASCII, "Ascii", 0, InputReaderAscii::Instantiate }, +ReaderDefinition input_readers[] = { + { BifEnum::Input::READER_ASCII, "Ascii", 0, reader::Ascii::Instantiate }, // End marker - { BifEnum::Input::READER_DEFAULT, "None", 0, (InputReader* (*)())0 } + { BifEnum::Input::READER_DEFAULT, "None", 0, (ReaderBackend* (*)(ReaderFrontend* frontend))0 } }; Manager::Manager() { } -// create a new input reader object to be used at whomevers leisure lateron. -InputReader* Manager::CreateStream(EnumVal* id, RecordVal* description) -{ - InputReaderDefinition* ir = input_readers; - - RecordType* rtype = description->Type()->AsRecordType(); - if ( ! 
same_type(rtype, BifType::Record::Input::StreamDescription, 0) ) - { - reporter->Error("Streamdescription argument not of right type"); - return 0; - } +ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) { + ReaderDefinition* ir = input_readers; - EnumVal* reader = description->LookupWithDefault(rtype->FieldOffset("reader"))->AsEnumVal(); - while ( true ) { if ( ir->type == BifEnum::Input::READER_DEFAULT ) { @@ -171,7 +162,7 @@ InputReader* Manager::CreateStream(EnumVal* id, RecordVal* description) return 0; } - if ( ir->type != reader->AsEnum() ) { + if ( ir->type != type ) { // no, didn't find the right one... ++ir; continue; @@ -201,9 +192,30 @@ InputReader* Manager::CreateStream(EnumVal* id, RecordVal* description) // all done. break. break; } - assert(ir->factory); - InputReader* reader_obj = (*ir->factory)(); + + ReaderBackend* backend = (*ir->factory)(frontend); + assert(backend); + + frontend->ty_name = ir->name; + return backend; +} + +// create a new input reader object to be used at whomevers leisure lateron. +ReaderFrontend* Manager::CreateStream(EnumVal* id, RecordVal* description) +{ + ReaderDefinition* ir = input_readers; + + RecordType* rtype = description->Type()->AsRecordType(); + if ( ! same_type(rtype, BifType::Record::Input::StreamDescription, 0) ) + { + reporter->Error("Streamdescription argument not of right type"); + return 0; + } + + EnumVal* reader = description->LookupWithDefault(rtype->FieldOffset("reader"))->AsEnumVal(); + + ReaderFrontend* reader_obj = new ReaderFrontend(id->AsEnum()); assert(reader_obj); // get the source... @@ -217,16 +229,16 @@ InputReader* Manager::CreateStream(EnumVal* id, RecordVal* description) readers.push_back(info); - int success = reader_obj->Init(source); - if ( success == false ) { + reader_obj->Init(source); + /* if ( success == false ) { assert( RemoveStream(id) ); return 0; - } - success = reader_obj->Update(); - if ( success == false ) { + } */ + reader_obj->Update(); + /* if ( success == false ) { assert ( RemoveStream(id) ); return 0; - } + } */ return reader_obj; @@ -306,7 +318,7 @@ bool Manager::AddEventFilter(EnumVal *id, RecordVal* fval) { } - vector fieldsV; // vector, because UnrollRecordType needs it + vector fieldsV; // vector, because UnrollRecordType needs it bool status = !UnrollRecordType(&fieldsV, fields, ""); @@ -316,7 +328,7 @@ bool Manager::AddEventFilter(EnumVal *id, RecordVal* fval) { } - LogField** logf = new LogField*[fieldsV.size()]; + Field** logf = new Field*[fieldsV.size()]; for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { logf[i] = fieldsV[i]; } @@ -410,7 +422,7 @@ bool Manager::AddTableFilter(EnumVal *id, RecordVal* fval) { } - vector fieldsV; // vector, because we don't know the length beforehands + vector fieldsV; // vector, because we don't know the length beforehands bool status = !UnrollRecordType(&fieldsV, idx, ""); @@ -430,7 +442,7 @@ bool Manager::AddTableFilter(EnumVal *id, RecordVal* fval) { } - LogField** fields = new LogField*[fieldsV.size()]; + Field** fields = new Field*[fieldsV.size()]; for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { fields[i] = fieldsV[i]; } @@ -538,12 +550,12 @@ bool Manager::RemoveStream(const EnumVal* id) { return true; } -bool Manager::UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend) { +bool Manager::UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend) { for ( int i = 0; i < rec->NumFields(); i++ ) { if ( !IsCompatibleType(rec->FieldType(i)) ) { - 
reporter->Error("Incompatible type \"%s\" in table definition for InputReader", type_name(rec->FieldType(i)->Tag())); + reporter->Error("Incompatible type \"%s\" in table definition for ReaderFrontend", type_name(rec->FieldType(i)->Tag())); return false; } @@ -557,7 +569,7 @@ bool Manager::UnrollRecordType(vector *fields, const RecordType *rec, } } else { - LogField* field = new LogField(); + Field* field = new Field(); field->name = nameprepend + rec->FieldName(i); field->type = rec->FieldType(i)->Tag(); if ( field->type == TYPE_TABLE ) { @@ -591,7 +603,9 @@ bool Manager::ForceUpdate(const EnumVal* id) return false; } - return i->reader->Update(); + i->reader->Update(); + + return true; // update is async :( } bool Manager::RemoveTableFilter(EnumVal* id, const string &name) { @@ -638,21 +652,21 @@ bool Manager::RemoveEventFilter(EnumVal* id, const string &name) { return true; } -Val* Manager::LogValToIndexVal(int num_fields, const RecordType *type, const LogVal* const *vals) { +Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Value* const *vals) { Val* idxval; int position = 0; if ( num_fields == 1 && type->FieldType(0)->Tag() != TYPE_RECORD ) { - idxval = LogValToVal(vals[0], type->FieldType(0)); + idxval = ValueToVal(vals[0], type->FieldType(0)); position = 1; } else { ListVal *l = new ListVal(TYPE_ANY); for ( int j = 0 ; j < type->NumFields(); j++ ) { if ( type->FieldType(j)->Tag() == TYPE_RECORD ) { - l->Append(LogValToRecordVal(vals, type->FieldType(j)->AsRecordType(), &position)); + l->Append(ValueToRecordVal(vals, type->FieldType(j)->AsRecordType(), &position)); } else { - l->Append(LogValToVal(vals[position], type->FieldType(j))); + l->Append(ValueToVal(vals[position], type->FieldType(j))); position++; } } @@ -666,7 +680,7 @@ Val* Manager::LogValToIndexVal(int num_fields, const RecordType *type, const Log } -void Manager::SendEntry(const InputReader* reader, int id, const LogVal* const *vals) { +void Manager::SendEntry(const ReaderFrontend* reader, int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -689,7 +703,7 @@ void Manager::SendEntry(const InputReader* reader, int id, const LogVal* const * } -void Manager::SendEntryTable(const InputReader* reader, int id, const LogVal* const *vals) { +void Manager::SendEntryTable(const ReaderFrontend* reader, int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); bool updated = false; @@ -701,12 +715,12 @@ void Manager::SendEntryTable(const InputReader* reader, int id, const LogVal* co TableFilter* filter = (TableFilter*) i->filters[id]; //reporter->Error("Hashing %d index fields", i->num_idx_fields); - HashKey* idxhash = HashLogVals(filter->num_idx_fields, vals); + HashKey* idxhash = HashValues(filter->num_idx_fields, vals); //reporter->Error("Result: %d", (uint64_t) idxhash->Hash()); //reporter->Error("Hashing %d val fields", i->num_val_fields); HashKey* valhash = 0; if ( filter->num_val_fields > 0 ) - HashLogVals(filter->num_val_fields, vals+filter->num_idx_fields); + HashValues(filter->num_val_fields, vals+filter->num_idx_fields); //reporter->Error("Result: %d", (uint64_t) valhash->Hash()); @@ -731,16 +745,16 @@ void Manager::SendEntryTable(const InputReader* reader, int id, const LogVal* co } - Val* idxval = LogValToIndexVal(filter->num_idx_fields, filter->itype, vals); + Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); Val* valval; int position = filter->num_idx_fields; if ( 
filter->num_val_fields == 0 ) { valval = 0; } else if ( filter->num_val_fields == 1 && !filter->want_record ) { - valval = LogValToVal(vals[position], filter->rtype->FieldType(0)); + valval = ValueToVal(vals[position], filter->rtype->FieldType(0)); } else { - valval = LogValToRecordVal(vals, filter->rtype, &position); + valval = ValueToRecordVal(vals, filter->rtype, &position); } @@ -757,7 +771,7 @@ void Manager::SendEntryTable(const InputReader* reader, int id, const LogVal* co EnumVal* ev; //Ref(idxval); int startpos = 0; - Val* predidx = LogValToRecordVal(vals, filter->itype, &startpos); + Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); Ref(valval); if ( updated ) { @@ -831,7 +845,7 @@ void Manager::SendEntryTable(const InputReader* reader, int id, const LogVal* co } -void Manager::EndCurrentSend(const InputReader* reader, int id) { +void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -914,7 +928,7 @@ void Manager::EndCurrentSend(const InputReader* reader, int id) { filter->currDict = new PDict(InputHash); } -void Manager::Put(const InputReader* reader, int id, const LogVal* const *vals) { +void Manager::Put(const ReaderFrontend* reader, int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -937,7 +951,7 @@ void Manager::Put(const InputReader* reader, int id, const LogVal* const *vals) } -void Manager::SendEventFilterEvent(const InputReader* reader, EnumVal* type, int id, const LogVal* const *vals) { +void Manager::SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); bool updated = false; @@ -956,15 +970,15 @@ void Manager::SendEventFilterEvent(const InputReader* reader, EnumVal* type, int int position = 0; if ( filter->want_record ) { - RecordVal * r = LogValToRecordVal(vals, filter->fields, &position); + RecordVal * r = ValueToRecordVal(vals, filter->fields, &position); out_vals.push_back(r); } else { for ( int j = 0; j < filter->fields->NumFields(); j++) { Val* val = 0; if ( filter->fields->FieldType(j)->Tag() == TYPE_RECORD ) { - val = LogValToRecordVal(vals, filter->fields->FieldType(j)->AsRecordType(), &position); + val = ValueToRecordVal(vals, filter->fields->FieldType(j)->AsRecordType(), &position); } else { - val = LogValToVal(vals[position], filter->fields->FieldType(j)); + val = ValueToVal(vals[position], filter->fields->FieldType(j)); position++; } out_vals.push_back(val); @@ -975,7 +989,7 @@ void Manager::SendEventFilterEvent(const InputReader* reader, EnumVal* type, int } -void Manager::PutTable(const InputReader* reader, int id, const LogVal* const *vals) { +void Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); assert(i); @@ -984,22 +998,22 @@ void Manager::PutTable(const InputReader* reader, int id, const LogVal* const *v assert(i->filters[id]->filter_type == TABLE_FILTER); TableFilter* filter = (TableFilter*) i->filters[id]; - Val* idxval = LogValToIndexVal(filter->num_idx_fields, filter->itype, vals); + Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); Val* valval; int position = filter->num_idx_fields; if ( filter->num_val_fields == 0 ) { valval = 0; } else if ( filter->num_val_fields == 1 && !filter->want_record ) { - valval = LogValToVal(vals[filter->num_idx_fields], 
filter->rtype->FieldType(filter->num_idx_fields)); + valval = ValueToVal(vals[filter->num_idx_fields], filter->rtype->FieldType(filter->num_idx_fields)); } else { - valval = LogValToRecordVal(vals, filter->rtype, &position); + valval = ValueToRecordVal(vals, filter->rtype, &position); } filter->tab->Assign(idxval, valval); } -void Manager::Clear(const InputReader* reader, int id) { +void Manager::Clear(const ReaderFrontend* reader, int id) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -1014,7 +1028,7 @@ void Manager::Clear(const InputReader* reader, int id) { filter->tab->RemoveAll(); } -bool Manager::Delete(const InputReader* reader, int id, const LogVal* const *vals) { +bool Manager::Delete(const ReaderFrontend* reader, int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -1025,7 +1039,7 @@ bool Manager::Delete(const InputReader* reader, int id, const LogVal* const *val if ( i->filters[id]->filter_type == TABLE_FILTER ) { TableFilter* filter = (TableFilter*) i->filters[id]; - Val* idxval = LogValToIndexVal(filter->num_idx_fields, filter->itype, vals); + Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); return( filter->tab->Delete(idxval) != 0 ); } else if ( i->filters[id]->filter_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); @@ -1037,12 +1051,12 @@ bool Manager::Delete(const InputReader* reader, int id, const LogVal* const *val } } -void Manager::Error(InputReader* reader, const char* msg) +void Manager::Error(ReaderFrontend* reader, const char* msg) { reporter->Error("error with input reader for %s: %s", reader->Source().c_str(), msg); } -bool Manager::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) +bool Manager::SendEvent(const string& name, const int num_vals, const Value* const *vals) { EventHandler* handler = event_registry->Lookup(name.c_str()); if ( handler == 0 ) { @@ -1059,7 +1073,7 @@ bool Manager::SendEvent(const string& name, const int num_vals, const LogVal* co val_list* vl = new val_list; for ( int i = 0; i < num_vals; i++) { - vl->append(LogValToVal(vals[i], type->FieldType(i))); + vl->append(ValueToVal(vals[i], type->FieldType(i))); } mgr.Dispatch(new Event(handler, vl)); @@ -1118,7 +1132,7 @@ RecordVal* Manager::ListValToRecordVal(ListVal* list, RecordType *request_type, -RecordVal* Manager::LogValToRecordVal(const LogVal* const *vals, RecordType *request_type, int* position) { +RecordVal* Manager::ValueToRecordVal(const Value* const *vals, RecordType *request_type, int* position) { if ( position == 0 ) { reporter->InternalError("Need position"); return 0; @@ -1136,9 +1150,9 @@ RecordVal* Manager::LogValToRecordVal(const LogVal* const *vals, RecordType *req Val* fieldVal = 0; if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) { - fieldVal = LogValToRecordVal(vals, request_type->FieldType(i)->AsRecordType(), position); + fieldVal = ValueToRecordVal(vals, request_type->FieldType(i)->AsRecordType(), position); } else { - fieldVal = LogValToVal(vals[*position], request_type->FieldType(i)); + fieldVal = ValueToVal(vals[*position], request_type->FieldType(i)); (*position)++; } @@ -1150,7 +1164,7 @@ RecordVal* Manager::LogValToRecordVal(const LogVal* const *vals, RecordType *req } -int Manager::GetLogValLength(const LogVal* val) { +int Manager::GetValueLength(const Value* val) { int length = 0; switch (val->type) { @@ 
-1193,7 +1207,7 @@ int Manager::GetLogValLength(const LogVal* val) { case TYPE_TABLE: { for ( int i = 0; i < val->val.set_val.size; i++ ) { - length += GetLogValLength(val->val.set_val.vals[i]); + length += GetValueLength(val->val.set_val.vals[i]); } break; } @@ -1201,20 +1215,20 @@ int Manager::GetLogValLength(const LogVal* val) { case TYPE_VECTOR: { int j = val->val.vector_val.size; for ( int i = 0; i < j; i++ ) { - length += GetLogValLength(val->val.vector_val.vals[i]); + length += GetValueLength(val->val.vector_val.vals[i]); } break; } default: - reporter->InternalError("unsupported type %d for GetLogValLength", val->type); + reporter->InternalError("unsupported type %d for GetValueLength", val->type); } return length; } -int Manager::CopyLogVal(char *data, const int startpos, const LogVal* val) { +int Manager::CopyValue(char *data, const int startpos, const Value* val) { switch ( val->type ) { case TYPE_BOOL: case TYPE_INT: @@ -1276,7 +1290,7 @@ int Manager::CopyLogVal(char *data, const int startpos, const LogVal* val) { case TYPE_TABLE: { int length = 0; for ( int i = 0; i < val->val.set_val.size; i++ ) { - length += CopyLogVal(data, startpos+length, val->val.set_val.vals[i]); + length += CopyValue(data, startpos+length, val->val.set_val.vals[i]); } return length; break; @@ -1286,14 +1300,14 @@ int Manager::CopyLogVal(char *data, const int startpos, const LogVal* val) { int length = 0; int j = val->val.vector_val.size; for ( int i = 0; i < j; i++ ) { - length += CopyLogVal(data, startpos+length, val->val.vector_val.vals[i]); + length += CopyValue(data, startpos+length, val->val.vector_val.vals[i]); } return length; break; } default: - reporter->InternalError("unsupported type %d for CopyLogVal", val->type); + reporter->InternalError("unsupported type %d for CopyValue", val->type); return 0; } @@ -1302,12 +1316,12 @@ int Manager::CopyLogVal(char *data, const int startpos, const LogVal* val) { } -HashKey* Manager::HashLogVals(const int num_elements, const LogVal* const *vals) { +HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { int length = 0; for ( int i = 0; i < num_elements; i++ ) { - const LogVal* val = vals[i]; - length += GetLogValLength(val); + const Value* val = vals[i]; + length += GetValueLength(val); } //reporter->Error("Length: %d", length); @@ -1318,8 +1332,8 @@ HashKey* Manager::HashLogVals(const int num_elements, const LogVal* const *vals) reporter->InternalError("Could not malloc?"); } for ( int i = 0; i < num_elements; i++ ) { - const LogVal* val = vals[i]; - position += CopyLogVal(data, position, val); + const Value* val = vals[i]; + position += CopyValue(data, position, val); } assert(position == length); @@ -1328,7 +1342,7 @@ HashKey* Manager::HashLogVals(const int num_elements, const LogVal* const *vals) } -Val* Manager::LogValToVal(const LogVal* val, BroType* request_type) { +Val* Manager::ValueToVal(const Value* val, BroType* request_type) { if ( request_type->Tag() != TYPE_ANY && request_type->Tag() != val->type ) { reporter->InternalError("Typetags don't match: %d vs %d", request_type->Tag(), val->type); @@ -1384,7 +1398,7 @@ Val* Manager::LogValToVal(const LogVal* val, BroType* request_type) { SetType* s = new SetType(set_index, 0); TableVal* t = new TableVal(s); for ( int i = 0; i < val->val.set_val.size; i++ ) { - t->Assign(LogValToVal( val->val.set_val.vals[i], type ), 0); + t->Assign(ValueToVal( val->val.set_val.vals[i], type ), 0); } return t; break; @@ -1396,7 +1410,7 @@ Val* Manager::LogValToVal(const LogVal* val, BroType* 
request_type) { VectorType* vt = new VectorType(type->Ref()); VectorVal* v = new VectorVal(vt); for ( int i = 0; i < val->val.vector_val.size; i++ ) { - v->Assign(i, LogValToVal( val->val.set_val.vals[i], type ), 0); + v->Assign(i, ValueToVal( val->val.set_val.vals[i], type ), 0); } return v; @@ -1425,7 +1439,7 @@ Val* Manager::LogValToVal(const LogVal* val, BroType* request_type) { return NULL; } -Manager::ReaderInfo* Manager::FindReader(const InputReader* reader) +Manager::ReaderInfo* Manager::FindReader(const ReaderFrontend* reader) { for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) { @@ -1460,4 +1474,3 @@ string Manager::Hash(const string &input) { return out; } - diff --git a/src/input/Manager.h b/src/input/Manager.h index fe37efa08b..507af6468f 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -14,6 +14,7 @@ namespace input { class ReaderFrontend; +class ReaderBackend; class Manager { public: @@ -30,7 +31,8 @@ public: bool RemoveEventFilter(EnumVal* id, const string &name); protected: - + friend class ReaderFrontend; + // Reports an error for the given reader. void Error(ReaderFrontend* reader, const char* msg); @@ -42,6 +44,8 @@ protected: // for readers to write to input stream in indirect mode (manager is monitoring new/deleted values) void SendEntry(const ReaderFrontend* reader, int id, const threading::Value* const *vals); void EndCurrentSend(const ReaderFrontend* reader, int id); + + ReaderBackend* CreateBackend(ReaderFrontend* frontend, bro_int_t type); private: struct ReaderInfo; diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index b2bcedb2ad..8c996db4a1 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -1,13 +1,44 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "InputReader.h" +#include "ReaderBackend.h" +#include "ReaderFrontend.h" using threading::Value; using threading::Field; -namespace logging { +namespace input { -InputReader::InputReader(ReaderFrontend *arg_frontend) :MsgThread() +class ErrorMessage : public threading::OutputMessage { +public: + ErrorMessage(ReaderFrontend* reader, string message) + : threading::OutputMessage("Error", reader), + message(message) {} + + virtual bool Process() { + input_mgr->Error(object, message.c_str()); + return true; + } + +private: + string message; +} + +class PutMessage : public threading::OutputMessage { +public: + PutMessage(ReaderFrontend* reader, int id, const Value* const *val) + : threading::OutputMessage("Error", reader), + id(id), val(val) {} + + virtual bool Process() { + return input_mgr->Put(object, id, val); + } + +private: + int id; + Value* val; +} + +ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() { buf = 0; buf_len = 1024; @@ -18,38 +49,47 @@ InputReader::InputReader(ReaderFrontend *arg_frontend) :MsgThread() SetName(frontend->Name()); } -InputReader::~InputReader() +ReaderBackend::~ReaderBackend() { } -void InputReader::Error(const char *msg) +void ReaderBackend::Error(const string &msg) { - input_mgr->Error(this, msg); + SendOut(new ErrorMessage(frontend, msg); } -void InputReader::Error(const string &msg) +void ReaderBackend::Put(int id, const Value* const *val) { - input_mgr->Error(this, msg.c_str()); + SendOut(new PutMessage(frontend, id, val); } -void InputReader::Put(int id, const LogVal* const *val) +void ReaderBackend::Delete(int id, const Value* const *val) { - input_mgr->Put(this, id, val); + SendOut(new DeleteMessage(frontend, id, val); } -void InputReader::Clear(int id) +void ReaderBackend::Clear(int id) { - input_mgr->Clear(this, id); + SendOut(new ClearMessage(frontend, id); } -void InputReader::Delete(int id, const LogVal* const *val) +bool ReaderBackend::SendEvent(const string& name, const int num_vals, const Value* const *vals) { - input_mgr->Delete(this, id, val); + SendOut(new SendEventMessage(frontend, name, num_vals, vals); +} + +void ReaderBackend::EndCurrentSend(int id) +{ + SendOut(new EndCurrentSendMessage(frontent, id); } +void ReaderBackend::SendEntry(int id, const Value* const *vals) +{ + SendOut(new SendEntryMessage(frontend, id, vals); +} -bool InputReader::Init(string arg_source) +bool ReaderBackend::Init(string arg_source) { source = arg_source; @@ -58,35 +98,31 @@ bool InputReader::Init(string arg_source) return !disabled; } -bool InputReader::AddFilter(int id, int arg_num_fields, - const LogField* const * arg_fields) +bool ReaderBackend::AddFilter(int id, int arg_num_fields, + const Field* const * arg_fields) { return DoAddFilter(id, arg_num_fields, arg_fields); } -bool InputReader::RemoveFilter(int id) +bool ReaderBackend::RemoveFilter(int id) { return DoRemoveFilter(id); } -void InputReader::Finish() +void ReaderBackend::Finish() { DoFinish(); disabled = true; } -bool InputReader::Update() +bool ReaderBackend::Update() { return DoUpdate(); } -bool InputReader::SendEvent(const string& name, const int num_vals, const LogVal* const *vals) -{ - return input_mgr->SendEvent(name, num_vals, vals); -} // stolen from logwriter -const char* InputReader::Fmt(const char* format, ...) +const char* ReaderBackend::Fmt(const char* format, ...) { if ( ! buf ) buf = (char*) malloc(buf_len); @@ -111,14 +147,5 @@ const char* InputReader::Fmt(const char* format, ...) 
} -void InputReader::SendEntry(int id, const LogVal* const *vals) -{ - input_mgr->SendEntry(this, id, vals); -} - -void InputReader::EndCurrentSend(int id) -{ - input_mgr->EndCurrentSend(this, id); -} } diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index 7d2640b4fd..1fe44a09b2 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -1,26 +1,25 @@ // See the file "COPYING" in the main distribution directory for copyright. -// -// Same notes about thread safety as in LogWriter.h apply. - #ifndef INPUT_READERBACKEND_H #define INPUT_READERBACKEND_H -#include "InputMgr.h" #include "BroString.h" -#include "LogMgr.h" +#include "../threading/SerializationTypes.h" +#include "threading/MsgThread.h" namespace input { +class ReaderFrontend; + class ReaderBackend : public threading::MsgThread { public: - ReaderBackend(ReaderFrontend *frontend); + ReaderBackend(ReaderFrontend* frontend); virtual ~ReaderBackend(); bool Init(string arg_source); - bool AddFilter( int id, int arg_num_fields, const LogField* const* fields ); + bool AddFilter( int id, int arg_num_fields, const threading::Field* const* fields ); bool RemoveFilter ( int id ); @@ -32,7 +31,7 @@ protected: // Methods that have to be overwritten by the individual readers virtual bool DoInit(string arg_sources) = 0; - virtual bool DoAddFilter( int id, int arg_num_fields, const LogField* const* fields ) = 0; + virtual bool DoAddFilter( int id, int arg_num_fields, const threading::Field* const* fields ) = 0; virtual bool DoRemoveFilter( int id ) = 0; @@ -51,15 +50,15 @@ protected: // A thread-safe version of fmt(). (stolen from logwriter) const char* Fmt(const char* format, ...); - bool SendEvent(const string& name, const int num_vals, const LogVal* const *vals); + bool SendEvent(const string& name, const int num_vals, const threading::Value* const *vals); // Content-sendinf-functions (simple mode). Including table-specific stuff that simply is not used if we have no table - void Put(int id, const LogVal* const *val); - void Delete(int id, const LogVal* const *val); + void Put(int id, const threading::Value* const *val); + void Delete(int id, const threading::Value* const *val); void Clear(int id); // Table-functions (tracking mode): Only changed lines are propagated. - void SendEntry(int id, const LogVal* const *vals); + void SendEntry(int id, const threading::Value* const *vals); void EndCurrentSend(int id); diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index e69de29bb2..984ba30794 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -0,0 +1,54 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef INPUT_READERFRONTEND_H +#define INPUT_READERFRONTEND_H + +#include "Manager.h" + +#include "threading/MsgThread.h" + +namespace input { + +class ReaderBackend; + +class ReaderFrontend { +public: + ReaderFrontend(bro_int_t type); + + virtual ~ReaderFrontend(); + + void Init(string arg_source); + + void Update(); + + void AddFilter( int id, int arg_num_fields, const threading::Field* const* fields ); + + void Finish(); + + /** + * Returns a descriptive name for the reader, including the type of + * the backend and the source used. + * + * This method is safe to call from any thread. + */ + string Name() const; + + +protected: + friend class Manager; + + const string Source() const { return source; } + + string ty_name; // Name of the backend type. Set by the manager. 
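A minimal usage sketch of this frontend interface, mirroring the call sequence Manager::CreateStream() uses in this patch; example_setup, the source string "input.log" and the filter id 0 are illustrative placeholders rather than code from the series, and the backend wiring mentioned in the comment is only added by the next commit.

void example_setup(int num_fields, const threading::Field* const* fields)
	{
	// The constructor is expected to pick and start the matching backend
	// thread (the next commit's ReaderFrontend.cc does exactly that).
	ReaderFrontend* reader = new ReaderFrontend(BifEnum::Input::READER_ASCII);

	reader->Init("input.log");                // queues an Init for the backend thread
	reader->AddFilter(0, num_fields, fields); // hands over the expected field layout
	reader->Update();                         // asks the backend to (re)read its source

	// Results arrive asynchronously as messages that the input manager
	// processes on the main thread; Finish() eventually queues shutdown.
	reader->Finish();
	}
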
+ +private: + string source; + +}; + +} + + +#endif /* INPUT_READERFRONTEND_H */ + + diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index 1747f983e4..a3bf5c21a6 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -1,13 +1,15 @@ // See the file "COPYING" in the main distribution directory for copyright. -#ifndef INPUTREADERASCII_H -#define INPUTREADERASCII_H +#ifndef INPUT_READERS_ASCII_H +#define INPUT_READERS_ASCII_H -#include "InputReader.h" -#include #include #include +#include "../ReaderBackend.h" + +namespace input { namespace reader { + // Description for input field mapping struct FieldMapping { string name; @@ -28,18 +30,18 @@ struct FieldMapping { }; -class InputReaderAscii : public InputReader { +class Ascii : public ReaderBackend { public: - InputReaderAscii(); - ~InputReaderAscii(); + Ascii(ReaderFrontend* frontend); + ~Ascii(); - static InputReader* Instantiate() { return new InputReaderAscii; } + static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Ascii(frontend); } protected: virtual bool DoInit(string path); - virtual bool DoAddFilter( int id, int arg_num_fields, const LogField* const* fields ); + virtual bool DoAddFilter( int id, int arg_num_fields, const threading::Field* const* fields ); virtual bool DoRemoveFilter ( int id ); @@ -52,7 +54,7 @@ private: struct Filter { unsigned int num_fields; - const LogField* const * fields; // raw mapping + const threading::Field* const * fields; // raw mapping // map columns in the file to columns to send back to the manager vector columnMap; @@ -64,7 +66,7 @@ private: TransportProto StringToProto(const string &proto); bool ReadHeader(); - LogVal* EntryToVal(string s, FieldMapping type); + threading::Value* EntryToVal(string s, FieldMapping type); bool GetLine(string& str); @@ -85,4 +87,7 @@ private: }; -#endif /* INPUTREADERASCII_H */ +} +} + +#endif /* INPUT_READERS_ASCII_H */ diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index add10b3f10..65a55dee02 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -14,7 +14,7 @@ #include "writers/Ascii.h" #include "writers/None.h" -#include "threading/SerializationTypes.h" +#include "../threading/SerializationTypes.h" using namespace logging; using threading::Value; diff --git a/src/threading/SerializationTypes.cc b/src/threading/SerializationTypes.cc index f74de6ce57..dc5a1a14f9 100644 --- a/src/threading/SerializationTypes.cc +++ b/src/threading/SerializationTypes.cc @@ -12,7 +12,8 @@ bool Field::Read(SerializationFormat* fmt) int t; int st; - bool success = (fmt->Read(&name, "name") && fmt->Read(&t, "type") && fmt->Read(&st, "subtype") ); + bool success = (fmt->Read(&name, "name") && fmt->Read(&secondary_name, "secondary_name") && + fmt->Read(&t, "type") && fmt->Read(&st, "subtype") ); type = (TypeTag) t; subtype = (TypeTag) st; @@ -21,7 +22,8 @@ bool Field::Read(SerializationFormat* fmt) bool Field::Write(SerializationFormat* fmt) const { - return (fmt->Write(name, "name") && fmt->Write((int)type, "type") && fmt->Write((int)subtype, "subtype")); + return (fmt->Write(name, "name") && fmt->Write(secondary_name, "secondary_name") && fmt->Write((int)type, "type") && + fmt->Write((int)subtype, "subtype")); } Value::~Value() diff --git a/src/threading/SerializationTypes.h b/src/threading/SerializationTypes.h index 11ceda929c..ffcf774842 100644 --- a/src/threading/SerializationTypes.h +++ b/src/threading/SerializationTypes.h @@ -13,6 +13,8 @@ namespace threading { */ struct Field { string name; //! 
Name of the field. + // needed by input framework. port fields have two names (one for the port, one for the type) - this specifies the secondary name. + string secondary_name; TypeTag type; //! Type of the field. TypeTag subtype; //! Inner type for sets. From 8385d5bb2dedab25da82b67213a6051bd52e1746 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 6 Feb 2012 17:37:02 -0800 Subject: [PATCH 089/651] it compiles :) But that's all, not tested, don't expect it to do anything but crash. --- src/input/Manager.cc | 4 +- src/input/Manager.h | 17 +++-- src/input/ReaderBackend.cc | 114 ++++++++++++++++++++++++++++----- src/input/ReaderBackend.h | 4 +- src/input/ReaderFrontend.cc | 121 +++++++++++++++++++++++++++++++----- src/input/ReaderFrontend.h | 17 ++--- src/input/readers/Ascii.cc | 48 +++++++------- 7 files changed, 258 insertions(+), 67 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index a97286162d..189a034b0f 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -680,7 +680,7 @@ Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Valu } -void Manager::SendEntry(const ReaderFrontend* reader, int id, const Value* const *vals) { +void Manager::SendEntry(const ReaderFrontend* reader, const int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -703,7 +703,7 @@ void Manager::SendEntry(const ReaderFrontend* reader, int id, const Value* const } -void Manager::SendEntryTable(const ReaderFrontend* reader, int id, const Value* const *vals) { +void Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); bool updated = false; diff --git a/src/input/Manager.h b/src/input/Manager.h index 507af6468f..a0b98294ca 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -14,7 +14,7 @@ namespace input { class ReaderFrontend; -class ReaderBackend; +class ReaderBackend; class Manager { public: @@ -32,6 +32,13 @@ public: protected: friend class ReaderFrontend; + friend class ErrorMessage; + friend class PutMessage; + friend class DeleteMessage; + friend class ClearMessage; + friend class SendEventMessage; + friend class SendEntryMessage; + friend class EndCurrentSendMessage; // Reports an error for the given reader. 
void Error(ReaderFrontend* reader, const char* msg); @@ -42,11 +49,14 @@ protected: bool Delete(const ReaderFrontend* reader, int id, const threading::Value* const *vals); // for readers to write to input stream in indirect mode (manager is monitoring new/deleted values) - void SendEntry(const ReaderFrontend* reader, int id, const threading::Value* const *vals); - void EndCurrentSend(const ReaderFrontend* reader, int id); + void SendEntry(const ReaderFrontend* reader, const int id, const threading::Value* const *vals); + void EndCurrentSend(const ReaderFrontend* reader, const int id); + + bool SendEvent(const string& name, const int num_vals, const threading::Value* const *vals); ReaderBackend* CreateBackend(ReaderFrontend* frontend, bro_int_t type); + private: struct ReaderInfo; @@ -60,7 +70,6 @@ private: void SendEvent(EventHandlerPtr ev, const int numvals, ...); void SendEvent(EventHandlerPtr ev, list events); - bool SendEvent(const string& name, const int num_vals, const threading::Value* const *vals); HashKey* HashValues(const int num_elements, const threading::Value* const *vals); int GetValueLength(const threading::Value* val); diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 8c996db4a1..72c8f95d8e 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -2,6 +2,7 @@ #include "ReaderBackend.h" #include "ReaderFrontend.h" +#include "Manager.h" using threading::Value; using threading::Field; @@ -15,28 +16,106 @@ public: message(message) {} virtual bool Process() { - input_mgr->Error(object, message.c_str()); + input_mgr->Error(Object(), message.c_str()); return true; } private: string message; -} +}; class PutMessage : public threading::OutputMessage { public: PutMessage(ReaderFrontend* reader, int id, const Value* const *val) - : threading::OutputMessage("Error", reader), + : threading::OutputMessage("Put", reader), id(id), val(val) {} virtual bool Process() { - return input_mgr->Put(object, id, val); + input_mgr->Put(Object(), id, val); + return true; } private: int id; - Value* val; -} + const Value* const *val; +}; + +class DeleteMessage : public threading::OutputMessage { +public: + DeleteMessage(ReaderFrontend* reader, int id, const Value* const *val) + : threading::OutputMessage("Delete", reader), + id(id), val(val) {} + + virtual bool Process() { + return input_mgr->Delete(Object(), id, val); + } + +private: + int id; + const Value* const *val; +}; + +class ClearMessage : public threading::OutputMessage { +public: + ClearMessage(ReaderFrontend* reader, int id) + : threading::OutputMessage("Clear", reader), + id(id) {} + + virtual bool Process() { + input_mgr->Clear(Object(), id); + return true; + } + +private: + int id; +}; + +class SendEventMessage : public threading::OutputMessage { +public: + SendEventMessage(ReaderFrontend* reader, const string& name, const int num_vals, const Value* const *val) + : threading::OutputMessage("SendEvent", reader), + name(name), num_vals(num_vals), val(val) {} + + virtual bool Process() { + return input_mgr->SendEvent(name, num_vals, val); + } + +private: + const string name; + const int num_vals; + const Value* const *val; +}; + +class SendEntryMessage : public threading::OutputMessage { +public: + SendEntryMessage(ReaderFrontend* reader, const int id, const Value* const *val) + : threading::OutputMessage("SendEntry", reader), + id(id), val(val) {} + + virtual bool Process() { + input_mgr->SendEntry(Object(), id, val); + return true; + } + +private: + const int id; + const Value* const *val; +}; + 
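The message classes above all share one shape; the sketch below distills it, using the hypothetical name ExampleMessage and reusing the manager's existing Clear() call purely for illustration (assuming OutputMessage is parameterized by the frontend type, as its Object() accessor implies). A backend queues such a message from the reader thread via SendOut(); the threading manager later invokes Process() on the main thread, so the input manager is only ever touched from there.

class ExampleMessage : public threading::OutputMessage<ReaderFrontend> {
public:
	ExampleMessage(ReaderFrontend* reader, int id)
		: threading::OutputMessage<ReaderFrontend>("Example", reader),
		  id(id) {}

	virtual bool Process()
		{
		// Runs on the main thread; Object() is the frontend the reader
		// thread created this message for.
		input_mgr->Clear(Object(), id);
		return true;
		}

private:
	int id;
};

// On the reader thread a backend hands the message off like this:
//     SendOut(new ExampleMessage(frontend, id));
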
+class EndCurrentSendMessage : public threading::OutputMessage { +public: + EndCurrentSendMessage(ReaderFrontend* reader, int id) + : threading::OutputMessage("SendEntry", reader), + id(id) {} + + virtual bool Process() { + input_mgr->EndCurrentSend(Object(), id); + return true; + } + +private: + int id; +}; ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() { @@ -56,37 +135,44 @@ ReaderBackend::~ReaderBackend() void ReaderBackend::Error(const string &msg) { - SendOut(new ErrorMessage(frontend, msg); + SendOut(new ErrorMessage(frontend, msg)); } +/* +void ReaderBackend::Error(const char *msg) +{ + SendOut(new ErrorMessage(frontend, string(msg))); +} */ + + void ReaderBackend::Put(int id, const Value* const *val) { - SendOut(new PutMessage(frontend, id, val); + SendOut(new PutMessage(frontend, id, val)); } void ReaderBackend::Delete(int id, const Value* const *val) { - SendOut(new DeleteMessage(frontend, id, val); + SendOut(new DeleteMessage(frontend, id, val)); } void ReaderBackend::Clear(int id) { - SendOut(new ClearMessage(frontend, id); + SendOut(new ClearMessage(frontend, id)); } -bool ReaderBackend::SendEvent(const string& name, const int num_vals, const Value* const *vals) +void ReaderBackend::SendEvent(const string& name, const int num_vals, const Value* const *vals) { - SendOut(new SendEventMessage(frontend, name, num_vals, vals); + SendOut(new SendEventMessage(frontend, name, num_vals, vals)); } void ReaderBackend::EndCurrentSend(int id) { - SendOut(new EndCurrentSendMessage(frontent, id); + SendOut(new EndCurrentSendMessage(frontend, id)); } void ReaderBackend::SendEntry(int id, const Value* const *vals) { - SendOut(new SendEntryMessage(frontend, id, vals); + SendOut(new SendEntryMessage(frontend, id, vals)); } bool ReaderBackend::Init(string arg_source) diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index 1fe44a09b2..a37daaf4b6 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -42,7 +42,7 @@ protected: // Reports an error to the user. void Error(const string &msg); - void Error(const char *msg); + //void Error(const char *msg); // The following methods return the information as passed to Init(). const string Source() const { return source; } @@ -50,7 +50,7 @@ protected: // A thread-safe version of fmt(). (stolen from logwriter) const char* Fmt(const char* format, ...); - bool SendEvent(const string& name, const int num_vals, const threading::Value* const *vals); + void SendEvent(const string& name, const int num_vals, const threading::Value* const *vals); // Content-sendinf-functions (simple mode). Including table-specific stuff that simply is not used if we have no table void Put(int id, const threading::Value* const *val); diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index 44638d90b3..a7f9a4d2f6 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -1,28 +1,117 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#ifndef INPUT_READERFRONTEND_H -#define INPUT_READERFRONTEND_H - #include "Manager.h" - +#include "ReaderFrontend.h" +#include "ReaderBackend.h" #include "threading/MsgThread.h" -namespace logging { - -class ReaderBackend; - -class ReaderFrontend { - - ReaderFrontend(bro_int_t type); - - virtual ~ReaderFrontend(); +namespace input { -protected: - friend class Manager; +class InitMessage : public threading::InputMessage +{ +public: + InitMessage(ReaderBackend* backend, const string source) + : threading::InputMessage("Init", backend), + source(source) { } + + virtual bool Process() { return Object()->Init(source); } + +private: + const string source; }; +class UpdateMessage : public threading::InputMessage +{ +public: + UpdateMessage(ReaderBackend* backend) + : threading::InputMessage("Update", backend) + { } + + virtual bool Process() { return Object()->Update(); } +}; + +class FinishMessage : public threading::InputMessage +{ +public: + FinishMessage(ReaderBackend* backend) + : threading::InputMessage("Finish", backend) + { } + + virtual bool Process() { Object()->Finish(); return true; } +}; + +class AddFilterMessage : public threading::InputMessage +{ +public: + AddFilterMessage(ReaderBackend* backend, const int id, const int num_fields, const threading::Field* const* fields) + : threading::InputMessage("AddFilter", backend), + id(id), num_fields(num_fields), fields(fields) { } + + virtual bool Process() { return Object()->AddFilter(id, num_fields, fields); } + +private: + const int id; + const int num_fields; + const threading::Field* const* fields; +}; + +ReaderFrontend::ReaderFrontend(bro_int_t type) { + disabled = initialized = false; + ty_name = ""; + backend = input_mgr->CreateBackend(this, type); + + assert(backend); + backend->Start(); +} + +ReaderFrontend::~ReaderFrontend() { +} + +void ReaderFrontend::Init(string arg_source) { + if ( disabled ) + return; + + if ( initialized ) + reporter->InternalError("writer initialize twice"); + + source = arg_source; + initialized = true; + + backend->SendIn(new InitMessage(backend, arg_source)); +} + +void ReaderFrontend::Update() { + if ( disabled ) + return; + + backend->SendIn(new UpdateMessage(backend)); +} + +void ReaderFrontend::Finish() { + if ( disabled ) + return; + + backend->SendIn(new FinishMessage(backend)); +} + +void ReaderFrontend::AddFilter(const int id, const int arg_num_fields, const threading::Field* const* fields) { + if ( disabled ) + return; + + backend->SendIn(new AddFilterMessage(backend, id, arg_num_fields, fields)); +} + +string ReaderFrontend::Name() const + { + if ( source.size() ) + return ty_name; + + return ty_name + "/" + source; + } + + + } -#endif /* INPUT_READERFRONTEND_H */ diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index 984ba30794..876082d9a6 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -3,13 +3,12 @@ #ifndef INPUT_READERFRONTEND_H #define INPUT_READERFRONTEND_H -#include "Manager.h" +#include "../threading/MsgThread.h" +#include "../threading/SerializationTypes.h" -#include "threading/MsgThread.h" +namespace input { -namespace input { - -class ReaderBackend; +class Manager; class ReaderFrontend { public: @@ -21,7 +20,7 @@ public: void Update(); - void AddFilter( int id, int arg_num_fields, const threading::Field* const* fields ); + void AddFilter( const int id, const int arg_num_fields, const threading::Field* const* fields ); void Finish(); @@ -32,17 +31,19 @@ public: * This method is safe to call from any thread. 
*/ string Name() const; - protected: friend class Manager; - const string Source() const { return source; } + const string Source() const { return source; }; string ty_name; // Name of the backend type. Set by the manager. private: + ReaderBackend* backend; // The backend we have instanatiated. string source; + bool disabled; // True if disabled. + bool initialized; // True if initialized. }; diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 257cb4cf71..e798f69a36 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -1,11 +1,17 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "InputReaderAscii.h" -#include "DebugLogger.h" +#include "Ascii.h" #include "NetVar.h" +#include #include +#include "../../threading/SerializationTypes.h" + +using namespace input::reader; +using threading::Value; +using threading::Field; + FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position) : name(arg_name), type(arg_type) { @@ -31,7 +37,7 @@ FieldMapping FieldMapping::subType() { return FieldMapping(name, subtype, position); } -InputReaderAscii::InputReaderAscii() +Ascii::Ascii(ReaderFrontend *frontend) : ReaderBackend(frontend) { file = 0; @@ -53,13 +59,13 @@ InputReaderAscii::InputReaderAscii() } -InputReaderAscii::~InputReaderAscii() +Ascii::~Ascii() { DoFinish(); } -void InputReaderAscii::DoFinish() +void Ascii::DoFinish() { filters.empty(); if ( file != 0 ) { @@ -69,7 +75,7 @@ void InputReaderAscii::DoFinish() } } -bool InputReaderAscii::DoInit(string path) +bool Ascii::DoInit(string path) { fname = path; @@ -82,7 +88,7 @@ bool InputReaderAscii::DoInit(string path) return true; } -bool InputReaderAscii::DoAddFilter( int id, int arg_num_fields, const LogField* const* fields ) { +bool Ascii::DoAddFilter( int id, int arg_num_fields, const Field* const* fields ) { if ( HasFilter(id) ) { return false; // no, we don't want to add this a second time } @@ -96,7 +102,7 @@ bool InputReaderAscii::DoAddFilter( int id, int arg_num_fields, const LogField* return true; } -bool InputReaderAscii::DoRemoveFilter ( int id ) { +bool Ascii::DoRemoveFilter ( int id ) { if (!HasFilter(id) ) { return false; } @@ -107,7 +113,7 @@ bool InputReaderAscii::DoRemoveFilter ( int id ) { } -bool InputReaderAscii::HasFilter(int id) { +bool Ascii::HasFilter(int id) { map::iterator it = filters.find(id); if ( it == filters.end() ) { return false; @@ -116,7 +122,7 @@ bool InputReaderAscii::HasFilter(int id) { } -bool InputReaderAscii::ReadHeader() { +bool Ascii::ReadHeader() { // try to read the header line... 
string line; if ( !GetLine(line) ) { @@ -142,7 +148,7 @@ bool InputReaderAscii::ReadHeader() { for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { - const LogField* field = (*it).second.fields[i]; + const Field* field = (*it).second.fields[i]; map::iterator fit = fields.find(field->name); if ( fit == fields.end() ) { @@ -169,7 +175,7 @@ bool InputReaderAscii::ReadHeader() { return true; } -bool InputReaderAscii::GetLine(string& str) { +bool Ascii::GetLine(string& str) { while ( getline(*file, str) ) { if ( str[0] != '#' ) { return true; @@ -184,7 +190,7 @@ bool InputReaderAscii::GetLine(string& str) { return false; } -TransportProto InputReaderAscii::StringToProto(const string &proto) { +TransportProto Ascii::StringToProto(const string &proto) { if ( proto == "unknown" ) { return TRANSPORT_UNKNOWN; } else if ( proto == "tcp" ) { @@ -202,12 +208,12 @@ TransportProto InputReaderAscii::StringToProto(const string &proto) { return TRANSPORT_UNKNOWN; } -LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { +Value* Ascii::EntryToVal(string s, FieldMapping field) { - LogVal* val = new LogVal(field.type, true); + Value* val = new Value(field.type, true); if ( s.compare(unset_field) == 0 ) { // field is not set... - return new LogVal(field.type, false); + return new Value(field.type, false); } switch ( field.type ) { @@ -306,7 +312,7 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { if ( s.compare(empty_field) == 0 ) length = 0; - LogVal** lvals = new LogVal* [length]; + Value** lvals = new Value* [length]; if ( field.type == TYPE_TABLE ) { val->val.set_val.vals = lvals; @@ -333,7 +339,7 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { break; } - LogVal* newval = EntryToVal(element, field.subType()); + Value* newval = EntryToVal(element, field.subType()); if ( newval == 0 ) { Error("Error while reading set"); return 0; @@ -365,7 +371,7 @@ LogVal* InputReaderAscii::EntryToVal(string s, FieldMapping field) { } // read the entire file and send appropriate thingies back to InputMgr -bool InputReaderAscii::DoUpdate() { +bool Ascii::DoUpdate() { // dirty, fix me. (well, apparently after trying seeking, etc - this is not that bad) if ( file && file->is_open() ) { @@ -405,7 +411,7 @@ bool InputReaderAscii::DoUpdate() { for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { - LogVal** fields = new LogVal*[(*it).second.num_fields]; + Value** fields = new Value*[(*it).second.num_fields]; int fpos = 0; for ( vector::iterator fit = (*it).second.columnMap.begin(); @@ -417,7 +423,7 @@ bool InputReaderAscii::DoUpdate() { return false; } - LogVal* val = EntryToVal(stringfields[(*fit).position], *fit); + Value* val = EntryToVal(stringfields[(*fit).position], *fit); if ( val == 0 ) { return false; } From b8ec653ebf31ffc7c2dffa02614f93b91bb45367 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sun, 12 Feb 2012 09:41:43 -0800 Subject: [PATCH 090/651] Bugfixes. - Data queued at termination wasn't written out completely. - Fixed some race conditions. - Fixing IOSource integration. - Fixing setting thread names on Linux. - Fixing minor leaks. All tests now pass for me on Linux in debug and non-debug compiles. Remaining TODOs: - Needs leak check. - Test on MacOS and FreeBSD. - More testing: - High volume traffic. - Different platforms. 
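The first fix listed above (data queued at termination) comes down to the new logging::Manager::Terminate(), which flushes every stream before the thread manager shuts down, plus one extra clause in WriterFrontend::Write(). Below is a small self-contained sketch of that flush rule; FrontendState, should_flush and the WRITER_BUFFER_SIZE value are illustrative stand-ins, and only the three-way condition mirrors the patch.

#include <cstddef>

static const size_t WRITER_BUFFER_SIZE = 1000; // illustrative size

struct FrontendState {
	size_t write_buffer_pos; // number of writes currently buffered
	bool buf;                // is buffering enabled for this writer?
	bool terminating;        // has shutdown started?
};

// True when buffered log writes should be pushed to the writer backend now.
// The "terminating" clause is what this commit adds, so that data still
// queued at shutdown is written out completely rather than left behind.
bool should_flush(const FrontendState& s)
	{
	return s.write_buffer_pos >= WRITER_BUFFER_SIZE || ! s.buf || s.terminating;
	}
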
--- src/logging/Manager.cc | 9 +++++++++ src/logging/Manager.h | 8 +++++++- src/logging/WriterBackend.cc | 8 ++++++-- src/logging/WriterFrontend.cc | 6 ++++-- src/logging/WriterFrontend.h | 3 --- src/logging/writers/Ascii.cc | 24 ++++++++++++++++++++++-- src/main.cc | 1 + src/threading/BasicThread.cc | 11 +++++++++-- src/threading/BasicThread.h | 2 +- src/threading/Manager.cc | 30 +++++++++++++++++++----------- src/threading/MsgThread.cc | 21 +++++++++++++++++---- src/threading/MsgThread.h | 7 +++++-- 12 files changed, 100 insertions(+), 30 deletions(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 6d53ea363f..593766e52a 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -1470,6 +1470,15 @@ bool Manager::Flush(EnumVal* id) return true; } +void Manager::Terminate() + { + for ( vector::iterator s = streams.begin(); s != streams.end(); ++s ) + { + if ( *s ) + Flush((*s)->id); + } + } + void Manager::Error(WriterFrontend* writer, const char* msg) { reporter->Error("error with writer for %s: %s", diff --git a/src/logging/Manager.h b/src/logging/Manager.h index f6829b3554..d12fc7e8fe 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -251,6 +251,12 @@ public: */ bool Flush(EnumVal* id); + /** + * Prepares the log manager to terminate. This will flush all log + * stream. + */ + void Terminate(); + protected: friend class WriterFrontend; friend class RotationFinishedMessage; @@ -258,7 +264,7 @@ protected: friend class ::RotationTimer; // Instantiates a new WriterBackend of the given type (note that - // doing so creates a new thread!). + // doing so creates a new thread!). WriterBackend* CreateBackend(WriterFrontend* frontend, bro_int_t type); //// Function also used by the RemoteSerializer. diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 4d2e497b14..fa86fce324 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -57,7 +57,7 @@ using namespace logging; WriterBackend::WriterBackend(WriterFrontend* arg_frontend) : MsgThread() { - path = ""; + path = ""; num_fields = 0; fields = 0; buffering = true; @@ -109,7 +109,9 @@ bool WriterBackend::Init(string arg_path, int arg_num_fields, const Field* const num_fields = arg_num_fields; fields = arg_fields; - SetName(frontend->Name()); + string name = Fmt("%s/%s", path.c_str(), frontend->Name().c_str()); + + SetName(name); if ( ! DoInit(arg_path, arg_num_fields, arg_fields) ) { @@ -229,6 +231,8 @@ bool WriterBackend::Finish() bool WriterBackend::DoHeartbeat(double network_time, double current_time) { + MsgThread::DoHeartbeat(network_time, current_time); + SendOut(new FlushWriteBufferMessage(frontend)); return true; diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 79278870f9..0a8ff4b09d 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -1,4 +1,6 @@ +#include "Net.h" + #include "WriterFrontend.h" #include "WriterBackend.h" @@ -155,8 +157,8 @@ void WriterFrontend::Write(int num_fields, Value** vals) write_buffer[write_buffer_pos++] = vals; - if ( write_buffer_pos >= WRITER_BUFFER_SIZE || ! buf ) - // Buffer full (or no bufferin desired). + if ( write_buffer_pos >= WRITER_BUFFER_SIZE || ! buf || terminating ) + // Buffer full (or no bufferin desired or termiating). 
FlushWriteBuffer(); } diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index e0bc590dfc..a1a1e2b86a 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -30,9 +30,6 @@ public: * frontend will internally instantiate a WriterBackend of the * corresponding type. * - * name: A descriptive name for the backend wroter type (e.g., \c - * Ascii). - * * Frontends must only be instantiated by the main thread. */ WriterFrontend(bro_int_t type); diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 7cc8459e68..5429bf0b97 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -171,14 +171,34 @@ bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) break; case TYPE_SUBNET: - desc->Add(dotted_addr(val->val.subnet_val.net)); + { + // FIXME: This will be replaced with string(addr) once the + // IPV6 branch is merged in. + uint32_t addr = ntohl(val->val.subnet_val.net); + char buf[32]; + snprintf(buf, sizeof(buf), "%d.%d.%d.%d", + addr >> 24, (addr >> 16) & 0xff, + (addr >> 8) & 0xff, addr & 0xff); + + desc->Add(buf); desc->Add("/"); desc->Add(val->val.subnet_val.width); break; + } case TYPE_ADDR: - desc->Add(dotted_addr(val->val.addr_val)); + { + // FIXME: This will be replaced with string(addr) once the + // IPV6 branch is merged in. + uint32_t addr = ntohl(*val->val.addr_val); + char buf[32]; + snprintf(buf, sizeof(buf), "%d.%d.%d.%d", + addr >> 24, (addr >> 16) & 0xff, + (addr >> 8) & 0xff, addr & 0xff); + + desc->Add(buf); break; + } case TYPE_TIME: case TYPE_INTERVAL: diff --git a/src/main.cc b/src/main.cc index e224910db4..c101e54e74 100644 --- a/src/main.cc +++ b/src/main.cc @@ -290,6 +290,7 @@ void terminate_bro() if ( remote_serializer ) remote_serializer->LogStats(); + log_mgr->Terminate(); thread_mgr->Terminate(); delete timer_mgr; diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index 4d51c3c4e4..51c4f7a3bc 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -2,9 +2,14 @@ #include #include +#include "config.h" #include "BasicThread.h" #include "Manager.h" +#ifdef HAVE_LINUX +#include +#endif + using namespace threading; uint64_t BasicThread::thread_counter = 0; @@ -25,6 +30,8 @@ BasicThread::BasicThread() BasicThread::~BasicThread() { + if ( buf ) + free(buf); } void BasicThread::SetName(const string& arg_name) @@ -35,8 +42,8 @@ void BasicThread::SetName(const string& arg_name) void BasicThread::SetOSName(const string& name) { -#ifdef LINUX - pthread_setname_np(pthread_self(), name.c_str()); +#ifdef HAVE_LINUX + prctl(PR_SET_NAME, name.c_str(), 0, 0, 0); #endif #ifdef __APPLE__ diff --git a/src/threading/BasicThread.h b/src/threading/BasicThread.h index 6d2f739620..cc87ae03bc 100644 --- a/src/threading/BasicThread.h +++ b/src/threading/BasicThread.h @@ -82,7 +82,7 @@ public: void Stop(); /** - * Returns true if Terminate() has been called. + * Returns true if Stop() has been called. * * This method is safe to call from any thread. 
*/ diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index d07311bbe8..d008d2e5e8 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -7,9 +7,10 @@ Manager::Manager() { DBG_LOG(DBG_THREADING, "Creating thread manager ..."); - did_process = false; + did_process = true; next_beat = 0; terminating = false; + idle = false; } Manager::~Manager() @@ -41,6 +42,7 @@ void Manager::Terminate() all_threads.clear(); msg_threads.clear(); + idle = true; terminating = false; } @@ -70,18 +72,22 @@ void Manager::GetFds(int* read, int* write, int* except) double Manager::NextTimestamp(double* network_time) { - if ( did_process || ! next_beat == 0 ) - // If we had something to process last time (or haven't had a - // chance to check yet), we want to check for more asap. + if ( ::network_time && ! next_beat ) + next_beat = ::network_time + HEART_BEAT_INTERVAL; + +// fprintf(stderr, "N %.6f %.6f did_process=%d next_next=%.6f\n", ::network_time, timer_mgr->Time(), (int)did_process, next_beat); + + if ( did_process || ::network_time > next_beat ) + // If we had something to process last time (or out heartbeat + // is due), we want to check for more asap. return timer_mgr->Time(); - // Else we assume we don't have much to do at all and wait for the next heart beat. - return next_beat; + return -1.0; } void Manager::Process() { - bool do_beat = (next_beat == 0 || network_time >= next_beat); + bool do_beat = (next_beat && network_time > next_beat); did_process = false; @@ -90,14 +96,17 @@ void Manager::Process() MsgThread* t = *i; if ( do_beat ) + { t->Heartbeat(); + next_beat = 0; + } if ( ! t->HasOut() ) continue; Message* msg = t->RetrieveOut(); - if ( msg->Process() ) + if ( msg->Process() && network_time ) did_process = true; else @@ -110,15 +119,14 @@ void Manager::Process() delete msg; } - if ( do_beat ) - next_beat = network_time + HEART_BEAT_INTERVAL; +// fprintf(stderr, "P %.6f %.6f do_beat=%d did_process=%d next_next=%.6f\n", network_time, timer_mgr->Time(), do_beat, (int)did_process, next_beat); } const threading::Manager::msg_stats_list& threading::Manager::GetMsgThreadStats() { stats.clear(); - for ( msg_thread_list::iterator i = msg_threads.begin(); i != msg_threads.end(); i++ ) + for ( msg_thread_list::iterator i = msg_threads.begin(); i != msg_threads.end(); i++ ) { MsgThread* t = *i; diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index f41b20ddf9..b7782b9a05 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -142,12 +142,19 @@ void MsgThread::OnStop() void MsgThread::Heartbeat() { SendIn(new HeartbeatMessage(this, network_time, current_time())); + } - string name = Fmt("%s (%d/%d)", name.c_str(), - cnt_sent_in - queue_in.Size(), - cnt_sent_out - queue_out.Size()); +bool MsgThread::DoHeartbeat(double network_time, double current_time) + { + string n = Name(); - SetOSName(name.c_str()); + n = Fmt("bro: %s (%" PRIu64 "/%" PRIu64 ")", n.c_str(), + cnt_sent_in - queue_in.Size(), + cnt_sent_out - queue_out.Size()); + + SetOSName(n.c_str()); + + return true; } void MsgThread::Info(const char* msg) @@ -197,7 +204,10 @@ void MsgThread::Debug(DebugStream stream, const char* msg) void MsgThread::SendIn(BasicInputMessage* msg, bool force) { if ( Terminating() && ! 
force ) + { + delete msg; return; + } DBG_LOG(DBG_THREADING, "Sending '%s' to %s ...", msg->Name().c_str(), Name().c_str()); @@ -209,7 +219,10 @@ void MsgThread::SendIn(BasicInputMessage* msg, bool force) void MsgThread::SendOut(BasicOutputMessage* msg, bool force) { if ( Terminating() && ! force ) + { + delete msg; return; + } queue_out.Put(msg); diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 459ac6c603..28c7690dfa 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -184,7 +184,10 @@ protected: * This is method is called regularly by the threading::Manager. * * Can be overriden in derived classed to hook into the heart beat, - * but must call the parent implementation. + * but must call the parent implementation. Note that this method is + * always called by the main thread and must not access data of the + * child thread directly. See DoHeartbeat() if you want to do + * something on the child-side. */ virtual void Heartbeat(); @@ -206,7 +209,7 @@ protected: * current_time: Wall clock when the heartbeat was trigger by the * main thread. */ - virtual bool DoHeartbeat(double network_time, double current_time) { return true; } + virtual bool DoHeartbeat(double network_time, double current_time); private: /** From 7fcb7b5f17a966b6f384f7c34aa234fb60a45483 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sun, 12 Feb 2012 13:04:47 -0800 Subject: [PATCH 091/651] Save CPU when idle. This needs a bit more testing. It may also with the general problem of high CPU usage with low traffic. --- src/Net.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Net.cc b/src/Net.cc index 2d8ee85353..d2b505544e 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -454,6 +454,7 @@ void net_run() // date on timers and events. network_time = ct; expire_timers(); + usleep(1); // Just yield. } } From 88233efb2c8b6700dd2ec0a1f6addc65a282b622 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 13 Feb 2012 22:29:55 -0800 Subject: [PATCH 092/651] It works. Even including all unit tests. But: there are still a few places where I am sure that there are race conditions & memory leaks & I do not really like the current interface & I have to add a few more messages between the front and backend. But - it works :) --- scripts/base/frameworks/input/main.bro | 2 + src/input/Manager.cc | 78 ++++++++++++++----- src/input/Manager.h | 14 ++-- src/input/ReaderBackend.cc | 28 +++---- src/input/ReaderBackend.h | 8 +- src/input/ReaderFrontend.cc | 1 + src/input/readers/Ascii.cc | 4 +- src/main.cc | 1 + src/threading/Manager.cc | 2 +- src/threading/MsgThread.cc | 2 +- .../scripts/base/frameworks/input/basic.bro | 5 +- .../frameworks/input/onecolumn-norecord.bro | 4 + .../frameworks/input/onecolumn-record.bro | 4 + .../scripts/base/frameworks/input/port.bro | 7 ++ .../base/frameworks/input/predicate.bro | 3 + .../base/frameworks/input/twofilters.bro | 13 +++- 16 files changed, 127 insertions(+), 49 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index c76eba80b9..cac1aca54a 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -114,6 +114,8 @@ export { ## filter: the `TableFilter` record describing the filter. 
global read_table: function(description: Input::StreamDescription, filter: Input::TableFilter) : bool; + global update_finished: event(id: Input::ID); + } @load base/input.bif diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 189a034b0f..79d42fe71f 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -215,7 +215,7 @@ ReaderFrontend* Manager::CreateStream(EnumVal* id, RecordVal* description) EnumVal* reader = description->LookupWithDefault(rtype->FieldOffset("reader"))->AsEnumVal(); - ReaderFrontend* reader_obj = new ReaderFrontend(id->AsEnum()); + ReaderFrontend* reader_obj = new ReaderFrontend(reader->InternalInt()); assert(reader_obj); // get the source... @@ -680,7 +680,7 @@ Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Valu } -void Manager::SendEntry(const ReaderFrontend* reader, const int id, const Value* const *vals) { +void Manager::SendEntry(const ReaderFrontend* reader, const int id, Value* *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -692,18 +692,25 @@ void Manager::SendEntry(const ReaderFrontend* reader, const int id, const Value* return; } + int readFields; if ( i->filters[id]->filter_type == TABLE_FILTER ) { - SendEntryTable(reader, id, vals); + readFields = SendEntryTable(reader, id, vals); } else if ( i->filters[id]->filter_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - SendEventFilterEvent(reader, type, id, vals); + readFields = SendEventFilterEvent(reader, type, id, vals); } else { assert(false); } + for ( int i = 0; i < readFields; i++ ) { + delete vals[i]; + } + delete [] vals; + + } -void Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Value* const *vals) { +int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); bool updated = false; @@ -733,7 +740,7 @@ void Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const V // ok, exact duplicate filter->lastDict->Remove(idxhash); filter->currDict->Insert(idxhash, h); - return; + return filter->num_val_fields + filter->num_idx_fields; } else { assert( filter->num_val_fields > 0 ); // updated @@ -794,11 +801,11 @@ void Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const V if ( !updated ) { // throw away. Hence - we quit. And remove the entry from the current dictionary... delete(filter->currDict->RemoveEntry(idxhash)); - return; + return filter->num_val_fields + filter->num_idx_fields; } else { // keep old one filter->currDict->Insert(idxhash, h); - return; + return filter->num_val_fields + filter->num_idx_fields; } } @@ -809,7 +816,7 @@ void Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const V HashKey* k = filter->tab->ComputeHash(idxval); if ( !k ) { reporter->InternalError("could not hash"); - return; + return filter->num_val_fields + filter->num_idx_fields; } filter->tab->Assign(idxval, k, valval); @@ -842,6 +849,9 @@ void Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const V } } } + + + return filter->num_val_fields + filter->num_idx_fields; } @@ -926,9 +936,21 @@ void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { filter->lastDict = filter->currDict; filter->currDict = new PDict(InputHash); + + // Send event that the current update is indeed finished. 
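+	// Scripts can define a handler for Input::update_finished to learn when a
+	// complete read has been delivered; the btest changes further down move
+	// their result checks into such handlers instead of printing right after
+	// Input::force_update().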
+ + + EventHandler* handler = event_registry->Lookup("Input::update_finished"); + if ( handler == 0 ) { + reporter->InternalError("Input::update_finished not found!"); + } + + + Ref(i->id); + SendEvent(handler, 1, i->id); } -void Manager::Put(const ReaderFrontend* reader, int id, const Value* const *vals) { +void Manager::Put(const ReaderFrontend* reader, int id, Value* *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -951,7 +973,7 @@ void Manager::Put(const ReaderFrontend* reader, int id, const Value* const *vals } -void Manager::SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, int id, const Value* const *vals) { +int Manager::SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); bool updated = false; @@ -985,11 +1007,13 @@ void Manager::SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, } } - SendEvent(filter->event, out_vals); + SendEvent(filter->event, out_vals); + + return filter->fields->NumFields(); } -void Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const *vals) { +int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const *vals) { ReaderInfo *i = FindReader(reader); assert(i); @@ -1011,6 +1035,8 @@ void Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const } filter->tab->Assign(idxval, valval); + + return filter->num_idx_fields + filter->num_val_fields; } void Manager::Clear(const ReaderFrontend* reader, int id) { @@ -1028,7 +1054,7 @@ void Manager::Clear(const ReaderFrontend* reader, int id) { filter->tab->RemoveAll(); } -bool Manager::Delete(const ReaderFrontend* reader, int id, const Value* const *vals) { +bool Manager::Delete(const ReaderFrontend* reader, int id, Value* *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader"); @@ -1037,18 +1063,29 @@ bool Manager::Delete(const ReaderFrontend* reader, int id, const Value* const *v assert(i->HasFilter(id)); + bool success = false; + int readVals = 0; + if ( i->filters[id]->filter_type == TABLE_FILTER ) { TableFilter* filter = (TableFilter*) i->filters[id]; Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); - return( filter->tab->Delete(idxval) != 0 ); + readVals = filter->num_idx_fields + filter->num_val_fields; + success = ( filter->tab->Delete(idxval) != 0 ); } else if ( i->filters[id]->filter_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - SendEventFilterEvent(reader, type, id, vals); - return true; + readVals = SendEventFilterEvent(reader, type, id, vals); + success = true; } else { assert(false); return false; } + + for ( int i = 0; i < readVals; i++ ) { + delete vals[i]; + } + delete [] vals; + + return success; } void Manager::Error(ReaderFrontend* reader, const char* msg) @@ -1056,7 +1093,7 @@ void Manager::Error(ReaderFrontend* reader, const char* msg) reporter->Error("error with input reader for %s: %s", reader->Source().c_str(), msg); } -bool Manager::SendEvent(const string& name, const int num_vals, const Value* const *vals) +bool Manager::SendEvent(const string& name, const int num_vals, Value* *vals) { EventHandler* handler = event_registry->Lookup(name.c_str()); if ( handler == 0 ) { @@ -1078,6 +1115,11 @@ bool Manager::SendEvent(const string& name, const int num_vals, const Value* con mgr.Dispatch(new Event(handler, vl)); + for ( int i = 0; 
i < num_vals; i++ ) { + delete vals[i]; + } + delete [] vals; + return true; } diff --git a/src/input/Manager.h b/src/input/Manager.h index a0b98294ca..45c07895f2 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -44,15 +44,15 @@ protected: void Error(ReaderFrontend* reader, const char* msg); // for readers to write to input stream in direct mode (reporting new/deleted values directly) - void Put(const ReaderFrontend* reader, int id, const threading::Value* const *vals); + void Put(const ReaderFrontend* reader, int id, threading::Value* *vals); void Clear(const ReaderFrontend* reader, int id); - bool Delete(const ReaderFrontend* reader, int id, const threading::Value* const *vals); + bool Delete(const ReaderFrontend* reader, int id, threading::Value* *vals); // for readers to write to input stream in indirect mode (manager is monitoring new/deleted values) - void SendEntry(const ReaderFrontend* reader, const int id, const threading::Value* const *vals); + void SendEntry(const ReaderFrontend* reader, const int id, threading::Value* *vals); void EndCurrentSend(const ReaderFrontend* reader, const int id); - bool SendEvent(const string& name, const int num_vals, const threading::Value* const *vals); + bool SendEvent(const string& name, const int num_vals, threading::Value* *vals); ReaderBackend* CreateBackend(ReaderFrontend* frontend, bro_int_t type); @@ -60,9 +60,9 @@ protected: private: struct ReaderInfo; - void SendEntryTable(const ReaderFrontend* reader, int id, const threading::Value* const *vals); - void PutTable(const ReaderFrontend* reader, int id, const threading::Value* const *vals); - void SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, int id, const threading::Value* const *vals); + int SendEntryTable(const ReaderFrontend* reader, int id, const threading::Value* const *vals); + int PutTable(const ReaderFrontend* reader, int id, const threading::Value* const *vals); + int SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, int id, const threading::Value* const *vals); bool IsCompatibleType(BroType* t, bool atomic_only=false); diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 72c8f95d8e..f9992f5f0e 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -26,7 +26,7 @@ private: class PutMessage : public threading::OutputMessage { public: - PutMessage(ReaderFrontend* reader, int id, const Value* const *val) + PutMessage(ReaderFrontend* reader, int id, Value* *val) : threading::OutputMessage("Put", reader), id(id), val(val) {} @@ -37,12 +37,12 @@ public: private: int id; - const Value* const *val; + Value* *val; }; class DeleteMessage : public threading::OutputMessage { public: - DeleteMessage(ReaderFrontend* reader, int id, const Value* const *val) + DeleteMessage(ReaderFrontend* reader, int id, Value* *val) : threading::OutputMessage("Delete", reader), id(id), val(val) {} @@ -52,7 +52,7 @@ public: private: int id; - const Value* const *val; + Value* *val; }; class ClearMessage : public threading::OutputMessage { @@ -72,7 +72,7 @@ private: class SendEventMessage : public threading::OutputMessage { public: - SendEventMessage(ReaderFrontend* reader, const string& name, const int num_vals, const Value* const *val) + SendEventMessage(ReaderFrontend* reader, const string& name, const int num_vals, Value* *val) : threading::OutputMessage("SendEvent", reader), name(name), num_vals(num_vals), val(val) {} @@ -83,14 +83,14 @@ public: private: const string name; const int num_vals; - const Value* const *val; 
+ Value* *val; }; class SendEntryMessage : public threading::OutputMessage { public: - SendEntryMessage(ReaderFrontend* reader, const int id, const Value* const *val) + SendEntryMessage(ReaderFrontend* reader, const int id, Value* *val) : threading::OutputMessage("SendEntry", reader), - id(id), val(val) {} + id(id), val(val) { } virtual bool Process() { input_mgr->SendEntry(Object(), id, val); @@ -99,13 +99,13 @@ public: private: const int id; - const Value* const *val; + Value* *val; }; class EndCurrentSendMessage : public threading::OutputMessage { public: EndCurrentSendMessage(ReaderFrontend* reader, int id) - : threading::OutputMessage("SendEntry", reader), + : threading::OutputMessage("EndCurrentSend", reader), id(id) {} virtual bool Process() { @@ -145,12 +145,12 @@ void ReaderBackend::Error(const char *msg) } */ -void ReaderBackend::Put(int id, const Value* const *val) +void ReaderBackend::Put(int id, Value* *val) { SendOut(new PutMessage(frontend, id, val)); } -void ReaderBackend::Delete(int id, const Value* const *val) +void ReaderBackend::Delete(int id, Value* *val) { SendOut(new DeleteMessage(frontend, id, val)); } @@ -160,7 +160,7 @@ void ReaderBackend::Clear(int id) SendOut(new ClearMessage(frontend, id)); } -void ReaderBackend::SendEvent(const string& name, const int num_vals, const Value* const *vals) +void ReaderBackend::SendEvent(const string& name, const int num_vals, Value* *vals) { SendOut(new SendEventMessage(frontend, name, num_vals, vals)); } @@ -170,7 +170,7 @@ void ReaderBackend::EndCurrentSend(int id) SendOut(new EndCurrentSendMessage(frontend, id)); } -void ReaderBackend::SendEntry(int id, const Value* const *vals) +void ReaderBackend::SendEntry(int id, Value* *vals) { SendOut(new SendEntryMessage(frontend, id, vals)); } diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index a37daaf4b6..c12d187545 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -50,15 +50,15 @@ protected: // A thread-safe version of fmt(). (stolen from logwriter) const char* Fmt(const char* format, ...); - void SendEvent(const string& name, const int num_vals, const threading::Value* const *vals); + void SendEvent(const string& name, const int num_vals, threading::Value* *vals); // Content-sendinf-functions (simple mode). Including table-specific stuff that simply is not used if we have no table - void Put(int id, const threading::Value* const *val); - void Delete(int id, const threading::Value* const *val); + void Put(int id, threading::Value* *val); + void Delete(int id, threading::Value* *val); void Clear(int id); // Table-functions (tracking mode): Only changed lines are propagated. 
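	// (In tracking mode the input manager hashes each SendEntry() batch and
	// compares it against the previous read: unchanged entries are skipped,
	// new or changed ones reach the script layer, and entries missing from
	// the new read are removed once EndCurrentSend() arrives.)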
- void SendEntry(int id, const threading::Value* const *vals); + void SendEntry(int id, threading::Value* *vals); void EndCurrentSend(int id); diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index a7f9a4d2f6..0dac33d5e8 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -56,6 +56,7 @@ private: const threading::Field* const* fields; }; + ReaderFrontend::ReaderFrontend(bro_int_t type) { disabled = initialized = false; ty_name = ""; diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index e798f69a36..095d74bf11 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -445,10 +445,12 @@ bool Ascii::DoUpdate() { SendEntry((*it).first, fields); - for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { + /* Do not do this, ownership changes to other thread + * for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { delete fields[i]; } delete [] fields; + */ } } diff --git a/src/main.cc b/src/main.cc index 9df06aa0a0..8205b6de0b 100644 --- a/src/main.cc +++ b/src/main.cc @@ -295,6 +295,7 @@ void terminate_bro() log_mgr->Terminate(); thread_mgr->Terminate(); + mgr.Drain(); delete timer_mgr; delete dns_mgr; diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index d008d2e5e8..7b571e753c 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -111,7 +111,7 @@ void Manager::Process() else { - string s = msg->Name() + " failed, terminating thread"; + string s = msg->Name() + " failed, terminating thread " + t->Name() + " (in ThreadManager)"; reporter->Error("%s", s.c_str()); t->Stop(); } diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index b7782b9a05..5f77a1c9f8 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -267,7 +267,7 @@ void MsgThread::Run() if ( ! 
result ) { - string s = msg->Name() + " failed, terminating thread"; + string s = msg->Name() + " failed, terminating thread (MsgThread)"; Error(s.c_str()); Stop(); break; diff --git a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro index d1b6659eb6..3b75220625 100644 --- a/testing/btest/scripts/base/frameworks/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/input/basic.bro @@ -48,7 +48,10 @@ event bro_init() Input::create_stream(A::INPUT, [$source="input.log"]); Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); Input::force_update(A::INPUT); - print servers; Input::remove_tablefilter(A::INPUT, "ssh"); Input::remove_stream(A::INPUT); } + +event Input::update_finished(id: Input::ID) { + print servers; +} diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro index 88838cc8d6..712a877960 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro @@ -34,5 +34,9 @@ event bro_init() Input::create_stream(A::INPUT, [$source="input.log"]); Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); Input::force_update(A::INPUT); +} + +event Input::update_finished(id: Input::ID) { print servers; } + diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro index fc4d862cd3..7b62ddcddd 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro @@ -34,5 +34,9 @@ event bro_init() Input::create_stream(A::INPUT, [$source="input.log"]); Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers]); Input::force_update(A::INPUT); +} + +event Input::update_finished(id: Input::ID) { print servers; } + diff --git a/testing/btest/scripts/base/frameworks/input/port.bro b/testing/btest/scripts/base/frameworks/input/port.bro index c14892ae36..65d73c54f7 100644 --- a/testing/btest/scripts/base/frameworks/input/port.bro +++ b/testing/btest/scripts/base/frameworks/input/port.bro @@ -39,3 +39,10 @@ event bro_init() Input::remove_tablefilter(A::INPUT, "input"); Input::remove_stream(A::INPUT); } + +event Input::update_finished(id: Input::ID) { + print servers[1.2.3.4]; + print servers[1.2.3.5]; + print servers[1.2.3.6]; +} + diff --git a/testing/btest/scripts/base/frameworks/input/predicate.bro b/testing/btest/scripts/base/frameworks/input/predicate.bro index 5e6bae7b62..bc1ab89bb2 100644 --- a/testing/btest/scripts/base/frameworks/input/predicate.bro +++ b/testing/btest/scripts/base/frameworks/input/predicate.bro @@ -42,6 +42,9 @@ event bro_init() $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } ]); Input::force_update(A::INPUT); +} + +event Input::update_finished(id: Input::ID) { if ( 1 in servers ) { print "VALID"; } diff --git a/testing/btest/scripts/base/frameworks/input/twofilters.bro b/testing/btest/scripts/base/frameworks/input/twofilters.bro index 5af664e0e9..d5bff0c5bb 100644 --- a/testing/btest/scripts/base/frameworks/input/twofilters.bro +++ b/testing/btest/scripts/base/frameworks/input/twofilters.bro @@ -35,6 +35,8 @@ type Val: record { global destination1: table[int] of Val = table(); global destination2: table[int] of Val = table(); 
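+# The 'done' flag added below keeps these checks from running more than once:
+# with two table filters attached to the same stream, Input::update_finished
+# can be raised once per filter whose read completes.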
+global done: bool = F; + event bro_init() { # first read in the old stuff into the table... @@ -45,6 +47,15 @@ event bro_init() Input::add_tablefilter(A::INPUT, [$name="input2",$idx=Idx, $val=Val, $destination=destination2]); Input::force_update(A::INPUT); +} + +event Input::update_finished(id: Input::ID) { + if ( done == T ) { + return; + } + + done = T; + if ( 1 in destination1 ) { print "VALID"; } @@ -90,6 +101,4 @@ event bro_init() if ( 7 in destination2 ) { print "VALID"; } - - } From 1f8b299aaf37c5d03994c8ed9f6f7acaaba9a98b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 14 Feb 2012 10:12:09 -0800 Subject: [PATCH 093/651] Shortening file names a bit. --- src/Attr.cc | 2 +- src/CMakeLists.txt | 2 +- src/RemoteSerializer.cc | 2 +- src/logging/Manager.cc | 4 ++-- src/logging/WriterBackend.cc | 2 +- src/logging/WriterFrontend.cc | 2 +- src/logging/writers/Ascii.cc | 4 ++-- src/threading/{SerializationTypes.cc => SerialTypes.cc} | 2 +- src/threading/{SerializationTypes.h => SerialTypes.h} | 0 9 files changed, 10 insertions(+), 10 deletions(-) rename src/threading/{SerializationTypes.cc => SerialTypes.cc} (99%) rename src/threading/{SerializationTypes.h => SerialTypes.h} (100%) diff --git a/src/Attr.cc b/src/Attr.cc index 40c6c1a75c..82d9c9ddc7 100644 --- a/src/Attr.cc +++ b/src/Attr.cc @@ -5,7 +5,7 @@ #include "Attr.h" #include "Expr.h" #include "Serializer.h" -#include "threading/SerializationTypes.h" +#include "threading/SerialTypes.h" const char* attr_name(attr_tag t) { diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 7a3cc4babf..67d82c577a 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -411,7 +411,7 @@ set(bro_SRCS threading/BasicThread.cc threading/Manager.cc threading/MsgThread.cc - threading/SerializationTypes.cc + threading/SerialTypes.cc logging/Manager.cc logging/WriterBackend.cc diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index ba2598c018..948dfddaff 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -184,7 +184,7 @@ #include "File.h" #include "Conn.h" #include "Reporter.h" -#include "threading/SerializationTypes.h" +#include "threading/SerialTypes.h" #include "logging/Manager.h" extern "C" { diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 5f9f1c4222..6078a1e566 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -7,6 +7,8 @@ #include "../NetVar.h" #include "../Net.h" +#include "threading/SerialTypes.h" + #include "Manager.h" #include "WriterFrontend.h" #include "WriterBackend.h" @@ -14,8 +16,6 @@ #include "writers/Ascii.h" #include "writers/None.h" -#include "threading/SerializationTypes.h" - using namespace logging; using threading::Value; using threading::Field; diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 0ffedf073c..f4e48ebaef 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -1,10 +1,10 @@ // See the file "COPYING" in the main distribution directory for copyright. #include "util.h" +#include "threading/SerialTypes.h" #include "WriterBackend.h" #include "WriterFrontend.h" -#include "../threading/SerializationTypes.h" // Messages sent from backend to frontend (i.e., "OutputMessages"). 
diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index b0e780f27d..02f1a188d8 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -1,9 +1,9 @@ #include "Net.h" +#include "threading/SerialTypes.h" #include "WriterFrontend.h" #include "WriterBackend.h" -#include "../threading/SerializationTypes.h" using threading::Value; using threading::Field; diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index c974877175..0a101feb79 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -3,10 +3,10 @@ #include #include -#include "../../NetVar.h" +#include "NetVar.h" +#include "threading/SerialTypes.h" #include "Ascii.h" -#include "../../threading/SerializationTypes.h" using namespace logging; using namespace writer; diff --git a/src/threading/SerializationTypes.cc b/src/threading/SerialTypes.cc similarity index 99% rename from src/threading/SerializationTypes.cc rename to src/threading/SerialTypes.cc index f74de6ce57..f35d1fc6b0 100644 --- a/src/threading/SerializationTypes.cc +++ b/src/threading/SerialTypes.cc @@ -1,7 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "SerializationTypes.h" +#include "SerialTypes.h" #include "../RemoteSerializer.h" diff --git a/src/threading/SerializationTypes.h b/src/threading/SerialTypes.h similarity index 100% rename from src/threading/SerializationTypes.h rename to src/threading/SerialTypes.h From a850cc59922118a8dd62e5c9c9e9c6abe22bd942 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 15 Feb 2012 15:14:04 -0800 Subject: [PATCH 094/651] make filter removal and stream closure asynchronous. --- src/input/Manager.cc | 64 ++++++++++++++++++--- src/input/Manager.h | 5 +- src/input/ReaderBackend.cc | 110 +++++++++++++++++------------------- src/input/ReaderBackend.h | 13 ++--- src/input/ReaderFrontend.cc | 29 ++++++++-- src/input/ReaderFrontend.h | 57 +++++++++++++++++++ 6 files changed, 200 insertions(+), 78 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 79d42fe71f..6655ae5e82 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -534,7 +534,6 @@ bool Manager::RemoveStream(const EnumVal* id) { if ( (*s)->id == id ) { i = (*s); - readers.erase(s); // remove from vector break; } } @@ -545,11 +544,29 @@ bool Manager::RemoveStream(const EnumVal* id) { i->reader->Finish(); - delete(i); - return true; } +bool Manager::RemoveStreamContinuation(const ReaderFrontend* reader) { + ReaderInfo *i = 0; + + + for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) + { + if ( (*s)->reader && (*s)->reader == reader ) + { + i = *s; + delete(i); + readers.erase(s); + return true; + } + } + + reporter->Error("Stream not found in RemoveStreamContinuation"); + return false; + +} + bool Manager::UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend) { for ( int i = 0; i < rec->NumFields(); i++ ) { @@ -615,20 +632,51 @@ bool Manager::RemoveTableFilter(EnumVal* id, const string &name) { return false; } - map::iterator it = i->filters.find(id->InternalInt()); - if ( it == i->filters.end() ) { + bool found = false; + int filterId; + + for ( map::iterator it = i->filters.begin(); it != i->filters.end(); ++it ) { + if ( (*it).second->name == name ) { + found = true; + filterId = (*it).first; + + if ( (*it).second->filter_type != TABLE_FILTER ) { + reporter->Error("Trying to remove filter %s of wrong type", name.c_str()); + return false; + } + + break; + } + } + + if 
( !found ) { + reporter->Error("Trying to remove nonexisting filter %s", name.c_str()); return false; } - if ( i->filters[id->InternalInt()]->filter_type != TABLE_FILTER ) { - // wrong type; + i->reader->RemoveFilter(filterId); + + return true; +} + +bool Manager::RemoveFilterContinuation(const ReaderFrontend* reader, const int filterId) { + ReaderInfo *i = FindReader(reader); + if ( i == 0 ) { + reporter->Error("Reader not found"); + return false; + } + + map::iterator it = i->filters.find(filterId); + if ( it == i->filters.end() ) { + reporter->Error("Got RemoveFilterContinuation where filter nonexistant for %d", filterId); return false; } delete (*it).second; i->filters.erase(it); + return true; -} +} bool Manager::RemoveEventFilter(EnumVal* id, const string &name) { ReaderInfo *i = FindReader(id); diff --git a/src/input/Manager.h b/src/input/Manager.h index 45c07895f2..9e35dd2199 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -32,13 +32,14 @@ public: protected: friend class ReaderFrontend; - friend class ErrorMessage; friend class PutMessage; friend class DeleteMessage; friend class ClearMessage; friend class SendEventMessage; friend class SendEntryMessage; friend class EndCurrentSendMessage; + friend class FilterRemovedMessage; + friend class ReaderFinishedMessage; // Reports an error for the given reader. void Error(ReaderFrontend* reader, const char* msg); @@ -56,6 +57,8 @@ protected: ReaderBackend* CreateBackend(ReaderFrontend* frontend, bro_int_t type); + bool RemoveFilterContinuation(const ReaderFrontend* reader, const int filterId); + bool RemoveStreamContinuation(const ReaderFrontend* reader); private: struct ReaderInfo; diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index f9992f5f0e..5cb4fe34f2 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -9,21 +9,6 @@ using threading::Field; namespace input { -class ErrorMessage : public threading::OutputMessage { -public: - ErrorMessage(ReaderFrontend* reader, string message) - : threading::OutputMessage("Error", reader), - message(message) {} - - virtual bool Process() { - input_mgr->Error(Object(), message.c_str()); - return true; - } - -private: - string message; -}; - class PutMessage : public threading::OutputMessage { public: PutMessage(ReaderFrontend* reader, int id, Value* *val) @@ -104,7 +89,7 @@ private: class EndCurrentSendMessage : public threading::OutputMessage { public: - EndCurrentSendMessage(ReaderFrontend* reader, int id) + EndCurrentSendMessage(ReaderFrontend* reader, const int id) : threading::OutputMessage("EndCurrentSend", reader), id(id) {} @@ -114,9 +99,46 @@ public: } private: - int id; + const int id; }; +class FilterRemovedMessage : public threading::OutputMessage { +public: + FilterRemovedMessage(ReaderFrontend* reader, const int id) + : threading::OutputMessage("FilterRemoved", reader), + id(id) {} + + virtual bool Process() { + return input_mgr->RemoveFilterContinuation(Object(), id); + } + +private: + const int id; +}; + +class ReaderFinishedMessage : public threading::OutputMessage { +public: + ReaderFinishedMessage(ReaderFrontend* reader) + : threading::OutputMessage("ReaderFinished", reader) {} + + virtual bool Process() { + return input_mgr->RemoveStreamContinuation(Object()); + } + +private: +}; + + +class DisableMessage : public threading::OutputMessage +{ +public: + DisableMessage(ReaderFrontend* writer) + : threading::OutputMessage("Disable", writer) {} + + virtual bool Process() { Object()->SetDisable(); return true; } +}; + + 
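+// How the new acknowledgement messages fit together: the main thread asks the
+// reader child thread to remove a filter or to finish, the child does the work
+// and then sends FilterRemovedMessage / ReaderFinishedMessage back out, and only
+// when the input manager processes that reply does it drop its own state for
+// the filter or stream. Any values still queued for that filter are therefore
+// delivered before the bookkeeping disappears.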
ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() { buf = 0; @@ -133,18 +155,6 @@ ReaderBackend::~ReaderBackend() } -void ReaderBackend::Error(const string &msg) -{ - SendOut(new ErrorMessage(frontend, msg)); -} - -/* -void ReaderBackend::Error(const char *msg) -{ - SendOut(new ErrorMessage(frontend, string(msg))); -} */ - - void ReaderBackend::Put(int id, Value* *val) { SendOut(new PutMessage(frontend, id, val)); @@ -181,6 +191,11 @@ bool ReaderBackend::Init(string arg_source) // disable if DoInit returns error. disabled = !DoInit(arg_source); + + if ( disabled ) { + DisableFrontend(); + } + return !disabled; } @@ -192,13 +207,17 @@ bool ReaderBackend::AddFilter(int id, int arg_num_fields, bool ReaderBackend::RemoveFilter(int id) { - return DoRemoveFilter(id); + bool success = DoRemoveFilter(id); + SendOut(new FilterRemovedMessage(frontend, id)); + return success; // yes, I know, noone reads this. } void ReaderBackend::Finish() { DoFinish(); disabled = true; + DisableFrontend(); + SendOut(new ReaderFinishedMessage(frontend)); } bool ReaderBackend::Update() @@ -206,32 +225,9 @@ bool ReaderBackend::Update() return DoUpdate(); } - -// stolen from logwriter -const char* ReaderBackend::Fmt(const char* format, ...) - { - if ( ! buf ) - buf = (char*) malloc(buf_len); - - va_list al; - va_start(al, format); - int n = safe_vsnprintf(buf, buf_len, format, al); - va_end(al); - - if ( (unsigned int) n >= buf_len ) - { // Not enough room, grow the buffer. - buf_len = n + 32; - buf = (char*) realloc(buf, buf_len); - - // Is it portable to restart? - va_start(al, format); - n = safe_vsnprintf(buf, buf_len, format, al); - va_end(al); - } - - return buf; - } - - +void ReaderBackend::DisableFrontend() +{ + SendOut(new DisableMessage(frontend)); +} } diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index c12d187545..de4a056c22 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -26,6 +26,12 @@ public: void Finish(); bool Update(); + + /** + * Disables the frontend that has instantiated this backend. Once + * disabled,the frontend will not send any further message over. + */ + void DisableFrontend(); protected: // Methods that have to be overwritten by the individual readers @@ -40,16 +46,9 @@ protected: // update file contents to logmgr virtual bool DoUpdate() = 0; - // Reports an error to the user. - void Error(const string &msg); - //void Error(const char *msg); - // The following methods return the information as passed to Init(). const string Source() const { return source; } - // A thread-safe version of fmt(). (stolen from logwriter) - const char* Fmt(const char* format, ...); - void SendEvent(const string& name, const int num_vals, threading::Value* *vals); // Content-sendinf-functions (simple mode). Including table-specific stuff that simply is not used if we have no table diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index 0dac33d5e8..0fdf90d9ad 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -5,8 +5,9 @@ #include "ReaderBackend.h" #include "threading/MsgThread.h" -namespace input { +// FIXME: cleanup of disabled inputreaders is missing. we need this, because stuff can e.g. fail in init and might never be removed afterwards. 
+namespace input { class InitMessage : public threading::InputMessage { @@ -56,6 +57,19 @@ private: const threading::Field* const* fields; }; +class RemoveFilterMessage : public threading::InputMessage +{ +public: + RemoveFilterMessage(ReaderBackend* backend, const int id) + : threading::InputMessage("RemoveFilter", backend), + id(id) { } + + virtual bool Process() { return Object()->RemoveFilter(id); } + +private: + const int id; +}; + ReaderFrontend::ReaderFrontend(bro_int_t type) { disabled = initialized = false; @@ -103,15 +117,20 @@ void ReaderFrontend::AddFilter(const int id, const int arg_num_fields, const thr backend->SendIn(new AddFilterMessage(backend, id, arg_num_fields, fields)); } +void ReaderFrontend::RemoveFilter(const int id) { + if ( disabled ) + return; + + backend->SendIn(new RemoveFilterMessage(backend, id)); +} + string ReaderFrontend::Name() const - { +{ if ( source.size() ) return ty_name; return ty_name + "/" + source; - } - - +} } diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index 876082d9a6..97433c8af6 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -10,20 +10,77 @@ namespace input { class Manager; +/** + * Bridge class between the input::Manager and backend input threads. The + * Manager instantiates one \a ReaderFrontend for each open input stream. + * Each frontend in turns instantiates a ReaderBackend-derived class + * internally that's specific to the particular input format. That backend + * spawns a new thread, and it receives messages from the frontend that + * correspond to method called by the manager. + */ class ReaderFrontend { public: + /** + * Constructor. + * + * type: The backend writer type, with the value corresponding to the + * script-level \c Input::Reader enum (e.g., \a READER_ASCII). The + * frontend will internally instantiate a ReaderBackend of the + * corresponding type. + * + * Frontends must only be instantiated by the main thread. + */ ReaderFrontend(bro_int_t type); + /** + * Destructor. + * + * Frontends must only be destroyed by the main thread. + */ virtual ~ReaderFrontend(); + /** + * Initializes the reader. + * + * This method generates a message to the backend reader and triggers + * the corresponding message there. If the backend method fails, it + * sends a message back that will asynchronously call Disable(). + * + * See ReaderBackend::Init() for arguments. + * This method must only be called from the main thread. + */ void Init(string arg_source); void Update(); + /* * The method takes + * ownership of \a fields. */ + void AddFilter( const int id, const int arg_num_fields, const threading::Field* const* fields ); + void RemoveFilter ( const int id ); + void Finish(); + /** + * Disables the reader frontend. From now on, all method calls that + * would normally send message over to the backend, turn into no-ops. + * Note though that it does not stop the backend itself, use Finsh() + * to do that as well (this method is primarily for use as callback + * when the backend wants to disable the frontend). + * + * Disabled frontend will eventually be discarded by the + * input::Manager. + * + * This method must only be called from the main thread. + */ + void SetDisable() { disabled = true; } + + /** + * Returns true if the reader frontend has been disabled with SetDisable(). + */ + bool Disabled() { return disabled; } + /** * Returns a descriptive name for the reader, including the type of * the backend and the source used. 
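// Taken together, the frontend API documented in this patch is driven by the
// input manager roughly in this order (a sketch; 'reader_type' stands for the
// numeric value of the script-level Input::Reader enum, and error handling is
// omitted):
//
//	ReaderFrontend* r = new ReaderFrontend(reader_type);
//	r->Init("input.log");			// backend thread opens the source
//	r->AddFilter(0, num_fields, fields);	// frontend takes ownership of 'fields'
//	r->Update();				// force a (re-)read
//	r->RemoveFilter(0);			// acknowledged later via FilterRemovedMessage
//	r->Finish();				// acknowledged later via ReaderFinishedMessage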
From 84883348ecc11643b6e96cecdbcdefce2bef45ba Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 16 Feb 2012 11:27:10 -0800 Subject: [PATCH 095/651] interface documentation. to a big part stolen from the logging framework --- scripts/base/frameworks/input/main.bro | 8 +- src/input/Manager.cc | 33 +++-- src/input/Manager.h | 122 ++++++++++++++++- src/input/ReaderBackend.h | 182 +++++++++++++++++++++++-- src/input/ReaderFrontend.h | 31 ++++- src/types.bif | 6 + 6 files changed, 350 insertions(+), 32 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index cac1aca54a..7e581070e6 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -4,11 +4,14 @@ module Input; export { + redef enum Input::ID += { TABLE_READ }; - + ## The default input reader used. Defaults to `READER_ASCII`. const default_reader = READER_ASCII &redef; + const default_mode = MANUAL &redef; + ## Stream decription type used for the `create_stream` method type StreamDescription: record { ## String that allows the reader to find the source. @@ -17,6 +20,9 @@ export { ## Reader to use for this steam reader: Reader &default=default_reader; + + ## Read mode to use for this stream + mode: Mode &default=default_mode; }; ## TableFilter description type used for the `add_tablefilter` method. diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 6655ae5e82..d4e5cdaee9 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -685,18 +685,28 @@ bool Manager::RemoveEventFilter(EnumVal* id, const string &name) { return false; } - map::iterator it = i->filters.find(id->InternalInt()); - if ( it == i->filters.end() ) { + bool found = false; + int filterId; + for ( map::iterator it = i->filters.begin(); it != i->filters.end(); ++it ) { + if ( (*it).second->name == name ) { + found = true; + filterId = (*it).first; + + if ( (*it).second->filter_type != EVENT_FILTER ) { + reporter->Error("Trying to remove filter %s of wrong type", name.c_str()); + return false; + } + + break; + } + } + + if ( !found ) { + reporter->Error("Trying to remove nonexisting filter %s", name.c_str()); return false; } - if ( i->filters[id->InternalInt()]->filter_type != EVENT_FILTER ) { - // wrong type; - return false; - } - - delete (*it).second; - i->filters.erase(it); + i->reader->RemoveFilter(filterId); return true; } @@ -1136,11 +1146,6 @@ bool Manager::Delete(const ReaderFrontend* reader, int id, Value* *vals) { return success; } -void Manager::Error(ReaderFrontend* reader, const char* msg) -{ - reporter->Error("error with input reader for %s: %s", reader->Source().c_str(), msg); -} - bool Manager::SendEvent(const string& name, const int num_vals, Value* *vals) { EventHandler* handler = event_registry->Lookup(name.c_str()); diff --git a/src/input/Manager.h b/src/input/Manager.h index 9e35dd2199..be84ee416d 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -1,4 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. +// +// Class for managing input streams and filters #ifndef INPUT_MANAGER_H #define INPUT_MANAGER_H @@ -16,18 +18,100 @@ namespace input { class ReaderFrontend; class ReaderBackend; +/** + * Singleton class for managing input streams. + */ class Manager { public: + /** + * Constructor. + */ Manager(); + + /** + * Destructor. + */ + ~Manager(); + /** + * Creates a new input stream. + * + * @param id The enum value corresponding the input stream. 
+ * + * @param description A record of script type \c Input:StreamDescription. + * + * This method corresponds directly to the internal BiF defined in + * input.bif, which just forwards here. + */ ReaderFrontend* CreateStream(EnumVal* id, RecordVal* description); + + /** + * Force update on a input stream. + * Forces a re-read of the whole input source. + * Usually used, when an input stream is opened in managed mode. + * Otherwise, this can be used to trigger a input source check before a heartbeat message arrives. + * May be ignored by the reader. + * + * @param id The enum value corresponding the input stream. + * + * This method corresponds directly to the internal BiF defined in + * input.bif, which just forwards here. + */ bool ForceUpdate(const EnumVal* id); + + /** + * Deletes an existing input stream + * + * @param id The enum value corresponding the input stream. + * + * This method corresponds directly to the internal BiF defined in + * input.bif, which just forwards here. + */ bool RemoveStream(const EnumVal* id); + /** + * Add a filter to an input source, which will write the data from the data source into + * a Bro table. + * + * @param id The enum value corresponding the input stream. + * + * @param description A record of script type \c Input:TableFilter. + * + * This method corresponds directly to the internal BiF defined in + * input.bif, which just forwards here. + */ bool AddTableFilter(EnumVal *id, RecordVal* filter); + + /** + * Removes a tablefilter from the log stream + * + * @param id The enum value corresponding the input stream. + * + * This method corresponds directly to the internal BiF defined in + * input.bif, which just forwards here. + */ bool RemoveTableFilter(EnumVal* id, const string &name); + /** + * Add a filter to an input source, which sends events for read input data. + * + * @param id The enum value corresponding the input stream. + * + * @param description A record of script type \c Input:EventFilter. + * + * This method corresponds directly to the internal BiF defined in + * input.bif, which just forwards here. + */ bool AddEventFilter(EnumVal *id, RecordVal* filter); + + /** + * Removes a eventfilter from the log stream + * + * @param id The enum value corresponding the input stream. + * + * This method corresponds directly to the internal BiF defined in + * input.bif, which just forwards here. + */ bool RemoveEventFilter(EnumVal* id, const string &name); protected: @@ -41,46 +125,76 @@ protected: friend class FilterRemovedMessage; friend class ReaderFinishedMessage; - // Reports an error for the given reader. - void Error(ReaderFrontend* reader, const char* msg); - - // for readers to write to input stream in direct mode (reporting new/deleted values directly) + // For readers to write to input stream in direct mode (reporting new/deleted values directly) + // Functions take ownership of threading::Value fields void Put(const ReaderFrontend* reader, int id, threading::Value* *vals); void Clear(const ReaderFrontend* reader, int id); bool Delete(const ReaderFrontend* reader, int id, threading::Value* *vals); // for readers to write to input stream in indirect mode (manager is monitoring new/deleted values) + // Functions take ownership of threading::Value fields void SendEntry(const ReaderFrontend* reader, const int id, threading::Value* *vals); void EndCurrentSend(const ReaderFrontend* reader, const int id); + // Allows readers to directly send Bro events. + // The num_vals and vals must be the same the named event expects. 
+ // Takes ownership of threading::Value fields bool SendEvent(const string& name, const int num_vals, threading::Value* *vals); + // Instantiates a new ReaderBackend of the given type (note that + // doing so creates a new thread!). ReaderBackend* CreateBackend(ReaderFrontend* frontend, bro_int_t type); + // Functions are called from the ReaderBackend to notify the manager, that a filter has been removed + // or a stream has been closed. + // Used to prevent race conditions where data for a specific filter is still in the queue when the + // RemoveFilter directive is executed by the main thread. + // This makes sure all data that has ben queued for a filter is still received. bool RemoveFilterContinuation(const ReaderFrontend* reader, const int filterId); bool RemoveStreamContinuation(const ReaderFrontend* reader); private: struct ReaderInfo; + // SendEntry implementation for Tablefilter int SendEntryTable(const ReaderFrontend* reader, int id, const threading::Value* const *vals); + + // Put implementation for Tablefilter int PutTable(const ReaderFrontend* reader, int id, const threading::Value* const *vals); + + // SendEntry and Put implementation for Eventfilter int SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, int id, const threading::Value* const *vals); + // Checks is a bro type can be used for data reading. The equivalend in threading cannot be used, because we have support different types + // from the log framework bool IsCompatibleType(BroType* t, bool atomic_only=false); + // Check if a record is made up of compatible types and return a list of all fields that are in the record in order. + // Recursively unrolls records bool UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend); + // Send events void SendEvent(EventHandlerPtr ev, const int numvals, ...); void SendEvent(EventHandlerPtr ev, list events); + // get a hashkey for a set of threading::Values HashKey* HashValues(const int num_elements, const threading::Value* const *vals); + + // Get the memory used by a specific value int GetValueLength(const threading::Value* val); + // Copies the raw data in a specific threading::Value to position sta int CopyValue(char *data, const int startpos, const threading::Value* val); + // Convert Threading::Value to an internal Bro Type (works also with Records) Val* ValueToVal(const threading::Value* val, BroType* request_type); + + // Convert Threading::Value to an internal Bro List type Val* ValueToIndexVal(int num_fields, const RecordType* type, const threading::Value* const *vals); + + // Converts a threading::value to a record type. mostly used by ValueToVal RecordVal* ValueToRecordVal(const threading::Value* const *vals, RecordType *request_type, int* position); + + // Converts a Bro ListVal to a RecordVal given the record type RecordVal* ListValToRecordVal(ListVal* list, RecordType *request_type, int* position); ReaderInfo* FindReader(const ReaderFrontend* reader); diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index de4a056c22..c6fbaac715 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -11,20 +11,90 @@ namespace input { class ReaderFrontend; +/** + * Base class for reader implementation. When the input:Manager creates a + * new input stream, it instantiates a ReaderFrontend. That then in turn + * creates a ReaderBackend of the right type. The frontend then forwards + * message over the backend as its methods are called. 
+ * + * All of this methods must be called only from the corresponding child + * thread (the constructor is the one exception.) + */ class ReaderBackend : public threading::MsgThread { public: + /** + * Constructor. + * + * @param frontend The frontend reader that created this backend. The + * *only* purpose of this value is to be passed back via messages as + * a argument to callbacks. One must not otherwise access the + * frontend, it's running in a different thread. + * + * @param frontend pointer to the reader frontend + */ ReaderBackend(ReaderFrontend* frontend); + /** + * Destructor. + */ virtual ~ReaderBackend(); - + + /** + * One-time initialization of the reader to define the input source. + * + * @param arg_source A string left to the interpretation of the reader + * implementation; it corresponds to the value configured on the + * script-level for the input stream. + * + * @param num_fields The number of log fields for the stream. + * + * @param fields An array of size \a num_fields with the log fields. + * The methods takes ownership of the array. + * + * @return False if an error occured. + */ bool Init(string arg_source); + /** + * Add an input filter to the input stream + * + * @param id identifier of the input stream + * + * @param arg_num_fields number of fields contained in \a fields + * + * @param fields the types and names of the fields to be retrieved from the input source + * + * @return False if an error occured. + */ bool AddFilter( int id, int arg_num_fields, const threading::Field* const* fields ); + + /** + * Remove an input filter to the input stream + * + * @param id identifier of the input stream + * + * @return False if an error occured. + */ bool RemoveFilter ( int id ); + /** + * Finishes reading from this input stream in a regular fashion. Must not be + * called if an error has been indicated earlier. After calling this, + * no further reading from the stream can be performed + * + * @return False if an error occured. + */ void Finish(); + /** + * Force trigger an update of the input stream. + * The action that will be taken depends on the current read mode and the individual input backend + * + * An backend can choose to ignore this. + * + * @return False if an error occured. + */ bool Update(); /** @@ -34,30 +104,126 @@ public: void DisableFrontend(); protected: - // Methods that have to be overwritten by the individual readers + // Methods that have to be overwritten by the individual readers + + /** + * Reader-specific intialization method. + * + * A reader implementation must override this method. If it returns + * false, it will be assumed that a fatal error has occured that + * prevents the reader from further operation; it will then be + * disabled and eventually deleted. When returning false, an + * implementation should also call Error() to indicate what happened. + */ virtual bool DoInit(string arg_sources) = 0; + /** + * Reader-specific method to add a filter. + * + * A reader implementation must override this method. + */ virtual bool DoAddFilter( int id, int arg_num_fields, const threading::Field* const* fields ) = 0; + /** + * Reader-specific method to remove a filter. + * + * A reader implementation must override this method. + */ virtual bool DoRemoveFilter( int id ) = 0; + /** + * Reader-specific method implementing input finalization at + * termination. + * + * A reader implementation must override this method but it can just + * ignore calls if an input source must not be closed. 
+ * + * After the method is called, the writer will be deleted. If an error occurs + * during shutdown, an implementation should also call Error() to indicate what + * happened. + */ virtual void DoFinish() = 0; - // update file contents to logmgr + /** + * Reader-specific method implementing the forced update trigger + * + * A reader implementation must override this method but it can just ignore + * calls, if a forced update does not fit the input source or the current input + * reading mode + */ virtual bool DoUpdate() = 0; - // The following methods return the information as passed to Init(). + /** + * Returns the input source as passed into the constructor. + */ const string Source() const { return source; } + /** + * Method allowing a reader to send a specified bro event. + * Vals must match the values expected by the bro event. + * + * @param name name of the bro event to send + * + * @param num_vals number of entries in \a vals + * + * @param vals the values to be given to the event + */ void SendEvent(const string& name, const int num_vals, threading::Value* *vals); - // Content-sendinf-functions (simple mode). Including table-specific stuff that simply is not used if we have no table + // Content-sending-functions (simple mode). Including table-specific stuff that simply is not used if we have no table + /** + * Method allowing a reader to send a list of values read for a specific filter back to the manager. + * + * If the filter points to a table, the values are inserted into the table; if it points to an event, the event is raised + * + * @param id the input filter id for which the values are sent + * + * @param val list of threading::Values expected by the filter + */ void Put(int id, threading::Value* *val); + + /** + * Method allowing a reader to delete a specific value from a bro table. + * + * If the receiving filter is an event, only a removed event is raised + * + * @param id the input filter id for which the values are sent + * + * @param val list of threading::Values expected by the filter + */ void Delete(int id, threading::Value* *val); + + /** + * Method allowing a reader to clear a value from a bro table. + * + * If the receiving filter is an event, this is ignored. + * + * @param id the input filter id for which the values are sent + */ void Clear(int id); - // Table-functions (tracking mode): Only changed lines are propagated. + // Content-sending-functions (tracking mode): Only changed lines are propagated. + + + /** + * Method allowing a reader to send a list of values read for a specific filter back to the manager. + * + * If the filter points to a table, the values are inserted into the table; if it points to an event, the event is raised. + * + * @param id the input filter id for which the values are sent + * + * @param val list of threading::Values expected by the filter + */ void SendEntry(int id, threading::Value* *vals); + + /** + * Method telling the manager, that the current list of entries sent by SendEntry is finished. + * + * For table filters, all entries that were not updated since the last EndCurrentSend will be deleted, because they are no longer + * present in the input source + * + * @param id the input filter id for which the values are sent + */ void EndCurrentSend(int id); @@ -68,11 +234,7 @@ private: string source; - // When an error occurs, this method is called to set a flag marking the - // writer as disabled. - bool disabled; - bool Disabled() { return disabled; } // For implementing Fmt(). 
char* buf; diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index 97433c8af6..c29071612d 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -51,15 +51,37 @@ public: */ void Init(string arg_source); + /** + * Force an update of the current input source. Actual action depends on + * the opening mode and on the input source. + * + * This method generates a message to the backend reader and triggers + * the corresponding message there. + * This method must only be called from the main thread. + */ void Update(); - /* * The method takes - * ownership of \a fields. */ - + /** + * Add a filter to the current input source. + * + * See ReaderBackend::AddFilter for arguments. + * + * The method takes ownership of \a fields + */ void AddFilter( const int id, const int arg_num_fields, const threading::Field* const* fields ); + /** + * Removes a filter to the current input source. + */ void RemoveFilter ( const int id ); + /** + * Finalizes writing to this tream. + * + * This method generates a message to the backend reader and triggers + * the corresponding message there. + * This method must only be called from the main thread. + */ void Finish(); /** @@ -92,6 +114,9 @@ public: protected: friend class Manager; + /** + * Returns the source as passed into the constructor + */ const string Source() const { return source; }; string ty_name; // Name of the backend type. Set by the manager. diff --git a/src/types.bif b/src/types.bif index 9256fe3bd0..1529319197 100644 --- a/src/types.bif +++ b/src/types.bif @@ -185,4 +185,10 @@ enum ID %{ Unknown, %} +enum Mode %{ + MANUAL, + REREAD, + STREAM, +%} + module GLOBAL; From bc6ebe53cc40f28aebff0c744e33e92fdc6328ac Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 16 Feb 2012 11:31:30 -0800 Subject: [PATCH 096/651] remove unnecessary error function from manger --- src/logging/Manager.cc | 6 ------ src/logging/Manager.h | 3 --- 2 files changed, 9 deletions(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 5ab2e5bc77..b47a2f9eff 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -1181,12 +1181,6 @@ void Manager::Terminate() } } -void Manager::Error(WriterFrontend* writer, const char* msg) - { - reporter->Error("error with writer for %s: %s", - writer->Path().c_str(), msg); - } - // Timer which on dispatching rotates the filter. class RotationTimer : public Timer { public: diff --git a/src/logging/Manager.h b/src/logging/Manager.h index b65d22e3c0..d931bfaef8 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -172,9 +172,6 @@ protected: bool FinishedRotation(WriterFrontend* writer, string new_name, string old_name, double open, double close, bool terminating); - // Reports an error for the given writer. - void Error(WriterFrontend* writer, const char* msg); - // Deletes the values as passed into Write(). void DeleteVals(int num_fields, threading::Value** vals); From 91943c26559590c858f8c7e30db6ffc41a68ec59 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 16 Feb 2012 15:03:20 -0800 Subject: [PATCH 097/651] * rework script interface, add autostart stream flag that starts up a stream automatically when first filter has been added ( probably the most common use case ) * change internal reader interface again * remove some quite embarassing bugs that must have been in the interface for rather long * add different read methods to script & internal interface (like normal, streaming, etc). Not implemented in ascii reader yet. 
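A rough sketch of how the reworked script interface is meant to be used
(the A::INPUT stream, the Idx/Val records and the destination table are
placeholders borrowed from the existing btest scripts, not part of this
change):

    module A;

    export {
        redef enum Input::ID += { INPUT };
    }

    type Idx: record { i: int; };
    type Val: record { s: string; };

    global servers: table[int] of Val = table();

    event bro_init()
    {
        # autostart defaults to T, so the backend starts reading as soon
        # as the first filter has been added; the explicit
        # Input::force_update() calls in the old tests are no longer
        # needed. Passing $autostart=F (and, later, a non-default $mode)
        # keeps the reader idle until explicitly triggered.
        Input::create_stream(A::INPUT, [$source="input.log"]);
        Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val,
                                          $destination=servers]);
    }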
--- scripts/base/frameworks/input/main.bro | 3 + src/input/Manager.cc | 24 ++++---- src/input/ReaderBackend.cc | 32 +++++++++- src/input/ReaderBackend.h | 46 +++++++++++++-- src/input/ReaderFrontend.cc | 31 ++++++++-- src/input/ReaderFrontend.h | 13 ++++- src/input/readers/Ascii.cc | 58 +++++++++++++++++-- src/input/readers/Ascii.h | 10 +++- src/threading/Manager.cc | 2 +- src/types.bif | 6 +- .../scripts/base/frameworks/input/basic.bro | 1 - .../scripts/base/frameworks/input/event.bro | 1 - .../frameworks/input/onecolumn-norecord.bro | 1 - .../frameworks/input/onecolumn-record.bro | 1 - .../scripts/base/frameworks/input/port.bro | 1 - .../base/frameworks/input/predicate.bro | 1 - .../base/frameworks/input/tableevent.bro | 1 - .../base/frameworks/input/twofilters.bro | 2 +- 18 files changed, 191 insertions(+), 43 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 7e581070e6..445f947106 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -23,6 +23,9 @@ export { ## Read mode to use for this stream mode: Mode &default=default_mode; + + ## Automatically start the input stream after the first filter has been added + autostart: bool &default=T; }; ## TableFilter description type used for the `add_tablefilter` method. diff --git a/src/input/Manager.cc b/src/input/Manager.cc index d4e5cdaee9..4438a07c6c 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -214,6 +214,10 @@ ReaderFrontend* Manager::CreateStream(EnumVal* id, RecordVal* description) } EnumVal* reader = description->LookupWithDefault(rtype->FieldOffset("reader"))->AsEnumVal(); + EnumVal* mode = description->LookupWithDefault(rtype->FieldOffset("mode"))->AsEnumVal(); + Val *autostart = description->LookupWithDefault(rtype->FieldOffset("autostart")); + bool do_autostart = ( autostart->InternalInt() == 1 ); + Unref(autostart); // Ref'd by LookupWithDefault ReaderFrontend* reader_obj = new ReaderFrontend(reader->InternalInt()); assert(reader_obj); @@ -229,16 +233,7 @@ ReaderFrontend* Manager::CreateStream(EnumVal* id, RecordVal* description) readers.push_back(info); - reader_obj->Init(source); - /* if ( success == false ) { - assert( RemoveStream(id) ); - return 0; - } */ - reader_obj->Update(); - /* if ( success == false ) { - assert ( RemoveStream(id) ); - return 0; - } */ + reader_obj->Init(source, mode->InternalInt(), do_autostart); return reader_obj; @@ -785,7 +780,7 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va //reporter->Error("Hashing %d val fields", i->num_val_fields); HashKey* valhash = 0; if ( filter->num_val_fields > 0 ) - HashValues(filter->num_val_fields, vals+filter->num_idx_fields); + valhash = HashValues(filter->num_val_fields, vals+filter->num_idx_fields); //reporter->Error("Result: %d", (uint64_t) valhash->Hash()); @@ -794,6 +789,13 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va InputHash *h = filter->lastDict->Lookup(idxhash); if ( h != 0 ) { // seen before + + valhash->Hash(); + + h->valhash->Hash(); + + + if ( filter->num_val_fields == 0 || h->valhash->Hash() == valhash->Hash() ) { // ok, exact duplicate filter->lastDict->Remove(idxhash); diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 5cb4fe34f2..cfc74d33a8 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -185,24 +185,42 @@ void ReaderBackend::SendEntry(int id, Value* *vals) SendOut(new SendEntryMessage(frontend, id, 
vals)); } -bool ReaderBackend::Init(string arg_source) +bool ReaderBackend::Init(string arg_source, int mode, bool arg_autostart) { source = arg_source; + autostart = arg_autostart; + SetName("InputReader/"+source); // disable if DoInit returns error. - disabled = !DoInit(arg_source); + disabled = !DoInit(arg_source, mode); if ( disabled ) { + Error("Init failed"); DisableFrontend(); } return !disabled; } +bool ReaderBackend::StartReading() { + int success = DoStartReading(); + + if ( success == false ) { + DisableFrontend(); + } + + return success; +} + bool ReaderBackend::AddFilter(int id, int arg_num_fields, const Field* const * arg_fields) { - return DoAddFilter(id, arg_num_fields, arg_fields); + bool success = DoAddFilter(id, arg_num_fields, arg_fields); + if ( success && autostart) { + autostart = false; + return StartReading(); + } + return success; } bool ReaderBackend::RemoveFilter(int id) @@ -230,4 +248,12 @@ void ReaderBackend::DisableFrontend() SendOut(new DisableMessage(frontend)); } +bool ReaderBackend::DoHeartbeat(double network_time, double current_time) +{ + MsgThread::DoHeartbeat(network_time, current_time); + + return true; +} + + } diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index c6fbaac715..e34db3e559 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -51,9 +51,24 @@ public: * @param fields An array of size \a num_fields with the log fields. * The methods takes ownership of the array. * + * @param mode the opening mode for the input source + * + * @param autostart automatically start the input source after the first filter has been added + * * @return False if an error occured. */ - bool Init(string arg_source); + bool Init(string arg_source, int mode, bool autostart); + + /** + * One-time start method of the reader. + * + * This method is called from the scripting layer, after all filters have been added. + * No data should be read before this method is called. + * + * If autostart in Init is set to true, this method is called automatically by the backend after + * the first filter has been added. + */ + bool StartReading(); /** * Add an input filter to the input stream @@ -107,7 +122,8 @@ protected: // Methods that have to be overwritten by the individual readers /** - * Reader-specific intialization method. + * Reader-specific intialization method. Note that data may only be read from the input source + * after the Start function has been called. * * A reader implementation must override this method. If it returns * false, it will be assumed that a fatal error has occured that @@ -115,7 +131,19 @@ protected: * disabled and eventually deleted. When returning false, an * implementation should also call Error() to indicate what happened. */ - virtual bool DoInit(string arg_sources) = 0; + virtual bool DoInit(string arg_sources, int mode) = 0; + + /** + * Reader-specific start method. After this function has been called, data may be read from + * the input source and be sent to the specified filters + * + * A reader implementation must override this method. + * If it returns false, it will be assumed that a fatal error has occured + * that prevents the reader from further operation; it will then be + * disabled and eventually deleted. When returning false, an implementation + * should also call Error to indicate what happened. + */ + virtual bool DoStartReading() = 0; /** * Reader-specific method to add a filter. 
@@ -225,7 +253,14 @@ protected: * @param id the input filter id for which the values are sent */ void EndCurrentSend(int id); - + + /** + * Triggered by regular heartbeat messages from the main thread. + * + * This method can be overridden but once must call + * ReaderBackend::DoHeartbeat(). + */ + virtual bool DoHeartbeat(double network_time, double current_time); private: // Frontend that instantiated us. This object must not be access from @@ -238,7 +273,8 @@ private: // For implementing Fmt(). char* buf; - unsigned int buf_len; + unsigned int buf_len; + bool autostart; }; } diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index 0fdf90d9ad..f7fc23bf72 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -12,14 +12,16 @@ namespace input { class InitMessage : public threading::InputMessage { public: - InitMessage(ReaderBackend* backend, const string source) + InitMessage(ReaderBackend* backend, const string source, const int mode, const bool autostart) : threading::InputMessage("Init", backend), - source(source) { } + source(source), mode(mode), autostart(autostart) { } - virtual bool Process() { return Object()->Init(source); } + virtual bool Process() { return Object()->Init(source, mode, autostart); } private: const string source; + const int mode; + const bool autostart; }; class UpdateMessage : public threading::InputMessage @@ -42,6 +44,16 @@ public: virtual bool Process() { Object()->Finish(); return true; } }; +class StartReadingMessage : public threading::InputMessage +{ +public: + StartReadingMessage(ReaderBackend* backend) + : threading::InputMessage("StartReading", backend) + { } + + virtual bool Process() { Object()->StartReading(); return true; } +}; + class AddFilterMessage : public threading::InputMessage { public: @@ -83,17 +95,17 @@ ReaderFrontend::ReaderFrontend(bro_int_t type) { ReaderFrontend::~ReaderFrontend() { } -void ReaderFrontend::Init(string arg_source) { +void ReaderFrontend::Init(string arg_source, int mode, bool autostart) { if ( disabled ) return; if ( initialized ) - reporter->InternalError("writer initialize twice"); + reporter->InternalError("reader initialize twice"); source = arg_source; initialized = true; - backend->SendIn(new InitMessage(backend, arg_source)); + backend->SendIn(new InitMessage(backend, arg_source, mode, autostart)); } void ReaderFrontend::Update() { @@ -132,6 +144,13 @@ string ReaderFrontend::Name() const return ty_name + "/" + source; } +void ReaderFrontend::StartReading() { + if ( disabled ) + return; + + backend->SendIn(new StartReadingMessage(backend)); +} + } diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index c29071612d..d67ca299c0 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -49,7 +49,18 @@ public: * See ReaderBackend::Init() for arguments. * This method must only be called from the main thread. */ - void Init(string arg_source); + void Init(string arg_source, int mode, bool autostart); + + /** + * Start the reader. + * + * This methods starts the reader, after all necessary filters have been added. + * It is not necessary to call this function, if autostart has been set. + * If autostart has been set, the reader will be initialized automatically after the first filter has been added + * + * This method must only be called from the main thread. + */ + void StartReading(); /** * Force an update of the current input source. 
Actual action depends on diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 095d74bf11..cd1723e5e4 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -8,10 +8,14 @@ #include "../../threading/SerializationTypes.h" +#define MANUAL 0 +#define REREAD 1 + using namespace input::reader; using threading::Value; using threading::Field; + FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position) : name(arg_name), type(arg_type) { @@ -75,16 +79,41 @@ void Ascii::DoFinish() } } -bool Ascii::DoInit(string path) +bool Ascii::DoInit(string path, int arg_mode) { + started = false; fname = path; + mode = arg_mode; file = new ifstream(path.c_str()); if ( !file->is_open() ) { - Error(Fmt("cannot open %s", fname.c_str())); + Error(Fmt("Init: cannot open %s", fname.c_str())); return false; } + if ( ( mode != MANUAL ) && (mode != REREAD) ) { + Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); + return false; + } + + return true; +} + +bool Ascii::DoStartReading() { + if ( started == true ) { + Error("Started twice"); + return false; + } + + started = true; + switch ( mode ) { + case MANUAL: + DoUpdate(); + break; + default: + assert(false); + } + return true; } @@ -132,7 +161,7 @@ bool Ascii::ReadHeader() { map fields; - // construcr list of field names. + // construct list of field names. istringstream splitstream(line); int pos=0; while ( splitstream ) { @@ -146,6 +175,7 @@ bool Ascii::ReadHeader() { for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { + (*it).second.columnMap.clear(); for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { const Field* field = (*it).second.fields[i]; @@ -372,7 +402,6 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { // read the entire file and send appropriate thingies back to InputMgr bool Ascii::DoUpdate() { - // dirty, fix me. (well, apparently after trying seeking, etc - this is not that bad) if ( file && file->is_open() ) { file->close(); @@ -418,6 +447,7 @@ bool Ascii::DoUpdate() { fit != (*it).second.columnMap.end(); fit++ ){ + if ( (*fit).position > pos || (*fit).secondary_position > pos ) { Error(Fmt("Not enough fields in line %s. Found %d fields, want positions %d and %d", line.c_str(), pos, (*fit).position, (*fit).secondary_position)); return false; @@ -455,6 +485,7 @@ bool Ascii::DoUpdate() { } + //file->clear(); // remove end of file evil bits //file->seekg(0, ios::beg); // and seek to start. 
@@ -463,3 +494,22 @@ bool Ascii::DoUpdate() { } return true; } + +bool Ascii::DoHeartbeat(double network_time, double current_time) +{ + ReaderBackend::DoHeartbeat(network_time, current_time); + + switch ( mode ) { + case MANUAL: + // yay, we do nothing :) + break; + case REREAD: + + + default: + assert(false); + } + + return true; +} + diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index a3bf5c21a6..766716e29d 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -39,7 +39,7 @@ public: protected: - virtual bool DoInit(string path); + virtual bool DoInit(string path, int mode); virtual bool DoAddFilter( int id, int arg_num_fields, const threading::Field* const* fields ); @@ -48,9 +48,13 @@ protected: virtual void DoFinish(); virtual bool DoUpdate(); + + virtual bool DoStartReading(); private: + virtual bool DoHeartbeat(double network_time, double current_time); + struct Filter { unsigned int num_fields; @@ -84,6 +88,10 @@ private: string unset_field; + int mode; + + bool started; + }; diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 7b571e753c..472d10139a 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -106,7 +106,7 @@ void Manager::Process() Message* msg = t->RetrieveOut(); - if ( msg->Process() && network_time ) + if ( msg->Process() ) //&& network_time ) // FIXME: ask robin again if he needs this. makes input interface not work in bro_init. did_process = true; else diff --git a/src/types.bif b/src/types.bif index 1529319197..e2a47a7ece 100644 --- a/src/types.bif +++ b/src/types.bif @@ -186,9 +186,9 @@ enum ID %{ %} enum Mode %{ - MANUAL, - REREAD, - STREAM, + MANUAL = 0, + REREAD = 1, + STREAM = 2, %} module GLOBAL; diff --git a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro index 3b75220625..156898edca 100644 --- a/testing/btest/scripts/base/frameworks/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/input/basic.bro @@ -47,7 +47,6 @@ event bro_init() # first read in the old stuff into the table... Input::create_stream(A::INPUT, [$source="input.log"]); Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); - Input::force_update(A::INPUT); Input::remove_tablefilter(A::INPUT, "ssh"); Input::remove_stream(A::INPUT); } diff --git a/testing/btest/scripts/base/frameworks/input/event.bro b/testing/btest/scripts/base/frameworks/input/event.bro index a07f0934a0..41eba1613c 100644 --- a/testing/btest/scripts/base/frameworks/input/event.bro +++ b/testing/btest/scripts/base/frameworks/input/event.bro @@ -38,5 +38,4 @@ event bro_init() { Input::create_stream(A::INPUT, [$source="input.log"]); Input::add_eventfilter(A::INPUT, [$name="input", $fields=Val, $ev=line]); - Input::force_update(A::INPUT); } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro index 712a877960..bcbba05a3e 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro @@ -33,7 +33,6 @@ event bro_init() # first read in the old stuff into the table... 
Input::create_stream(A::INPUT, [$source="input.log"]); Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); - Input::force_update(A::INPUT); } event Input::update_finished(id: Input::ID) { diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro index 7b62ddcddd..1c532ba6a9 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro @@ -33,7 +33,6 @@ event bro_init() # first read in the old stuff into the table... Input::create_stream(A::INPUT, [$source="input.log"]); Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers]); - Input::force_update(A::INPUT); } event Input::update_finished(id: Input::ID) { diff --git a/testing/btest/scripts/base/frameworks/input/port.bro b/testing/btest/scripts/base/frameworks/input/port.bro index 65d73c54f7..801d6bac3f 100644 --- a/testing/btest/scripts/base/frameworks/input/port.bro +++ b/testing/btest/scripts/base/frameworks/input/port.bro @@ -32,7 +32,6 @@ event bro_init() # first read in the old stuff into the table... Input::create_stream(A::INPUT, [$source="input.log"]); Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers]); - Input::force_update(A::INPUT); print servers[1.2.3.4]; print servers[1.2.3.5]; print servers[1.2.3.6]; diff --git a/testing/btest/scripts/base/frameworks/input/predicate.bro b/testing/btest/scripts/base/frameworks/input/predicate.bro index bc1ab89bb2..009911e6a8 100644 --- a/testing/btest/scripts/base/frameworks/input/predicate.bro +++ b/testing/btest/scripts/base/frameworks/input/predicate.bro @@ -41,7 +41,6 @@ event bro_init() Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } ]); - Input::force_update(A::INPUT); } event Input::update_finished(id: Input::ID) { diff --git a/testing/btest/scripts/base/frameworks/input/tableevent.bro b/testing/btest/scripts/base/frameworks/input/tableevent.bro index 36e8171689..0c86ac94b8 100644 --- a/testing/btest/scripts/base/frameworks/input/tableevent.bro +++ b/testing/btest/scripts/base/frameworks/input/tableevent.bro @@ -44,5 +44,4 @@ event bro_init() { Input::create_stream(A::LOG, [$source="input.log"]); Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=destination, $want_record=F,$ev=line]); - Input::force_update(A::LOG); } diff --git a/testing/btest/scripts/base/frameworks/input/twofilters.bro b/testing/btest/scripts/base/frameworks/input/twofilters.bro index d5bff0c5bb..260f73e58f 100644 --- a/testing/btest/scripts/base/frameworks/input/twofilters.bro +++ b/testing/btest/scripts/base/frameworks/input/twofilters.bro @@ -40,7 +40,7 @@ global done: bool = F; event bro_init() { # first read in the old stuff into the table... 
- Input::create_stream(A::INPUT, [$source="input.log"]); + Input::create_stream(A::INPUT, [$source="input.log", $autostart=F]); Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=destination1, $want_record=F, $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } ]); From d21a450f36ba621f5802f9c1c7b9f28ce2ec264d Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 16 Feb 2012 15:40:07 -0800 Subject: [PATCH 098/651] add streaming reads & automatic re-reading of files to ascii reader. completely untested, but compiles & old tests still work --- src/input/Manager.cc | 7 ---- src/input/readers/Ascii.cc | 70 ++++++++++++++++++++++++++++++-------- src/input/readers/Ascii.h | 1 + 3 files changed, 57 insertions(+), 21 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 4438a07c6c..ea4c5643fa 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -789,13 +789,6 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va InputHash *h = filter->lastDict->Lookup(idxhash); if ( h != 0 ) { // seen before - - valhash->Hash(); - - h->valhash->Hash(); - - - if ( filter->num_val_fields == 0 || h->valhash->Hash() == valhash->Hash() ) { // ok, exact duplicate filter->lastDict->Remove(idxhash); diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index cd1723e5e4..5a3569a95f 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -10,6 +10,11 @@ #define MANUAL 0 #define REREAD 1 +#define STREAM 2 + +#include +#include +#include using namespace input::reader; using threading::Value; @@ -84,6 +89,7 @@ bool Ascii::DoInit(string path, int arg_mode) started = false; fname = path; mode = arg_mode; + mtime = 0; file = new ifstream(path.c_str()); if ( !file->is_open() ) { @@ -91,7 +97,7 @@ bool Ascii::DoInit(string path, int arg_mode) return false; } - if ( ( mode != MANUAL ) && (mode != REREAD) ) { + if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); return false; } @@ -402,23 +408,58 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { // read the entire file and send appropriate thingies back to InputMgr bool Ascii::DoUpdate() { - // dirty, fix me. (well, apparently after trying seeking, etc - this is not that bad) - if ( file && file->is_open() ) { - file->close(); - } - file = new ifstream(fname.c_str()); - if ( !file->is_open() ) { - Error(Fmt("cannot open %s", fname.c_str())); - return false; + switch ( mode ) { + case REREAD: + // check if the file has changed + struct stat sb; + if ( stat(fname.c_str(), &sb) == -1 ) { + Error(Fmt("Could not get stat for %s", fname.c_str())); + return false; + } + + if ( sb.st_mtime <= mtime ) { + // no change + return true; + } + + mtime = sb.st_mtime; + // file changed. reread. + + // fallthrough + case MANUAL: + case STREAM: + + // dirty, fix me. (well, apparently after trying seeking, etc - this is not that bad) + if ( file && file->is_open() ) { + if ( mode == STREAM ) { + file->clear(); // remove end of file evil bits + break; + } + file->close(); + } + file = new ifstream(fname.c_str()); + if ( !file->is_open() ) { + Error(Fmt("cannot open %s", fname.c_str())); + return false; + } + + + if ( ReadHeader() == false ) { + return false; + } + + break; + default: + assert(false); + } + + // // file->seekg(0, ios::beg); // do not forget clear. 
- if ( ReadHeader() == false ) { - return false; - } string line; while ( GetLine(line ) ) { @@ -504,8 +545,9 @@ bool Ascii::DoHeartbeat(double network_time, double current_time) // yay, we do nothing :) break; case REREAD: - - + case STREAM: + DoUpdate(); + break; default: assert(false); } diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index 766716e29d..017e5630d4 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -91,6 +91,7 @@ private: int mode; bool started; + time_t mtime; }; From 4126b458ca002093bf964243bb5cdd8a45931544 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 20 Feb 2012 13:18:15 -0800 Subject: [PATCH 099/651] Automatic file re-refresh and streaming works. * simple testcase for file refresh (check for changes) and streaming reads * add events for simple put and delete operations * fix bugs in table filter events (type for first element was wrong) * and I think a couple of other small bugs --- .../scripts.base.frameworks.input.reread/out | 67 ++++++++++ .../scripts.base.frameworks.input.stream/out | 115 ++++++++++++++++++ .../scripts/base/frameworks/input/reread.bro | 92 ++++++++++++++ .../scripts/base/frameworks/input/stream.bro | 89 ++++++++++++++ 4 files changed, 363 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.reread/out create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.stream/out create mode 100644 testing/btest/scripts/base/frameworks/input/reread.bro create mode 100644 testing/btest/scripts/base/frameworks/input/stream.bro diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out new file mode 100644 index 0000000000..4234a5056d --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out @@ -0,0 +1,67 @@ +{ +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +{ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +{ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +done diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.stream/out b/testing/btest/Baseline/scripts.base.frameworks.input.stream/out new file mode 100644 index 0000000000..39b06c9092 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.stream/out @@ -0,0 +1,115 @@ +============EVENT============ +Input::EVENT_NEW +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============SERVERS============ +{ +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, 
sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +============EVENT============ +Input::EVENT_NEW +[i=-43] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============SERVERS============ +{ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +============EVENT============ +Input::EVENT_CHANGED +[i=-43] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============SERVERS============ +{ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +done diff --git a/testing/btest/scripts/base/frameworks/input/reread.bro b/testing/btest/scripts/base/frameworks/input/reread.bro new file mode 100644 index 0000000000..5058f4a068 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/reread.bro @@ -0,0 +1,92 @@ +# +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: btest-bg-run bro bro %INPUT +# @TEST-EXEC: sleep 3 +# @TEST-EXEC: cp input2.log input.log +# @TEST-EXEC: sleep 3 +# @TEST-EXEC: cp input3.log input.log +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input1.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input2.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input3.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 
EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load frameworks/communication/listen + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Input::ID += { INPUT }; +} + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +global outfile: file; + +global try: count; + +event bro_init() +{ + outfile = open ("../out"); + try = 0; + # first read in the old stuff into the table... + Input::create_stream(A::INPUT, [$source="../input.log", $mode=Input::REREAD]); + Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); +} + +event Input::update_finished(id: Input::ID) { + print outfile, servers; + + try = try + 1; + if ( try == 3 ) { + print outfile, "done"; + close(outfile); + Input::remove_tablefilter(A::INPUT, "ssh"); + Input::remove_stream(A::INPUT); + } +} diff --git a/testing/btest/scripts/base/frameworks/input/stream.bro b/testing/btest/scripts/base/frameworks/input/stream.bro new file mode 100644 index 0000000000..db368074aa --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/stream.bro @@ -0,0 +1,89 @@ +# +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: btest-bg-run bro bro %INPUT +# @TEST-EXEC: sleep 3 +# @TEST-EXEC: cat input2.log >> input.log +# @TEST-EXEC: sleep 3 +# @TEST-EXEC: cat input3.log >> input.log +# @TEST-EXEC: btest-bg-wait -k 3 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input1.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input2.log +T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input3.log +F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load frameworks/communication/listen + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Input::ID += { INPUT }; +} + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +global outfile: file; + +global try: count; + +event line(tpe: Input::Event, left: Idx, right: Val) { + print outfile, "============EVENT============"; + 
print outfile, tpe; + print outfile, left; + print outfile, right; + print outfile, "============SERVERS============"; + print outfile, servers; + + try = try + 1; + + if ( try == 3 ) { + print outfile, "done"; + close(outfile); + Input::remove_tablefilter(A::INPUT, "ssh"); + Input::remove_stream(A::INPUT); + } +} + +event bro_init() +{ + outfile = open ("../out"); + try = 0; + # first read in the old stuff into the table... + Input::create_stream(A::INPUT, [$source="../input.log", $mode=Input::STREAM]); + Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line]); +} + From 4f57817b1a7af9b4084a22d00e195c46afa3c3af Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 20 Feb 2012 13:20:29 -0800 Subject: [PATCH 100/651] ...forgotten in last commit. --- src/input/Manager.cc | 163 +++++++++++++++++++++++++++++++++---- src/input/readers/Ascii.cc | 56 +++++++++---- src/input/readers/Ascii.h | 5 +- 3 files changed, 191 insertions(+), 33 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index ea4c5643fa..66dadfdb2d 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -818,13 +818,6 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va } - Val* oldval = 0; - if ( updated == true ) { - assert(filter->num_val_fields > 0); - // in that case, we need the old value to send the event (if we send an event). - oldval = filter->tab->Lookup(idxval); - } - // call filter first to determine if we really add / change the entry if ( filter->pred ) { @@ -865,6 +858,13 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va } + Val* oldval = 0; + if ( updated == true ) { + assert(filter->num_val_fields > 0); + // in that case, we need the old value to send the event (if we send an event). + oldval = filter->tab->Lookup(idxval); + } + //i->tab->Assign(idxval, valval); HashKey* k = filter->tab->ComputeHash(idxval); if ( !k ) { @@ -884,21 +884,22 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va if ( filter->event ) { EnumVal* ev; - Ref(idxval); + int startpos = 0; + Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); if ( updated ) { // in case of update send back the old value. assert ( filter->num_val_fields > 0 ); ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); Ref(oldval); - SendEvent(filter->event, 3, ev, idxval, oldval); + SendEvent(filter->event, 3, ev, predidx, oldval); } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); Ref(valval); if ( filter->num_val_fields == 0 ) { - SendEvent(filter->event, 3, ev, idxval); + SendEvent(filter->event, 3, ev, predidx); } else { - SendEvent(filter->event, 3, ev, idxval, valval); + SendEvent(filter->event, 3, ev, predidx, valval); } } } @@ -973,10 +974,11 @@ void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { } if ( filter->event ) { - Ref(idx); + int startpos = 0; + Val* predidx = ListValToRecordVal(idx, filter->itype, &startpos); Ref(val); EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - SendEvent(filter->event, 3, ev, idx, val); + SendEvent(filter->event, 3, ev, predidx, val); } filter->tab->Delete(ih->idxkey); @@ -991,8 +993,6 @@ void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { filter->currDict = new PDict(InputHash); // Send event that the current update is indeed finished. 
- - EventHandler* handler = event_registry->Lookup("Input::update_finished"); if ( handler == 0 ) { reporter->InternalError("Input::update_finished not found!"); @@ -1077,6 +1077,7 @@ int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const * Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); Val* valval; + int position = filter->num_idx_fields; if ( filter->num_val_fields == 0 ) { @@ -1087,7 +1088,91 @@ int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const * valval = ValueToRecordVal(vals, filter->rtype, &position); } - filter->tab->Assign(idxval, valval); + // if we have a subscribed event, we need to figure out, if this is an update or not + // same for predicates + if ( filter->pred || filter->event ) { + bool updated = false; + Val* oldval = 0; + + if ( filter->num_val_fields > 0 ) { + // in that case, we need the old value to send the event (if we send an event). + oldval = filter->tab->Lookup(idxval, false); + } + + if ( oldval != 0 ) { + // it is an update + updated = true; + Ref(oldval); // have to do that, otherwise it may disappear in assign + } + + + // predicate if we want the update or not + if ( filter->pred ) { + EnumVal* ev; + int startpos = 0; + Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); + Ref(valval); + + if ( updated ) { + ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); + } else { + ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); + } + + val_list vl( 2 + (filter->num_val_fields > 0) ); // 2 if we don't have values, 3 otherwise. + vl.append(ev); + vl.append(predidx); + if ( filter->num_val_fields > 0 ) + vl.append(valval); + + Val* v = filter->pred->Call(&vl); + bool result = v->AsBool(); + Unref(v); + + if ( result == false ) { + // do nothing + Unref(idxval); + Unref(valval); + Unref(oldval); + return filter->num_val_fields + filter->num_idx_fields; + } + + } + + + filter->tab->Assign(idxval, valval); + + if ( filter->event ) { + EnumVal* ev; + int startpos = 0; + Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); + + if ( updated ) { // in case of update send back the old value. 
+ assert ( filter->num_val_fields > 0 ); + ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); + assert ( oldval != 0 ); + SendEvent(filter->event, 3, ev, predidx, oldval); + } else { + ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); + Ref(valval); + if ( filter->num_val_fields == 0 ) { + SendEvent(filter->event, 3, ev, predidx); + } else { + SendEvent(filter->event, 3, ev, predidx, valval); + } + } + + } + + + + + + } else { + // no predicates or other stuff + + filter->tab->Assign(idxval, valval); + } return filter->num_idx_fields + filter->num_val_fields; } @@ -1122,8 +1207,52 @@ bool Manager::Delete(const ReaderFrontend* reader, int id, Value* *vals) { if ( i->filters[id]->filter_type == TABLE_FILTER ) { TableFilter* filter = (TableFilter*) i->filters[id]; Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); + assert(idxval != 0); readVals = filter->num_idx_fields + filter->num_val_fields; - success = ( filter->tab->Delete(idxval) != 0 ); + bool filterresult = true; + + if ( filter->pred || filter->event ) { + Val *val = filter->tab->Lookup(idxval); + + if ( filter->pred ) { + Ref(val); + EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); + int startpos = 0; + Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); + + val_list vl(3); + vl.append(ev); + vl.append(predidx); + vl.append(val); + Val* v = filter->pred->Call(&vl); + filterresult = v->AsBool(); + Unref(v); + + if ( filterresult == false ) { + // keep it. + Unref(idxval); + success = true; + } + + } + + // only if filter = true -> no filtering + if ( filterresult && filter->event ) { + Ref(idxval); + assert(val != 0); + Ref(val); + EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); + SendEvent(filter->event, 3, ev, idxval, val); + } + } + + // only if filter = true -> no filtering + if ( filterresult ) { + success = ( filter->tab->Delete(idxval) != 0 ); + if ( !success ) { + reporter->Error("Internal error while deleting values from input table"); + } + } } else if ( i->filters[id]->filter_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); readVals = SendEventFilterEvent(reader, type, id, vals); diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 5a3569a95f..b0b046b75b 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -91,16 +91,22 @@ bool Ascii::DoInit(string path, int arg_mode) mode = arg_mode; mtime = 0; + if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { + Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); + return false; + } + file = new ifstream(path.c_str()); if ( !file->is_open() ) { Error(Fmt("Init: cannot open %s", fname.c_str())); return false; } - - if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { - Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); + + if ( ReadHeader(false) == false ) { + Error(Fmt("Init: cannot open %s; headers are incorrect", fname.c_str())); + file->close(); return false; - } + } return true; } @@ -114,6 +120,8 @@ bool Ascii::DoStartReading() { started = true; switch ( mode ) { case MANUAL: + case REREAD: + case STREAM: DoUpdate(); break; default: @@ -157,16 +165,25 @@ bool Ascii::HasFilter(int id) { } -bool Ascii::ReadHeader() { +bool Ascii::ReadHeader(bool useCached) { // try to read the header line... 
string line; - if ( !GetLine(line) ) { - Error("could not read first line"); - return false; - } - map fields; + if ( !useCached ) { + if ( !GetLine(line) ) { + Error("could not read first line"); + return false; + } + + + + headerline = line; + + } else { + line = headerline; + } + // construct list of field names. istringstream splitstream(line); int pos=0; @@ -179,7 +196,7 @@ bool Ascii::ReadHeader() { pos++; } - + //printf("Updating fields from description %s\n", line.c_str()); for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { (*it).second.columnMap.clear(); @@ -433,6 +450,7 @@ bool Ascii::DoUpdate() { if ( file && file->is_open() ) { if ( mode == STREAM ) { file->clear(); // remove end of file evil bits + ReadHeader(true); // in case filters changed break; } file->close(); @@ -444,7 +462,7 @@ bool Ascii::DoUpdate() { } - if ( ReadHeader() == false ) { + if ( ReadHeader(false) == false ) { return false; } @@ -512,9 +530,14 @@ bool Ascii::DoUpdate() { fpos++; } + //printf("fpos: %d, second.num_fields: %d\n", fpos, (*it).second.num_fields); assert ( (unsigned int) fpos == (*it).second.num_fields ); - SendEntry((*it).first, fields); + if ( mode == STREAM ) { + Put((*it).first, fields); + } else { + SendEntry((*it).first, fields); + } /* Do not do this, ownership changes to other thread * for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { @@ -530,9 +553,12 @@ bool Ascii::DoUpdate() { //file->clear(); // remove end of file evil bits //file->seekg(0, ios::beg); // and seek to start. - for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { - EndCurrentSend((*it).first); + if ( mode != STREAM ) { + for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { + EndCurrentSend((*it).first); + } } + return true; } diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index 017e5630d4..d2376e4fe1 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -69,7 +69,7 @@ private: TransportProto StringToProto(const string &proto); - bool ReadHeader(); + bool ReadHeader(bool useCached); threading::Value* EntryToVal(string s, FieldMapping type); bool GetLine(string& str); @@ -87,6 +87,9 @@ private: string empty_field; string unset_field; + + // keep a copy of the headerline to determine field locations when filters change + string headerline; int mode; From fe5b376d2858d599c2a14ddc8003c59c217550c5 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 20 Feb 2012 13:23:25 -0800 Subject: [PATCH 101/651] ...and update for table event testcase after fix. --- .../scripts.base.frameworks.input.tableevent/out | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out index e32a2aea00..54048a86b8 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out @@ -1,21 +1,21 @@ Input::EVENT_NEW -1 +[i=1] T Input::EVENT_NEW -2 +[i=2] T Input::EVENT_NEW -3 +[i=3] F Input::EVENT_NEW -4 +[i=4] F Input::EVENT_NEW -5 +[i=5] F Input::EVENT_NEW -6 +[i=6] F Input::EVENT_NEW -7 +[i=7] T From edd30da082d288d295939429689c0a74a0787340 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 20 Feb 2012 15:30:21 -0800 Subject: [PATCH 102/651] better testcase & fix a few bugs (that took way too long to find). 
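For reference, the table-filter event handling that these testcases exercise
boils down to the following sketch (A::INPUT is assumed to be declared as in
the other input btests; the Idx/Val records are abbreviated here):

    type Idx: record { i: int; };
    type Val: record { b: bool; s: string; };

    global servers: table[int] of Val = table();

    event line(tpe: Input::Event, left: Idx, right: Val)
    {
        # Fires with Input::EVENT_NEW, Input::EVENT_CHANGED or
        # Input::EVENT_REMOVED as entries appear, change or vanish
        # between reads of the input file.
        print tpe, left, right;
    }

    event bro_init()
    {
        Input::create_stream(A::INPUT, [$source="input.log", $mode=Input::REREAD]);
        Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val,
                                          $destination=servers, $ev=line]);
    }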
--- src/input/Manager.cc | 34 ++++++++----- src/input/readers/Ascii.cc | 21 +++++--- .../scripts.base.frameworks.input.reread/out | 48 +++++++++++++++++++ .../scripts/base/frameworks/input/reread.bro | 11 ++++- 4 files changed, 95 insertions(+), 19 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 66dadfdb2d..243567e0e6 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -22,7 +22,7 @@ using threading::Value; using threading::Field; struct InputHash { - HashKey* valhash; + hash_t valhash; HashKey* idxkey; // does not need ref or whatever - if it is present here, it is also still present in the TableVal. }; @@ -776,11 +776,15 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va //reporter->Error("Hashing %d index fields", i->num_idx_fields); HashKey* idxhash = HashValues(filter->num_idx_fields, vals); - //reporter->Error("Result: %d", (uint64_t) idxhash->Hash()); + //reporter->Error("Result: %d\n", (uint64_t) idxhash->Hash()); //reporter->Error("Hashing %d val fields", i->num_val_fields); - HashKey* valhash = 0; - if ( filter->num_val_fields > 0 ) - valhash = HashValues(filter->num_val_fields, vals+filter->num_idx_fields); + + hash_t valhash = 0; + if ( filter->num_val_fields > 0 ) { + HashKey* valhashkey = HashValues(filter->num_val_fields, vals+filter->num_idx_fields); + valhash = valhashkey->Hash(); + delete(valhashkey); + } //reporter->Error("Result: %d", (uint64_t) valhash->Hash()); @@ -789,7 +793,7 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va InputHash *h = filter->lastDict->Lookup(idxhash); if ( h != 0 ) { // seen before - if ( filter->num_val_fields == 0 || h->valhash->Hash() == valhash->Hash() ) { + if ( filter->num_val_fields == 0 || h->valhash == valhash ) { // ok, exact duplicate filter->lastDict->Remove(idxhash); filter->currDict->Insert(idxhash, h); @@ -862,7 +866,7 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va if ( updated == true ) { assert(filter->num_val_fields > 0); // in that case, we need the old value to send the event (if we send an event). 
- oldval = filter->tab->Lookup(idxval); + oldval = filter->tab->Lookup(idxval, false); } //i->tab->Assign(idxval, valval); @@ -872,6 +876,8 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va return filter->num_val_fields + filter->num_idx_fields; } + if ( filter->event && updated ) + Ref(oldval); // otherwise it is no longer accessible after the assignment filter->tab->Assign(idxval, k, valval); InputHash* ih = new InputHash(); @@ -891,7 +897,6 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va assert ( filter->num_val_fields > 0 ); ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); - Ref(oldval); SendEvent(filter->event, 3, ev, predidx, oldval); } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); @@ -1468,7 +1473,7 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { int length = 0; memcpy(data+startpos, (const void*) &(val->val.port_val.port), sizeof(val->val.port_val.port)); length += sizeof(val->val.port_val.port); - memcpy(data+startpos, (const void*) &(val->val.port_val.proto), sizeof(val->val.port_val.proto)); + memcpy(data+startpos+length, (const void*) &(val->val.port_val.proto), sizeof(val->val.port_val.proto)); length += sizeof(val->val.port_val.proto); return length; break; @@ -1500,7 +1505,7 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { int length = 0; memcpy(data+startpos,(const char*) &(val->val.subnet_val.width), sizeof(val->val.subnet_val.width) ); length += sizeof(val->val.subnet_val.width); - memcpy(data+startpos, (const char*) &(val->val.subnet_val.net), sizeof(val->val.subnet_val.net) ); + memcpy(data+startpos+length, (const char*) &(val->val.subnet_val.net), sizeof(val->val.subnet_val.net) ); length += sizeof(val->val.subnet_val.net); return length; break; @@ -1508,7 +1513,8 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { case TYPE_TABLE: { int length = 0; - for ( int i = 0; i < val->val.set_val.size; i++ ) { + int j = val->val.set_val.size; + for ( int i = 0; i < j; i++ ) { length += CopyValue(data, startpos+length, val->val.set_val.vals[i]); } return length; @@ -1531,6 +1537,7 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { } reporter->InternalError("internal error"); + assert(false); return 0; } @@ -1550,13 +1557,16 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { if ( data == 0 ) { reporter->InternalError("Could not malloc?"); } + memset(data, 0, length); for ( int i = 0; i < num_elements; i++ ) { const Value* val = vals[i]; position += CopyValue(data, position, val); } + hash_t key = HashKey::HashBytes(data, length); + assert(position == length); - return new HashKey(data, length); + return new HashKey(data, length, key, true); } diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index b0b046b75b..d4b3d91e00 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -268,7 +268,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { if ( s.compare(unset_field) == 0 ) { // field is not set... 
return new Value(field.type, false); } - + switch ( field.type ) { case TYPE_ENUM: case TYPE_STRING: @@ -302,6 +302,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { break; case TYPE_PORT: + val->val.port_val.port = 0; val->val.port_val.port = atoi(s.c_str()); val->val.port_val.proto = TRANSPORT_UNKNOWN; break; @@ -312,19 +313,27 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { val->val.subnet_val.width = atoi(width.c_str()); string addr = s.substr(0, pos); s = addr; - // NOTE: dotted_to_addr BREAKS THREAD SAFETY! it uses reporter. - // Solve this some other time.... #ifdef BROv6 if ( s.find(':') != s.npos ) { - uint32* addr = dotted_to_addr6(s.c_str()); + uint32* addr = new uint32[4]; + if ( inet_pton(AF_INET6, s.c_str(), addr) <= 0 ) { + Error(Fmt("Bad IPv6 address: %s", s.c_str())); + val->val.subnet_val.net[0] = val->val.subnet_val.net[1] = val->val.subnet_val.net[2] = val->val.subnet_val.net[3] = 0; + } copy_addr(val->val.subnet_val.net, addr); delete addr; } else { val->val.subnet_val.net[0] = val->val.subnet_val.net[1] = val->val.subnet_val.net[2] = 0; - val->val.subnet_val.net[3] = dotted_to_addr(s.c_str()); + if ( inet_aton(s.c_str(), &(val->val.subnet_val.net[3])) <= 0 ) { + Error(Fmt("Bad addres: %s", s.c_str())); + val->val.subnet_val.net[3] = 0; + } } #else - val->val.subnet_val.net = dotted_to_addr(s.c_str()); + if ( inet_aton(s.c_str(), (in_addr*) &(val->val.subnet_val.net)) <= 0 ) { + Error(Fmt("Bad addres: %s", s.c_str())); + val->val.subnet_val.net = 0; + } #endif break; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out index 4234a5056d..9516cb2a92 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out @@ -1,3 +1,19 @@ +============EVENT============ +Input::EVENT_NEW +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +==========SERVERS============ { [-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, @@ -12,6 +28,22 @@ BB }, vc=[10, 20, 30], ve=[]] } +============EVENT============ +Input::EVENT_NEW +[i=-43] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +==========SERVERS============ { [-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, @@ -38,6 +70,22 @@ BB }, vc=[10, 20, 30], ve=[]] } +============EVENT============ +Input::EVENT_CHANGED +[i=-43] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +==========SERVERS============ { [-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, diff --git a/testing/btest/scripts/base/frameworks/input/reread.bro b/testing/btest/scripts/base/frameworks/input/reread.bro index 5058f4a068..58df37af84 100644 --- a/testing/btest/scripts/base/frameworks/input/reread.bro +++ b/testing/btest/scripts/base/frameworks/input/reread.bro @@ -70,16 +70,25 @@ global outfile: file; 
global try: count; +event line(tpe: Input::Event, left: Idx, right: Val) { + print outfile, "============EVENT============"; + print outfile, tpe; + print outfile, left; + print outfile, right; +} + event bro_init() { outfile = open ("../out"); try = 0; # first read in the old stuff into the table... Input::create_stream(A::INPUT, [$source="../input.log", $mode=Input::REREAD]); - Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line]); } + event Input::update_finished(id: Input::ID) { + print outfile, "==========SERVERS============"; print outfile, servers; try = try + 1; From d5b413c4e719eed31a998c7ddc4d5361c8fbb670 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 20 Feb 2012 17:13:41 -0800 Subject: [PATCH 103/651] reduce number of needed hash operations --- src/input/Manager.cc | 26 ++++++++------------------ src/input/Manager.h | 2 -- src/input/readers/Ascii.cc | 1 - 3 files changed, 8 insertions(+), 21 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 243567e0e6..a7afdc3a78 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -782,8 +782,8 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va hash_t valhash = 0; if ( filter->num_val_fields > 0 ) { HashKey* valhashkey = HashValues(filter->num_val_fields, vals+filter->num_idx_fields); - valhash = valhashkey->Hash(); - delete(valhashkey); + valhash = valhashkey->Hash(); + delete(valhashkey); } //reporter->Error("Result: %d", (uint64_t) valhash->Hash()); @@ -873,19 +873,17 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va HashKey* k = filter->tab->ComputeHash(idxval); if ( !k ) { reporter->InternalError("could not hash"); - return filter->num_val_fields + filter->num_idx_fields; + assert(false); } + InputHash* ih = new InputHash(); + ih->idxkey = new HashKey(k->Key(), k->Size(), k->Hash()); + ih->valhash = valhash; + if ( filter->event && updated ) Ref(oldval); // otherwise it is no longer accessible after the assignment filter->tab->Assign(idxval, k, valval); - InputHash* ih = new InputHash(); - k = filter->tab->ComputeHash(idxval); - ih->idxkey = k; - ih->valhash = valhash; - //i->tab->Delete(k); - filter->currDict->Insert(idxhash, ih); if ( filter->event ) { @@ -1557,7 +1555,7 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { if ( data == 0 ) { reporter->InternalError("Could not malloc?"); } - memset(data, 0, length); + //memset(data, 0, length); for ( int i = 0; i < num_elements; i++ ) { const Value* val = vals[i]; position += CopyValue(data, position, val); @@ -1695,11 +1693,3 @@ Manager::ReaderInfo* Manager::FindReader(const EnumVal* id) return 0; } - -string Manager::Hash(const string &input) { - unsigned char digest[16]; - hash_md5(input.length(), (const unsigned char*) input.c_str(), digest); - string out((const char*) digest, 16); - return out; -} - diff --git a/src/input/Manager.h b/src/input/Manager.h index be84ee416d..b4fc6cff7f 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -202,8 +202,6 @@ private: vector readers; - string Hash(const string &input); - class Filter; class TableFilter; class EventFilter; diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index d4b3d91e00..e128cd1164 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -302,7 +302,6 @@ Value* Ascii::EntryToVal(string s, 
FieldMapping field) { break; case TYPE_PORT: - val->val.port_val.port = 0; val->val.port_val.port = atoi(s.c_str()); val->val.port_val.proto = TRANSPORT_UNKNOWN; break; From 531189b5fdd39fb9f927b8af075957ad2892b9d4 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 22 Feb 2012 08:56:45 -0800 Subject: [PATCH 104/651] try to make ascii reader a little bit more robust to failure - mainly ignore messages after a reader has disabled itself --- src/input/ReaderBackend.cc | 20 +++++++++++++++++++- src/input/ReaderBackend.h | 7 ++++++- src/input/readers/Ascii.cc | 8 ++++++-- 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index cfc74d33a8..8ddb6a2f42 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -203,6 +203,9 @@ bool ReaderBackend::Init(string arg_source, int mode, bool arg_autostart) } bool ReaderBackend::StartReading() { + if ( disabled ) + return false; + int success = DoStartReading(); if ( success == false ) { @@ -215,6 +218,9 @@ bool ReaderBackend::StartReading() { bool ReaderBackend::AddFilter(int id, int arg_num_fields, const Field* const * arg_fields) { + if ( disabled ) + return false; + bool success = DoAddFilter(id, arg_num_fields, arg_fields); if ( success && autostart) { autostart = false; @@ -225,6 +231,9 @@ bool ReaderBackend::AddFilter(int id, int arg_num_fields, bool ReaderBackend::RemoveFilter(int id) { + if ( disabled ) + return false; + bool success = DoRemoveFilter(id); SendOut(new FilterRemovedMessage(frontend, id)); return success; // yes, I know, noone reads this. @@ -240,11 +249,20 @@ void ReaderBackend::Finish() bool ReaderBackend::Update() { - return DoUpdate(); + if ( disabled ) + return false; + + bool success = DoUpdate(); + if ( !success ) { + DisableFrontend(); + } + + return success; } void ReaderBackend::DisableFrontend() { + disabled = true; // we also set disabled here, because there still may be other messages queued and we will dutifully ignore these from now SendOut(new DisableMessage(frontend)); } diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index e34db3e559..68fd5f3a37 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -177,7 +177,12 @@ protected: * * A reader implementation must override this method but it can just ignore * calls, if a forced update does not fit the input source or the current input - * reading mode + * reading mode. + * + * If it returns false, it will be assumed that a fatal error has occured + * that prevents the reader from further operation; it will then be + * disabled and eventually deleted. When returning false, an implementation + * should also call Error to indicate what happened. 
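To illustrate the contract spelled out above, a hypothetical reader (not part of this patch) would handle a fatal condition like this; returning false makes ReaderBackend::Update() call DisableFrontend(), after which further queued messages for the reader are ignored:

	// Sketch only: 'ExampleReader' does not exist in the tree.
	bool ExampleReader::DoUpdate()
		{
		if ( ! file || ! file->is_open() )
			{
			// Tell the user what went wrong ...
			Error("input source is no longer readable");

			// ... and signal the fatal error; the frontend gets disabled.
			return false;
			}

		// Otherwise read any new content and hand it to the manager.
		return true;
		}
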
*/ virtual bool DoUpdate() = 0; diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index e128cd1164..73b8500d5e 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -458,7 +458,10 @@ bool Ascii::DoUpdate() { if ( file && file->is_open() ) { if ( mode == STREAM ) { file->clear(); // remove end of file evil bits - ReadHeader(true); // in case filters changed + if ( !ReadHeader(true) ) // in case filters changed + { + return false; // header reading failed + } break; } file->close(); @@ -522,6 +525,7 @@ bool Ascii::DoUpdate() { Value* val = EntryToVal(stringfields[(*fit).position], *fit); if ( val == 0 ) { + Error("Could not convert String value to Val"); return false; } @@ -580,7 +584,7 @@ bool Ascii::DoHeartbeat(double network_time, double current_time) break; case REREAD: case STREAM: - DoUpdate(); + Update(); // call update and not DoUpdate, because update actually checks disabled. break; default: assert(false); From 7e5f7338269d22332950b2ed9dd446c673fa21c3 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 22 Feb 2012 09:44:45 -0800 Subject: [PATCH 105/651] raw input reader for seth, which can simply read a file into string-events given a line separator. --- scripts/base/frameworks/input/__load__.bro | 1 + scripts/base/frameworks/input/readers/raw.bro | 9 + src/CMakeLists.txt | 1 + src/input.bif | 2 + src/input/Manager.cc | 2 + src/input/readers/Ascii.cc | 6 +- src/input/readers/Raw.cc | 230 ++++++++++++++++++ src/input/readers/Raw.h | 70 ++++++ src/types.bif | 1 + .../scripts.base.frameworks.input.raw/out | 8 + .../scripts/base/frameworks/input/raw.bro | 35 +++ 11 files changed, 363 insertions(+), 2 deletions(-) create mode 100644 scripts/base/frameworks/input/readers/raw.bro create mode 100644 src/input/readers/Raw.cc create mode 100644 src/input/readers/Raw.h create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.raw/out create mode 100644 testing/btest/scripts/base/frameworks/input/raw.bro diff --git a/scripts/base/frameworks/input/__load__.bro b/scripts/base/frameworks/input/__load__.bro index a3315186d5..b41fe5e95f 100644 --- a/scripts/base/frameworks/input/__load__.bro +++ b/scripts/base/frameworks/input/__load__.bro @@ -1,3 +1,4 @@ @load ./main @load ./readers/ascii +@load ./readers/raw diff --git a/scripts/base/frameworks/input/readers/raw.bro b/scripts/base/frameworks/input/readers/raw.bro new file mode 100644 index 0000000000..45deed3eda --- /dev/null +++ b/scripts/base/frameworks/input/readers/raw.bro @@ -0,0 +1,9 @@ +##! Interface for the raw input reader. + +module InputRaw; + +export { + ## Separator between input records. 
+ ## Please note that the separator has to be exactly one character long + const record_separator = "\n" &redef; +} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6a84053bce..dd294ace7c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -424,6 +424,7 @@ set(bro_SRCS input/ReaderBackend.cc input/ReaderFrontend.cc input/readers/Ascii.cc + input/readers/Raw.cc ${dns_SRCS} diff --git a/src/input.bif b/src/input.bif index 2e9324ec56..5418b7bbd4 100644 --- a/src/input.bif +++ b/src/input.bif @@ -62,3 +62,5 @@ const set_separator: string; const empty_field: string; const unset_field: string; +module InputRaw; +const record_separator: string; diff --git a/src/input/Manager.cc b/src/input/Manager.cc index a7afdc3a78..d3009aa619 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -6,6 +6,7 @@ #include "ReaderFrontend.h" #include "ReaderBackend.h" #include "readers/Ascii.h" +#include "readers/Raw.h" #include "Event.h" #include "EventHandler.h" @@ -143,6 +144,7 @@ struct ReaderDefinition { ReaderDefinition input_readers[] = { { BifEnum::Input::READER_ASCII, "Ascii", 0, reader::Ascii::Instantiate }, + { BifEnum::Input::READER_RAW, "Raw", 0, reader::Raw::Instantiate }, // End marker { BifEnum::Input::READER_DEFAULT, "None", 0, (ReaderBackend* (*)(ReaderFrontend* frontend))0 } diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 73b8500d5e..733cca6352 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -133,6 +133,7 @@ bool Ascii::DoStartReading() { bool Ascii::DoAddFilter( int id, int arg_num_fields, const Field* const* fields ) { if ( HasFilter(id) ) { + Error("Filter was added twice, ignoring."); return false; // no, we don't want to add this a second time } @@ -147,6 +148,7 @@ bool Ascii::DoAddFilter( int id, int arg_num_fields, const Field* const* fields bool Ascii::DoRemoveFilter ( int id ) { if (!HasFilter(id) ) { + Error("Filter removal of nonexisting filter requested."); return false; } @@ -263,11 +265,11 @@ TransportProto Ascii::StringToProto(const string &proto) { Value* Ascii::EntryToVal(string s, FieldMapping field) { - Value* val = new Value(field.type, true); - if ( s.compare(unset_field) == 0 ) { // field is not set... return new Value(field.type, false); } + + Value* val = new Value(field.type, true); switch ( field.type ) { case TYPE_ENUM: diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc new file mode 100644 index 0000000000..c435624865 --- /dev/null +++ b/src/input/readers/Raw.cc @@ -0,0 +1,230 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "Raw.h" +#include "NetVar.h" + +#include +#include + +#include "../../threading/SerializationTypes.h" + +#define MANUAL 0 +#define REREAD 1 +#define STREAM 2 + +#include +#include +#include + +using namespace input::reader; +using threading::Value; +using threading::Field; + +Raw::Raw(ReaderFrontend *frontend) : ReaderBackend(frontend) +{ + file = 0; + + //keyMap = new map(); + + separator.assign( (const char*) BifConst::InputRaw::record_separator->Bytes(), BifConst::InputRaw::record_separator->Len()); + if ( separator.size() != 1 ) { + Error("separator length has to be 1. 
Separator will be truncated."); + } + +} + +Raw::~Raw() +{ + DoFinish(); +} + +void Raw::DoFinish() +{ + filters.empty(); + if ( file != 0 ) { + file->close(); + delete(file); + file = 0; + } +} + +bool Raw::DoInit(string path, int arg_mode) +{ + started = false; + fname = path; + mode = arg_mode; + mtime = 0; + + if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { + Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); + return false; + } + + file = new ifstream(path.c_str()); + if ( !file->is_open() ) { + Error(Fmt("Init: cannot open %s", fname.c_str())); + return false; + } + + return true; +} + +bool Raw::DoStartReading() { + if ( started == true ) { + Error("Started twice"); + return false; + } + + started = true; + switch ( mode ) { + case MANUAL: + case REREAD: + case STREAM: + DoUpdate(); + break; + default: + assert(false); + } + + return true; +} + +bool Raw::DoAddFilter( int id, int arg_num_fields, const Field* const* fields ) { + + if ( arg_num_fields != 1 ) { + Error("Filter for raw reader contains more than one field. Filters for the raw reader may only contain exactly one string field. Filter ignored."); + return false; + } + + if ( fields[0]->type != TYPE_STRING ) { + Error("Filter for raw reader contains a field that is not of type string."); + return false; + } + + if ( HasFilter(id) ) { + Error("Filter was added twice, ignoring"); + return false; // no, we don't want to add this a second time + } + + Filter f; + f.num_fields = arg_num_fields; + f.fields = fields; + + filters[id] = f; + + return true; +} + +bool Raw::DoRemoveFilter ( int id ) { + if (!HasFilter(id) ) { + Error("Filter removal of nonexisting filter requested."); + return false; + } + + assert ( filters.erase(id) == 1 ); + + return true; +} + + +bool Raw::HasFilter(int id) { + map::iterator it = filters.find(id); + if ( it == filters.end() ) { + return false; + } + return true; +} + +bool Raw::GetLine(string& str) { + while ( getline(*file, str, separator[0]) ) { + return true; + } + + return false; +} + + +// read the entire file and send appropriate thingies back to InputMgr +bool Raw::DoUpdate() { + switch ( mode ) { + case REREAD: + // check if the file has changed + struct stat sb; + if ( stat(fname.c_str(), &sb) == -1 ) { + Error(Fmt("Could not get stat for %s", fname.c_str())); + return false; + } + + if ( sb.st_mtime <= mtime ) { + // no change + return true; + } + + mtime = sb.st_mtime; + // file changed. reread. + + // fallthrough + case MANUAL: + case STREAM: + + if ( file && file->is_open() ) { + if ( mode == STREAM ) { + file->clear(); // remove end of file evil bits + break; + } + file->close(); + } + file = new ifstream(fname.c_str()); + if ( !file->is_open() ) { + Error(Fmt("cannot open %s", fname.c_str())); + return false; + } + + break; + default: + assert(false); + + } + + string line; + while ( GetLine(line) ) { + for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { + + assert ((*it).second.num_fields == 1); + + Value** fields = new Value*[1]; + + // filter has exactly one text field. convert to it. 
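The reader takes the record separator from the script-level InputRaw::record_separator constant and, as the constructor warning above notes, only its first byte is used when splitting; every record boundary test in this reader effectively boils down to:

	// Only separator[0] ever reaches getline(), so a multi-character
	// separator degrades to its first byte (illustrative condensation
	// of Raw::GetLine()).
	getline(*file, str, separator[0]);
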
+ Value* val = new Value(TYPE_STRING, true); + val->val.string_val = new string(line); + fields[0] = val; + + Put((*it).first, fields); + + } + + } + + return true; +} + + +bool Raw::DoHeartbeat(double network_time, double current_time) +{ + ReaderBackend::DoHeartbeat(network_time, current_time); + + switch ( mode ) { + case MANUAL: + // yay, we do nothing :) + break; + case REREAD: + case STREAM: + Update(); // call update and not DoUpdate, because update actually checks disabled. + break; + default: + assert(false); + } + + return true; +} + diff --git a/src/input/readers/Raw.h b/src/input/readers/Raw.h new file mode 100644 index 0000000000..e046cb2ff7 --- /dev/null +++ b/src/input/readers/Raw.h @@ -0,0 +1,70 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef INPUT_READERS_RAW_H +#define INPUT_READERS_RAW_H + +#include +#include + +#include "../ReaderBackend.h" + +namespace input { namespace reader { + +class Raw : public ReaderBackend { +public: + Raw(ReaderFrontend* frontend); + ~Raw(); + + static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Raw(frontend); } + +protected: + + virtual bool DoInit(string path, int mode); + + virtual bool DoAddFilter( int id, int arg_num_fields, const threading::Field* const* fields ); + + virtual bool DoRemoveFilter ( int id ); + + virtual void DoFinish(); + + virtual bool DoUpdate(); + + virtual bool DoStartReading(); + +private: + + virtual bool DoHeartbeat(double network_time, double current_time); + + struct Filter { + unsigned int num_fields; + + const threading::Field* const * fields; // raw mapping + }; + + bool HasFilter(int id); + + bool GetLine(string& str); + + ifstream* file; + string fname; + + map filters; + + // Options set from the script-level. + string separator; + + // keep a copy of the headerline to determine field locations when filters change + string headerline; + + int mode; + + bool started; + time_t mtime; + +}; + + +} +} + +#endif /* INPUT_READERS_RAW_H */ diff --git a/src/types.bif b/src/types.bif index e2a47a7ece..a9c6ecb3a8 100644 --- a/src/types.bif +++ b/src/types.bif @@ -173,6 +173,7 @@ module Input; enum Reader %{ READER_DEFAULT, READER_ASCII, + READER_RAW, %} enum Event %{ diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.raw/out b/testing/btest/Baseline/scripts.base.frameworks.input.raw/out new file mode 100644 index 0000000000..2059013c5d --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.raw/out @@ -0,0 +1,8 @@ +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. diff --git a/testing/btest/scripts/base/frameworks/input/raw.bro b/testing/btest/scripts/base/frameworks/input/raw.bro new file mode 100644 index 0000000000..5f196648b6 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/raw.bro @@ -0,0 +1,35 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. 
+@TEST-END-FILE + + +module A; + +export { + redef enum Input::ID += { INPUT }; +} + +type Val: record { + s: string; +}; + +event line(tpe: Input::Event, s: string) { + print s; +} + +event bro_init() +{ + Input::create_stream(A::INPUT, [$source="input.log", $reader=Input::READER_RAW, $mode=Input::STREAM]); + Input::add_eventfilter(A::INPUT, [$name="input", $fields=Val, $ev=line]); +} From 93fac7a4be74d441001ec3ddb70e13655bee636c Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 22 Feb 2012 10:46:35 -0800 Subject: [PATCH 106/651] fix one of the bugs seth found in the input framework. (bug in PutTable when the table contained only one element and that element should not be wrapped into a record) --- src/input/Manager.cc | 6 +- .../out | 7 + .../scripts.base.frameworks.input.reread/out | 267 ++++++++++++++++++ .../frameworks/input/predicate-stream.bro | 83 ++++++ .../scripts/base/frameworks/input/reread.bro | 32 ++- 5 files changed, 388 insertions(+), 7 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.predicate-stream/out create mode 100644 testing/btest/scripts/base/frameworks/input/predicate-stream.bro diff --git a/src/input/Manager.cc b/src/input/Manager.cc index d3009aa619..9a350bef20 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1083,12 +1083,11 @@ int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const * Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); Val* valval; - int position = filter->num_idx_fields; if ( filter->num_val_fields == 0 ) { valval = 0; - } else if ( filter->num_val_fields == 1 && !filter->want_record ) { - valval = ValueToVal(vals[filter->num_idx_fields], filter->rtype->FieldType(filter->num_idx_fields)); + } else if ( filter->num_val_fields == 1 && filter->want_record == 0 ) { + valval = ValueToVal(vals[position], filter->rtype->FieldType(0)); } else { valval = ValueToRecordVal(vals, filter->rtype, &position); } @@ -1130,6 +1129,7 @@ int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const * if ( filter->num_val_fields > 0 ) vl.append(valval); + Val* v = filter->pred->Call(&vl); bool result = v->AsBool(); Unref(v); diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.predicate-stream/out b/testing/btest/Baseline/scripts.base.frameworks.input.predicate-stream/out new file mode 100644 index 0000000000..d805f804d8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.predicate-stream/out @@ -0,0 +1,7 @@ +VALID +VALID +VALID +VALID +VALID +VALID +VALID diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out index 9516cb2a92..b844990978 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out @@ -110,6 +110,273 @@ AA, BB }, se={ +}, vc=[10, 20, 30], ve=[]] +} +============EVENT============ +Input::EVENT_NEW +[i=-44] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Input::EVENT_NEW +[i=-45] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Input::EVENT_NEW +[i=-46] +[b=F, e=SSH::LOG, 
c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Input::EVENT_NEW +[i=-47] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Input::EVENT_NEW +[i=-48] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +==========SERVERS============ +{ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-46] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-47] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-45] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +============EVENT============ +Input::EVENT_REMOVED +[i=-43] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Input::EVENT_REMOVED +[i=-46] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Input::EVENT_REMOVED +[i=-44] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Input::EVENT_REMOVED +[i=-47] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Input::EVENT_REMOVED +[i=-45] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Input::EVENT_REMOVED +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, 
sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +==========SERVERS============ +{ +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + }, vc=[10, 20, 30], ve=[]] } done diff --git a/testing/btest/scripts/base/frameworks/input/predicate-stream.bro b/testing/btest/scripts/base/frameworks/input/predicate-stream.bro new file mode 100644 index 0000000000..f08aaef998 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/predicate-stream.bro @@ -0,0 +1,83 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out +# +# only difference from predicate.bro is, that this one uses a stream source. +# the reason is, that the code-paths are quite different, because then the ascii reader uses the put and not the sendevent interface + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +export { + redef enum Input::ID += { INPUT }; +} + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global servers: table[int] of Val = table(); +global ct: int; + +event line(tpe: Input::Event, left: Idx, right: bool) { + ct = ct + 1; + if ( ct < 3 ) { + return; + } + if ( ct > 3 ) { + print "Too many events"; + return; + } + + if ( 1 in servers ) { + print "VALID"; + } + if ( 2 in servers ) { + print "VALID"; + } + if ( !(3 in servers) ) { + print "VALID"; + } + if ( !(4 in servers) ) { + print "VALID"; + } + if ( !(5 in servers) ) { + print "VALID"; + } + if ( !(6 in servers) ) { + print "VALID"; + } + if ( 7 in servers ) { + print "VALID"; + } +} + +event bro_init() +{ + ct = 0; + # first read in the old stuff into the table... 
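This predicate-stream test exercises the PutTable() path fixed by this patch: with a single value column and want_record=F the value is now handed through unwrapped, while multiple columns still get packed into a record. A condensed sketch of the corrected dispatch, using the names from the Manager.cc hunk above (position is the offset at which the value columns start):

	Val* valval;

	if ( filter->num_val_fields == 0 )
		// Pure set: there is no value to construct.
		valval = 0;

	else if ( filter->num_val_fields == 1 && ! filter->want_record )
		// One column and want_record=F: pass the bare value.
		valval = ValueToVal(vals[position], filter->rtype->FieldType(0));

	else
		// Everything else becomes a record value.
		valval = ValueToRecordVal(vals, filter->rtype, &position);
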
+ Input::create_stream(A::INPUT, [$source="input.log", $mode=Input::STREAM]); + Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $ev=line, + $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } + ]); +} + diff --git a/testing/btest/scripts/base/frameworks/input/reread.bro b/testing/btest/scripts/base/frameworks/input/reread.bro index 58df37af84..8e573494fd 100644 --- a/testing/btest/scripts/base/frameworks/input/reread.bro +++ b/testing/btest/scripts/base/frameworks/input/reread.bro @@ -1,11 +1,15 @@ # # @TEST-EXEC: cp input1.log input.log # @TEST-EXEC: btest-bg-run bro bro %INPUT -# @TEST-EXEC: sleep 3 +# @TEST-EXEC: sleep 2 # @TEST-EXEC: cp input2.log input.log -# @TEST-EXEC: sleep 3 +# @TEST-EXEC: sleep 2 # @TEST-EXEC: cp input3.log input.log -# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: sleep 2 +# @TEST-EXEC: cp input4.log input.log +# @TEST-EXEC: sleep 2 +# @TEST-EXEC: cp input5.log input.log +# @TEST-EXEC: btest-bg-wait -k 2 # @TEST-EXEC: btest-diff out @TEST-START-FILE input1.log @@ -31,6 +35,26 @@ T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} @TEST-END-FILE +@TEST-START-FILE input4.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -44 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -45 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -46 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -47 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input5.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table 
vector vector func +F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE @load frameworks/communication/listen @@ -92,7 +116,7 @@ event Input::update_finished(id: Input::ID) { print outfile, servers; try = try + 1; - if ( try == 3 ) { + if ( try == 5 ) { print outfile, "done"; close(outfile); Input::remove_tablefilter(A::INPUT, "ssh"); From d81607c3e93452595e3453cbfe9c378fb71a40fb Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 23 Feb 2012 14:36:04 -0800 Subject: [PATCH 107/651] fix empty field bug in threaded version --- src/input/Manager.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 9a350bef20..9dba56b174 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1547,7 +1547,8 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { for ( int i = 0; i < num_elements; i++ ) { const Value* val = vals[i]; - length += GetValueLength(val); + if ( val->present ) + length += GetValueLength(val); } //reporter->Error("Length: %d", length); @@ -1560,7 +1561,8 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { //memset(data, 0, length); for ( int i = 0; i < num_elements; i++ ) { const Value* val = vals[i]; - position += CopyValue(data, position, val); + if ( val->present ) + position += CopyValue(data, position, val); } hash_t key = HashKey::HashBytes(data, length); From d553a3c6f69c1110b37196632e47cb6ffd10e406 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 23 Feb 2012 15:30:39 -0800 Subject: [PATCH 108/651] fix strange bug when using predicates and events at the same time on a tablefilter. Testcase is now more involved. --- src/input/Manager.cc | 52 +++-- .../scripts.base.frameworks.input.reread/out | 212 +++++++++++++++++- .../scripts/base/frameworks/input/reread.bro | 10 +- 3 files changed, 249 insertions(+), 25 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 9dba56b174..b709d26ea8 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -364,20 +364,21 @@ bool Manager::AddTableFilter(EnumVal *id, RecordVal* fval) { } - Val* name = fval->Lookup(rtype->FieldOffset("name")); - Val* pred = fval->Lookup(rtype->FieldOffset("pred")); + Val* name = fval->LookupWithDefault(rtype->FieldOffset("name")); + Val* pred = fval->LookupWithDefault(rtype->FieldOffset("pred")); - RecordType *idx = fval->Lookup(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); + RecordType *idx = fval->LookupWithDefault(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); RecordType *val = 0; if ( fval->Lookup(rtype->FieldOffset("val")) != 0 ) { - val = fval->Lookup(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); + val = fval->LookupWithDefault(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); } - TableVal *dst = fval->Lookup(rtype->FieldOffset("destination"))->AsTableVal(); + TableVal *dst = fval->LookupWithDefault(rtype->FieldOffset("destination"))->AsTableVal(); Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); - Val* event_val = fval->Lookup(rtype->FieldOffset("ev")); + Val* event_val = fval->LookupWithDefault(rtype->FieldOffset("ev")); Func* event = event_val ? 
event_val->AsFunc() : 0; + Unref(event_val); if ( event ) { FuncType* etype = event->FType()->AsFuncType(); @@ -450,14 +451,17 @@ bool Manager::AddTableFilter(EnumVal *id, RecordVal* fval) { filter->pred = pred ? pred->AsFunc() : 0; filter->num_idx_fields = idxfields; filter->num_val_fields = valfields; - filter->tab = dst->Ref()->AsTableVal(); - filter->rtype = val ? val->Ref()->AsRecordType() : 0; - filter->itype = idx->Ref()->AsRecordType(); + filter->tab = dst->AsTableVal(); + filter->rtype = val ? val->AsRecordType() : 0; + filter->itype = idx->AsRecordType(); filter->event = event ? event_registry->Lookup(event->GetID()->Name()) : 0; filter->currDict = new PDict(InputHash); filter->lastDict = new PDict(InputHash); filter->want_record = ( want_record->InternalInt() == 1 ); + Unref(want_record); // ref'd by lookupwithdefault + Unref(name); + Unref(pred); if ( valfields > 1 ) { assert(filter->want_record); @@ -861,7 +865,7 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va } } - } + } Val* oldval = 0; @@ -948,16 +952,16 @@ void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { val = filter->tab->Lookup(idx); assert(val != 0); } + int startpos = 0; + Val* predidx = ListValToRecordVal(idx, filter->itype, &startpos); + EnumVal* ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); + if ( filter->pred ) { - - bool doBreak = false; // ask predicate, if we want to expire this element... - EnumVal* ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - //Ref(idx); - int startpos = 0; - Val* predidx = ListValToRecordVal(idx, filter->itype, &startpos); + Ref(ev); + Ref(predidx); Ref(val); val_list vl(3); @@ -971,21 +975,23 @@ void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { if ( result == false ) { // Keep it. Hence - we quit and simply go to the next entry of lastDict // ah well - and we have to add the entry to currDict... 
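The surrounding changes settle on a single ownership rule for the values involved in expiring old entries: the index record and the event enum are created once per removed entry, Ref()'d for every consumer that takes ownership (the predicate call and SendEvent()), and Unref()'d exactly once at the end of the iteration. Condensed from the EndCurrentSend hunk, the pattern is:

	if ( filter->pred )
		{
		Ref(ev); Ref(predidx); Ref(val);	// consumed by the predicate call
		// ... assemble the val_list and call filter->pred ...
		}

	if ( filter->event )
		{
		Ref(predidx); Ref(val); Ref(ev);	// consumed by SendEvent()
		SendEvent(filter->event, 3, ev, predidx, val);
		}

	Unref(predidx);		// balance the initial creation
	Unref(ev);
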
+ Unref(predidx); + Unref(ev); filter->currDict->Insert(lastDictIdxKey, filter->lastDict->RemoveEntry(lastDictIdxKey)); continue; - } - - - } + } + } if ( filter->event ) { - int startpos = 0; - Val* predidx = ListValToRecordVal(idx, filter->itype, &startpos); + Ref(predidx); Ref(val); - EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); + Ref(ev); SendEvent(filter->event, 3, ev, predidx, val); } + Unref(predidx); + Unref(ev); + filter->tab->Delete(ih->idxkey); filter->lastDict->Remove(lastDictIdxKey); // deletex in next line delete(ih); diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out index b844990978..f244f11a73 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out @@ -1,3 +1,18 @@ +============PREDICATE============ +Input::EVENT_NEW +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] ============EVENT============ Input::EVENT_NEW [i=-42] @@ -28,6 +43,21 @@ BB }, vc=[10, 20, 30], ve=[]] } +============PREDICATE============ +Input::EVENT_NEW +[i=-43] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] ============EVENT============ Input::EVENT_NEW [i=-43] @@ -70,6 +100,21 @@ BB }, vc=[10, 20, 30], ve=[]] } +============PREDICATE============ +Input::EVENT_CHANGED +[i=-43] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] ============EVENT============ Input::EVENT_CHANGED [i=-43] @@ -112,6 +157,21 @@ BB }, vc=[10, 20, 30], ve=[]] } +============PREDICATE============ +Input::EVENT_NEW +[i=-44] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] ============EVENT============ Input::EVENT_NEW [i=-44] @@ -126,6 +186,21 @@ AA, BB }, se={ +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_NEW +[i=-45] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + }, vc=[10, 20, 30], ve=[]] ============EVENT============ Input::EVENT_NEW @@ -142,7 +217,7 @@ BB }, se={ }, vc=[10, 20, 30], ve=[]] -============EVENT============ +============PREDICATE============ Input::EVENT_NEW [i=-46] [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ @@ -159,6 +234,21 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Input::EVENT_NEW +[i=-46] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_NEW [i=-47] [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, @@ -171,6 +261,36 @@ AA, BB }, se={ +}, vc=[10, 20, 30], ve=[]] +============EVENT============ 
+Input::EVENT_NEW +[i=-47] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_NEW +[i=-48] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + }, vc=[10, 20, 30], ve=[]] ============EVENT============ Input::EVENT_NEW @@ -274,6 +394,96 @@ BB }, vc=[10, 20, 30], ve=[]] } +============PREDICATE============ +Input::EVENT_REMOVED +[i=-43] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-46] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-44] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-47] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-45] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] ============EVENT============ Input::EVENT_REMOVED [i=-43] diff --git a/testing/btest/scripts/base/frameworks/input/reread.bro b/testing/btest/scripts/base/frameworks/input/reread.bro index 8e573494fd..742d68605b 100644 --- a/testing/btest/scripts/base/frameworks/input/reread.bro +++ b/testing/btest/scripts/base/frameworks/input/reread.bro @@ -107,7 +107,15 @@ event bro_init() try = 0; # first read in the old stuff into the table... Input::create_stream(A::INPUT, [$source="../input.log", $mode=Input::REREAD]); - Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line]); + Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, + $pred(typ: Input::Event, left: Idx, right: Val) = { + print outfile, "============PREDICATE============"; + print outfile, typ; + print outfile, left; + print outfile, right; + return T; + } + ]); } From 14916b43f634c9b0dc989c8b89ffcd272951b633 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 27 Feb 2012 21:39:56 -0800 Subject: [PATCH 109/651] Readding deleted functions. These are needed in debug mode in turns out. 
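The two helpers re-added below render a connection 4-tuple into a static buffer for debug output. A typical debug-only call site looks like this; the variables are placeholders, not code from the patch:

	uint32 src[4] = { 0 };	// IPv6-mapped source address, network order
	uint32 dst[4] = { 0 };	// filled in from a packet in real code

	DEBUG_MSG("connection %s\n", fmt_conn_id(src, 1234, dst, 80));
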
--- src/net_util.cc | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/net_util.cc b/src/net_util.cc index a65afb1a25..f78246c634 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -147,6 +147,26 @@ char addr_to_class(uint32 addr) return 'A'; } +const char* fmt_conn_id(const IPAddr& src_addr, uint32 src_port, + const IPAddr& dst_addr, uint32 dst_port) + { + static char buffer[512]; + + safe_snprintf(buffer, sizeof(buffer), "%s:%d > %s:%d", + string(src_addr).c_str(), src_port, + string(dst_addr).c_str(), dst_port); + + return buffer; + } + +const char* fmt_conn_id(const uint32* src_addr, uint32 src_port, + const uint32* dst_addr, uint32 dst_port) + { + IPAddr src(IPAddr::IPv6, src_addr, IPAddr::Network); + IPAddr dst(IPAddr::IPv6, dst_addr, IPAddr::Network); + return fmt_conn_id(src, src_port, dst, dst_port); + } + uint32 extract_uint32(const u_char* data) { uint32 val; From edc9bb14af6b2fe56318e5dbb07847d4413cc408 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 28 Feb 2012 15:12:35 -0800 Subject: [PATCH 110/651] Making exchange of addresses between threads thread-safe. As we can't use the IPAddr class (because it's not thread-safe), this involved a bit manual address manipulation and also shuffling some things around a bit. Not fully working yet, the tests for remote logging still fail. --- src/Anon.cc | 2 +- src/CompHash.cc | 2 +- src/ConnCompressor.cc | 16 ++++---- src/DCE_RPC.cc | 4 +- src/DNS_Mgr.cc | 4 +- src/Desc.cc | 10 +++++ src/Desc.h | 7 ++-- src/IPAddr.cc | 4 +- src/IPAddr.h | 45 ++++++++++++++++++-- src/RemoteSerializer.cc | 4 +- src/RuleMatcher.cc | 2 +- src/SerializationFormat.cc | 80 +++++++++++++++++++++++++++++++++++- src/SerializationFormat.h | 15 +++++++ src/Val.cc | 26 ++++++++---- src/Val.h | 8 +--- src/bro.bif | 22 +++++----- src/logging/Manager.cc | 4 +- src/logging/WriterBackend.cc | 36 ++++++++++++++++ src/logging/WriterBackend.h | 15 +++++++ src/logging/writers/Ascii.cc | 4 +- src/net_util.cc | 4 +- src/net_util.h | 10 ++++- src/threading/SerialTypes.cc | 63 +++++++++++++++++++--------- src/threading/SerialTypes.h | 22 ++++++++-- 24 files changed, 325 insertions(+), 84 deletions(-) diff --git a/src/Anon.cc b/src/Anon.cc index 4c4146ac3e..d2a28a0e08 100644 --- a/src/Anon.cc +++ b/src/Anon.cc @@ -154,7 +154,7 @@ void AnonymizeIPAddr_A50::init() int AnonymizeIPAddr_A50::PreservePrefix(ipaddr32_t input, int num_bits) { DEBUG_MSG("%s/%d\n", - IPAddr(IPAddr::IPv4, &input, IPAddr::Network).AsString().c_str(), + IPAddr(IPv4, &input, IPAddr::Network).AsString().c_str(), num_bits); if ( ! before_anonymization ) diff --git a/src/CompHash.cc b/src/CompHash.cc index ae0e082216..86677f9719 100644 --- a/src/CompHash.cc +++ b/src/CompHash.cc @@ -709,7 +709,7 @@ const char* CompositeHash::RecoverOneVal(const HashKey* k, const char* kp0, const uint32* const kp = AlignType(kp0); kp1 = reinterpret_cast(kp+4); - IPAddr addr(IPAddr::IPv6, kp, IPAddr::Network); + IPAddr addr(IPv6, kp, IPAddr::Network); switch ( tag ) { case TYPE_ADDR: diff --git a/src/ConnCompressor.cc b/src/ConnCompressor.cc index 29e24457f5..e2b297220a 100644 --- a/src/ConnCompressor.cc +++ b/src/ConnCompressor.cc @@ -236,7 +236,7 @@ Connection* ConnCompressor::NextPacket(double t, HashKey* key, const IP_Hdr* ip, } else if ( ip->SrcAddr() == - IPAddr(IPAddr::IPv6, SrcAddr(pending), IPAddr::Network) && + IPAddr(IPv6, SrcAddr(pending), IPAddr::Network) && tp->th_sport == SrcPort(pending) ) // Another packet from originator. 
tc = NextFromOrig(pending, t, key, ip, tp); @@ -508,8 +508,8 @@ Connection* ConnCompressor::Instantiate(HashKey* key, PendingConn* pending) { // Instantantiate a Connection. ConnID conn_id; - conn_id.src_addr = IPAddr(IPAddr::IPv6, SrcAddr(pending), IPAddr::Network); - conn_id.dst_addr = IPAddr(IPAddr::IPv6, DstAddr(pending), IPAddr::Network); + conn_id.src_addr = IPAddr(IPv6, SrcAddr(pending), IPAddr::Network); + conn_id.dst_addr = IPAddr(IPv6, DstAddr(pending), IPAddr::Network); conn_id.src_port = SrcPort(pending); conn_id.dst_port = DstPort(pending); @@ -608,7 +608,7 @@ void ConnCompressor::PktHdrToPendingConn(double time, const HashKey* key, memcpy(&c->key, key->Key(), key->Size()); c->hash = key->Hash(); - IPAddr ip1(IPAddr::IPv6, c->key.ip1, IPAddr::Network); + IPAddr ip1(IPv6, c->key.ip1, IPAddr::Network); c->ip1_is_src = ip1 == ip->SrcAddr() && c->key.port1 == tp->th_sport; c->time = time; @@ -658,10 +658,10 @@ const IP_Hdr* ConnCompressor::PendingConnToPacket(const PendingConn* c) tp->th_urp = 0; } - IPAddr ip1(IPAddr::IPv6, c->key.ip1, IPAddr::Network); - IPAddr ip2(IPAddr::IPv6, c->key.ip2, IPAddr::Network); - if ( ip1.GetFamily() == IPAddr::IPv6 || - ip2.GetFamily() == IPAddr::IPv6 ) + IPAddr ip1(IPv6, c->key.ip1, IPAddr::Network); + IPAddr ip2(IPv6, c->key.ip2, IPAddr::Network); + if ( ip1.GetFamily() == IPv6 || + ip2.GetFamily() == IPv6 ) reporter->InternalError("IPv6 snuck into connection compressor"); else { diff --git a/src/DCE_RPC.cc b/src/DCE_RPC.cc index 88cdb48e80..21cb3be9a0 100644 --- a/src/DCE_RPC.cc +++ b/src/DCE_RPC.cc @@ -137,7 +137,7 @@ static bool is_mapped_dce_rpc_endpoint(const dce_rpc_endpoint_addr& addr) bool is_mapped_dce_rpc_endpoint(const ConnID* id, TransportProto proto) { - if ( id->dst_addr.GetFamily() == IPAddr::IPv6 ) + if ( id->dst_addr.GetFamily() == IPv6 ) // TODO: Does the protocol support v6 addresses? 
#773 return false; @@ -414,7 +414,7 @@ void DCE_RPC_Session::DeliverEpmapperMapResponse( case binpac::DCE_RPC_Simple::EPM_PROTOCOL_IP: uint32 hostip = floor->rhs()->data()->ip(); - mapped.addr.addr = IPAddr(IPAddr::IPv4, &hostip, IPAddr::Host); + mapped.addr.addr = IPAddr(IPv4, &hostip, IPAddr::Host); break; } } diff --git a/src/DNS_Mgr.cc b/src/DNS_Mgr.cc index a80de42704..9e65d3c9a9 100644 --- a/src/DNS_Mgr.cc +++ b/src/DNS_Mgr.cc @@ -321,10 +321,10 @@ void DNS_Mapping::Init(struct hostent* h) addrs = new IPAddr[num_addrs]; for ( int i = 0; i < num_addrs; ++i ) if ( h->h_addrtype == AF_INET ) - addrs[i] = IPAddr(IPAddr::IPv4, (uint32*)h->h_addr_list[i], + addrs[i] = IPAddr(IPv4, (uint32*)h->h_addr_list[i], IPAddr::Network); else if ( h->h_addrtype == AF_INET6 ) - addrs[i] = IPAddr(IPAddr::IPv6, (uint32*)h->h_addr_list[i], + addrs[i] = IPAddr(IPv6, (uint32*)h->h_addr_list[i], IPAddr::Network); } else diff --git a/src/Desc.cc b/src/Desc.cc index 12b4a524eb..9d94321427 100644 --- a/src/Desc.cc +++ b/src/Desc.cc @@ -157,6 +157,16 @@ void ODesc::Add(double d) } } +void ODesc::Add(const IPAddr& addr) + { + Add(addr.AsString()); + } + +void ODesc::Add(const IPPrefix& prefix) + { + Add(prefix.AsString()); + } + void ODesc::AddCS(const char* s) { int n = strlen(s); diff --git a/src/Desc.h b/src/Desc.h index 9f8d7ab109..9c60c68106 100644 --- a/src/Desc.h +++ b/src/Desc.h @@ -8,7 +8,6 @@ #include #include "BroString.h" -#include "IPAddr.h" typedef enum { DESC_READABLE, @@ -23,6 +22,8 @@ typedef enum { } desc_style; class BroFile; +class IPAddr; +class IPPrefix; class ODesc { public: @@ -76,8 +77,8 @@ public: void Add(int64 i); void Add(uint64 u); void Add(double d); - void Add(const IPAddr& addr) { Add(addr.AsString()); } - void Add(const IPPrefix& prefix) { Add(prefix.AsString()); } + void Add(const IPAddr& addr); + void Add(const IPPrefix& prefix); // Add s as a counted string. void AddCS(const char* s); diff --git a/src/IPAddr.cc b/src/IPAddr.cc index ff124025f9..29c8f1b8cf 100644 --- a/src/IPAddr.cc +++ b/src/IPAddr.cc @@ -250,7 +250,7 @@ IPPrefix::IPPrefix(const in6_addr& in6, uint8_t length) IPPrefix::IPPrefix(const IPAddr& addr, uint8_t length) : prefix(addr) { - if ( prefix.GetFamily() == IPAddr::IPv4 ) + if ( prefix.GetFamily() == IPv4 ) { if ( length > 32 ) reporter->InternalError("Bad IPAddr(v4) IPPrefix length : %d", @@ -275,7 +275,7 @@ string IPPrefix::AsString() const { char l[16]; - if ( prefix.GetFamily() == IPAddr::IPv4 ) + if ( prefix.GetFamily() == IPv4 ) modp_uitoa10(length - 96, l); else modp_uitoa10(length, l); diff --git a/src/IPAddr.h b/src/IPAddr.h index f0c0ac12c8..67d6f2112e 100644 --- a/src/IPAddr.h +++ b/src/IPAddr.h @@ -10,6 +10,8 @@ #include "BroString.h" #include "Hash.h" #include "util.h" +#include "Type.h" +#include "threading/SerialTypes.h" struct ConnID; class ExpectedConn; @@ -25,7 +27,7 @@ public: /** * Address family. */ - enum Family { IPv4, IPv6 }; + typedef IPFamily Family; /** * Byte order. @@ -318,14 +320,19 @@ public: return memcmp(&addr1.in6, &addr2.in6, sizeof(in6_addr)) < 0; } + /** Converts the address into the type used internally by the + * inter-thread communication. 
+ */ + void ConvertToThreadingValue(threading::Value::addr_t* v) const; + friend HashKey* BuildConnIDHashKey(const ConnID& id); friend HashKey* BuildExpectedConnHashKey(const ExpectedConn& c); - friend class IPPrefix; - unsigned int MemoryAllocation() const { return padded_sizeof(*this); } private: + friend class IPPrefix; + /** * Initializes an address instance from a string representation. * @@ -384,6 +391,25 @@ inline bool IPAddr::IsLoopback() const && (in6.s6_addr[14] == 0) && (in6.s6_addr[15] == 1)); } +inline void IPAddr::ConvertToThreadingValue(threading::Value::addr_t* v) const + { + v->family = GetFamily(); + + switch ( v->family ) { + + case IPv4: + CopyIPv4(&v->in.in4); + return; + + case IPv6: + CopyIPv6(&v->in.in6); + return; + + // Can't be reached. + abort(); + } + } + /** * Returns a hash key for a given ConnID. Passes ownership to caller. */ @@ -459,7 +485,7 @@ public: */ uint8_t Length() const { - return prefix.GetFamily() == IPAddr::IPv4 ? length - 96 : length; + return prefix.GetFamily() == IPv4 ? length - 96 : length; } /** @@ -497,6 +523,8 @@ public: */ string AsString() const; + /** Converts the address into the type used internally by the inter-thread communicastion. + */ operator std::string() const { return AsString(); } /** @@ -516,6 +544,15 @@ public: return new HashKey(&key, sizeof(key)); } + /** Converts the prefix into the type used internally by the + * inter-thread communication. + */ + void ConvertToThreadingValue(threading::Value::subnet_t* v) const + { + v->length = length; + prefix.ConvertToThreadingValue(&v->prefix); + } + unsigned int MemoryAllocation() const { return padded_sizeof(*this); } /** diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 0d89e5ce99..4b8f527f2b 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -681,7 +681,7 @@ RemoteSerializer::PeerID RemoteSerializer::Connect(const IPAddr& ip, if ( ! initialized ) reporter->InternalError("remote serializer not initialized"); - if ( ip.GetFamily() == IPAddr::IPv6 ) + if ( ip.GetFamily() == IPv6 ) Error("inter-Bro communication not supported over IPv6"); const uint32* bytes; @@ -1238,7 +1238,7 @@ bool RemoteSerializer::Listen(const IPAddr& ip, uint16 port, bool expect_ssl) if ( ! 
initialized ) reporter->InternalError("remote serializer not initialized"); - if ( ip.GetFamily() == IPAddr::IPv6 ) + if ( ip.GetFamily() == IPv6 ) Error("inter-Bro communication not supported over IPv6"); const uint32* bytes; diff --git a/src/RuleMatcher.cc b/src/RuleMatcher.cc index a90bc83293..da12b1b679 100644 --- a/src/RuleMatcher.cc +++ b/src/RuleMatcher.cc @@ -1082,7 +1082,7 @@ static bool val_to_maskedval(Val* v, maskedvalue_list* append_to) bool is_v4_mask = m[0] == 0xffffffff && m[1] == m[0] && m[2] == m[0]; - if ( v->AsSubNet().Prefix().GetFamily() == IPAddr::IPv4 && + if ( v->AsSubNet().Prefix().GetFamily() == IPv4 && is_v4_mask ) { mval->val = ntohl(*n); diff --git a/src/SerializationFormat.cc b/src/SerializationFormat.cc index af3d9b44c2..ef2dc80cd7 100644 --- a/src/SerializationFormat.cc +++ b/src/SerializationFormat.cc @@ -250,9 +250,9 @@ bool BinarySerializationFormat::Read(IPAddr* addr, const char* tag) } if ( n == 1 ) - *addr = IPAddr(IPAddr::IPv4, raw, IPAddr::Network); + *addr = IPAddr(IPv4, raw, IPAddr::Network); else - *addr = IPAddr(IPAddr::IPv6, raw, IPAddr::Network); + *addr = IPAddr(IPv6, raw, IPAddr::Network); return true; } @@ -269,6 +269,33 @@ bool BinarySerializationFormat::Read(IPPrefix* prefix, const char* tag) return true; } +bool BinarySerializationFormat::Read(struct in_addr* addr, const char* tag) + { + uint32_t* bytes = (uint32_t*) &addr->s_addr; + + if ( ! Read(&bytes[0], "addr4") ) + return false; + + bytes[0] = htonl(bytes[0]); + return true; + } + +bool BinarySerializationFormat::Read(struct in6_addr* addr, const char* tag) + { + uint32_t* bytes = (uint32_t*) &addr->s6_addr; + + for ( int i = 0; i < 4; ++i ) + { + if ( ! Read(&bytes[i], "addr6-part") ) + return false; + + bytes[i] = htonl(bytes[i]); + } + + return true; + } + + bool BinarySerializationFormat::Write(char v, const char* tag) { DBG_LOG(DBG_SERIAL, "Write char %s [%s]", fmt_bytes(&v, 1), tag); @@ -362,6 +389,31 @@ bool BinarySerializationFormat::Write(const IPPrefix& prefix, const char* tag) return Write(prefix.Prefix(), "prefix") && Write(prefix.Length(), "width"); } +bool BinarySerializationFormat::Write(struct in_addr& addr, const char* tag) + { + const uint32_t* bytes; + bytes = (uint32_t*) &addr.s_addr; + + if ( ! Write(ntohl(bytes[0]), "addr4") ) + return false; + + return true; + } + +bool BinarySerializationFormat::Write(struct in6_addr& addr, const char* tag) + { + const uint32_t* bytes; + bytes = (uint32_t*) &addr.s6_addr; + + for ( int i = 0; i < 4; ++i ) + { + if ( ! 
Write(ntohl(bytes[i]), "addr6-part") ) + return false; + } + + return true; + } + bool BinarySerializationFormat::WriteOpenTag(const char* tag) { return true; @@ -464,6 +516,18 @@ bool XMLSerializationFormat::Read(IPPrefix* prefix, const char* tag) return false; } +bool XMLSerializationFormat::Read(struct in_addr* addr, const char* tag) + { + reporter->InternalError("no reading of xml"); + return false; + } + +bool XMLSerializationFormat::Read(struct in6_addr* addr, const char* tag) + { + reporter->InternalError("no reading of xml"); + return false; + } + bool XMLSerializationFormat::Write(char v, const char* tag) { return WriteElem(tag, "char", &v, 1); @@ -556,6 +620,18 @@ bool XMLSerializationFormat::Write(const IPPrefix& prefix, const char* tag) return false; } +bool XMLSerializationFormat::Write(struct in_addr& addr, const char* tag) + { + reporter->InternalError("XML output of in_addr not implemented"); + return false; + } + +bool XMLSerializationFormat::Write(struct in6_addr& addr, const char* tag) + { + reporter->InternalError("XML output of in6_addr not implemented"); + return false; + } + bool XMLSerializationFormat::WriteEncodedString(const char* s, int len) { while ( len-- ) diff --git a/src/SerializationFormat.h b/src/SerializationFormat.h index f5eb77c608..ba5ad195a2 100644 --- a/src/SerializationFormat.h +++ b/src/SerializationFormat.h @@ -9,6 +9,9 @@ using namespace std; #include "util.h" +class IPAddr; +class IPPrefix; + // Abstract base class. class SerializationFormat { public: @@ -30,6 +33,8 @@ public: virtual bool Read(string* s, const char* tag) = 0; virtual bool Read(IPAddr* addr, const char* tag) = 0; virtual bool Read(IPPrefix* prefix, const char* tag) = 0; + virtual bool Read(struct in_addr* addr, const char* tag) = 0; + virtual bool Read(struct in6_addr* addr, const char* tag) = 0; // Returns number of raw bytes read since last call to StartRead(). 
int BytesRead() const { return bytes_read; } @@ -54,6 +59,8 @@ public: virtual bool Write(const string& s, const char* tag) = 0; virtual bool Write(const IPAddr& addr, const char* tag) = 0; virtual bool Write(const IPPrefix& prefix, const char* tag) = 0; + virtual bool Write(struct in_addr& addr, const char* tag) = 0; + virtual bool Write(struct in6_addr& addr, const char* tag) = 0; virtual bool WriteOpenTag(const char* tag) = 0; virtual bool WriteCloseTag(const char* tag) = 0; @@ -96,6 +103,8 @@ public: virtual bool Read(string* s, const char* tag); virtual bool Read(IPAddr* addr, const char* tag); virtual bool Read(IPPrefix* prefix, const char* tag); + virtual bool Read(struct in_addr* addr, const char* tag); + virtual bool Read(struct in6_addr* addr, const char* tag); virtual bool Write(int v, const char* tag); virtual bool Write(uint16 v, const char* tag); virtual bool Write(uint32 v, const char* tag); @@ -109,6 +118,8 @@ public: virtual bool Write(const string& s, const char* tag); virtual bool Write(const IPAddr& addr, const char* tag); virtual bool Write(const IPPrefix& prefix, const char* tag); + virtual bool Write(struct in_addr& addr, const char* tag); + virtual bool Write(struct in6_addr& addr, const char* tag); virtual bool WriteOpenTag(const char* tag); virtual bool WriteCloseTag(const char* tag); virtual bool WriteSeparator(); @@ -133,6 +144,8 @@ public: virtual bool Write(const string& s, const char* tag); virtual bool Write(const IPAddr& addr, const char* tag); virtual bool Write(const IPPrefix& prefix, const char* tag); + virtual bool Write(struct in_addr& addr, const char* tag); + virtual bool Write(struct in6_addr& addr, const char* tag); virtual bool WriteOpenTag(const char* tag); virtual bool WriteCloseTag(const char* tag); virtual bool WriteSeparator(); @@ -150,6 +163,8 @@ public: virtual bool Read(string* s, const char* tag); virtual bool Read(IPAddr* addr, const char* tag); virtual bool Read(IPPrefix* prefix, const char* tag); + virtual bool Read(struct in_addr* addr, const char* tag); + virtual bool Read(struct in6_addr* addr, const char* tag); private: // Encodes non-printable characters. diff --git a/src/Val.cc b/src/Val.cc index db6e9eb23a..83bbc59b9d 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -606,7 +606,7 @@ ID* MutableVal::Bind() const ip = htonl(0x7f000001); // 127.0.0.1 safe_snprintf(name, MAX_NAME_SIZE, "#%s#%d#", - IPAddr(IPAddr::IPv4, &ip, IPAddr::Network)->AsString().c_str(), + IPAddr(IPv4, &ip, IPAddr::Network)->AsString().c_str(), getpid()); #else safe_snprintf(name, MAX_NAME_SIZE, "#%s#%d#", host, getpid()); @@ -864,12 +864,12 @@ AddrVal::AddrVal(const char* text) : Val(TYPE_ADDR) AddrVal::AddrVal(uint32 addr) : Val(TYPE_ADDR) { // ### perhaps do gethostbyaddr here? 
- val.addr_val = new IPAddr(IPAddr::IPv4, &addr, IPAddr::Network); + val.addr_val = new IPAddr(IPv4, &addr, IPAddr::Network); } AddrVal::AddrVal(const uint32 addr[4]) : Val(TYPE_ADDR) { - val.addr_val = new IPAddr(IPAddr::IPv6, addr, IPAddr::Network); + val.addr_val = new IPAddr(IPv6, addr, IPAddr::Network); } AddrVal::AddrVal(const IPAddr& addr) : Val(TYPE_ADDR) @@ -889,7 +889,7 @@ unsigned int AddrVal::MemoryAllocation() const Val* AddrVal::SizeVal() const { - if ( val.addr_val->GetFamily() == IPAddr::IPv4 ) + if ( val.addr_val->GetFamily() == IPv4 ) return new Val(32, TYPE_COUNT); else return new Val(128, TYPE_COUNT); @@ -933,13 +933,13 @@ SubNetVal::SubNetVal(const char* text, int width) : Val(TYPE_SUBNET) SubNetVal::SubNetVal(uint32 addr, int width) : Val(TYPE_SUBNET) { - IPAddr a(IPAddr::IPv4, &addr, IPAddr::Network); + IPAddr a(IPv4, &addr, IPAddr::Network); val.subnet_val = new IPPrefix(a, width); } SubNetVal::SubNetVal(const uint32* addr, int width) : Val(TYPE_SUBNET) { - IPAddr a(IPAddr::IPv6, addr, IPAddr::Network); + IPAddr a(IPv6, addr, IPAddr::Network); val.subnet_val = new IPPrefix(a, width); } @@ -953,6 +953,16 @@ SubNetVal::~SubNetVal() delete val.subnet_val; } +const IPAddr& SubNetVal::Prefix() const + { + return val.subnet_val->Prefix(); + } + +int SubNetVal::Width() const + { + return val.subnet_val->Length(); + } + unsigned int SubNetVal::MemoryAllocation() const { return padded_sizeof(*this) + val.subnet_val->MemoryAllocation(); @@ -978,7 +988,7 @@ IPAddr SubNetVal::Mask() const uint32 m[4]; for ( unsigned int i = 0; i < 4; ++i ) m[i] = 0; - IPAddr rval(IPAddr::IPv6, m, IPAddr::Host); + IPAddr rval(IPv6, m, IPAddr::Host); return rval; } @@ -994,7 +1004,7 @@ IPAddr SubNetVal::Mask() const while ( ++mp < m + 4 ) *mp = 0; - IPAddr rval(IPAddr::IPv6, m, IPAddr::Host); + IPAddr rval(IPv6, m, IPAddr::Host); return rval; } diff --git a/src/Val.h b/src/Val.h index 415996d97a..e939e51cdc 100644 --- a/src/Val.h +++ b/src/Val.h @@ -513,10 +513,6 @@ protected: #define UDP_PORT_MASK 0x20000 #define ICMP_PORT_MASK 0x30000 -typedef enum { - TRANSPORT_UNKNOWN, TRANSPORT_TCP, TRANSPORT_UDP, TRANSPORT_ICMP, -} TransportProto; - class PortVal : public Val { public: // Constructors - both take the port number in host order. @@ -588,8 +584,8 @@ public: Val* SizeVal() const; - const IPAddr& Prefix() const { return val.subnet_val->Prefix(); } - int Width() const { return val.subnet_val->Length(); } + const IPAddr& Prefix() const; + int Width() const; IPAddr Mask() const; bool Contains(const IPAddr& addr) const; diff --git a/src/bro.bif b/src/bro.bif index ff06288940..684b888202 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -1949,7 +1949,7 @@ function is_local_interface%(ip: addr%) : bool if ( ent ) { for ( unsigned int len = 0; ent->h_addr_list[len]; ++len ) - addrs.push_back(IPAddr(IPAddr::IPv4, (uint32*)ent->h_addr_list[len], + addrs.push_back(IPAddr(IPv4, (uint32*)ent->h_addr_list[len], IPAddr::Network)); } @@ -1958,7 +1958,7 @@ function is_local_interface%(ip: addr%) : bool if ( ent ) { for ( unsigned int len = 0; ent->h_addr_list[len]; ++len ) - addrs.push_back(IPAddr(IPAddr::IPv6, (uint32*)ent->h_addr_list[len], + addrs.push_back(IPAddr(IPv6, (uint32*)ent->h_addr_list[len], IPAddr::Network)); } @@ -2024,7 +2024,7 @@ function gethostname%(%) : string ## Returns: true if *a* is an IPv4 address, else false. 
function is_v4_addr%(a: addr%): bool %{ - if ( a->AsAddr().GetFamily() == IPAddr::IPv4 ) + if ( a->AsAddr().GetFamily() == IPv4 ) return new Val(1, TYPE_BOOL); else return new Val(0, TYPE_BOOL); @@ -2037,7 +2037,7 @@ function is_v4_addr%(a: addr%): bool ## Returns: true if *a* is an IPv6 address, else false. function is_v6_addr%(a: addr%): bool %{ - if ( a->AsAddr().GetFamily() == IPAddr::IPv6 ) + if ( a->AsAddr().GetFamily() == IPv6 ) return new Val(1, TYPE_BOOL); else return new Val(0, TYPE_BOOL); @@ -3522,7 +3522,7 @@ function lookup_location%(a: addr%) : geo_location } #ifdef HAVE_GEOIP_COUNTRY_EDITION_V6 - if ( geoip_v6 && a->AsAddr().GetFamily() == IPAddr::IPv6 ) + if ( geoip_v6 && a->AsAddr().GetFamily() == IPv6 ) { geoipv6_t ga; a->AsAddr().CopyIPv6(&ga); @@ -3534,7 +3534,7 @@ function lookup_location%(a: addr%) : geo_location else #endif - if ( geoip && a->AsAddr().GetFamily() == IPAddr::IPv4 ) + if ( geoip && a->AsAddr().GetFamily() == IPv4 ) { const uint32* bytes; a->AsAddr().GetBytes(&bytes); @@ -3617,7 +3617,7 @@ function lookup_asn%(a: addr%) : count { // IPv6 support showed up in 1.4.5. #ifdef HAVE_GEOIP_COUNTRY_EDITION_V6 - if ( a->AsAddr().GetFamily() == IPAddr::IPv6 ) + if ( a->AsAddr().GetFamily() == IPv6 ) { geoipv6_t ga; a->AsAddr().CopyIPv6(&ga); @@ -3626,7 +3626,7 @@ function lookup_asn%(a: addr%) : count else #endif - if ( a->AsAddr().GetFamily() == IPAddr::IPv4 ) + if ( a->AsAddr().GetFamily() == IPv4 ) { const uint32* bytes; a->AsAddr().GetBytes(&bytes); @@ -5353,7 +5353,7 @@ function preserve_prefix%(a: addr, width: count%): any AnonymizeIPAddr* ip_anon = ip_anonymizer[PREFIX_PRESERVING_A50]; if ( ip_anon ) { - if ( a->AsAddr().GetFamily() == IPAddr::IPv6 ) + if ( a->AsAddr().GetFamily() == IPv6 ) builtin_error("preserve_prefix() not supported for IPv6 addresses"); else { @@ -5382,7 +5382,7 @@ function preserve_subnet%(a: subnet%): any AnonymizeIPAddr* ip_anon = ip_anonymizer[PREFIX_PRESERVING_A50]; if ( ip_anon ) { - if ( a->AsSubNet().Prefix().GetFamily() == IPAddr::IPv6 ) + if ( a->AsSubNet().Prefix().GetFamily() == IPv6 ) builtin_error("preserve_subnet() not supported for IPv6 addresses"); else { @@ -5418,7 +5418,7 @@ function anonymize_addr%(a: addr, cl: IPAddrAnonymizationClass%): addr if ( anon_class < 0 || anon_class >= NUM_ADDR_ANONYMIZATION_CLASSES ) builtin_error("anonymize_addr(): invalid ip addr anonymization class"); - if ( a->AsAddr().GetFamily() == IPAddr::IPv6 ) + if ( a->AsAddr().GetFamily() == IPv6 ) { builtin_error("anonymize_addr() not supported for IPv6 addresses"); return 0; diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index ca9ec1c3c4..0753296cb4 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -862,11 +862,11 @@ threading::Value* Manager::ValToLogVal(Val* val, BroType* ty) break; case TYPE_SUBNET: - lval->val.subnet_val = new IPPrefix(val->AsSubNet()); + val->AsSubNet().ConvertToThreadingValue(&lval->val.subnet_val); break; case TYPE_ADDR: - lval->val.addr_val = new IPAddr(val->AsAddr()); + val->AsAddr().ConvertToThreadingValue(&lval->val.addr_val); break; case TYPE_DOUBLE: diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index f4e48ebaef..7c71c09604 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -242,4 +242,40 @@ bool WriterBackend::DoHeartbeat(double network_time, double current_time) return true; } +string WriterBackend::Render(const threading::Value::addr_t& addr) const + { + if ( addr.family == IPv4 ) + { + char s[INET_ADDRSTRLEN]; + + if ( 
inet_ntop(AF_INET, &addr.in.in4, s, INET_ADDRSTRLEN) == NULL ) + return ""; + else + return s; + } + else + { + char s[INET6_ADDRSTRLEN]; + + if ( inet_ntop(AF_INET6, &addr.in.in6, s, INET6_ADDRSTRLEN) == NULL ) + return ""; + else + return s; + } + } + +string WriterBackend::Render(const threading::Value::subnet_t& subnet) const + { + char l[16]; + + if ( subnet.prefix.family == IPv4 ) + modp_uitoa10(subnet.length - 96, l); + else + modp_uitoa10(subnet.length, l); + + string s = Render(subnet.prefix) + "/" + l; + + return s; + } + diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index f9653d6b69..efb3b5d95e 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -158,6 +158,21 @@ public: bool FinishedRotation(string new_name, string old_name, double open, double close, bool terminating); + /** Helper method to render an IP address as a string. + * + * @param addr The address. + * + * @return An ASCII representation of the address. + */ + string Render(const threading::Value::addr_t& addr) const; + + /** Helper method to render an subnet value as a string. + * + * @param addr The address. + * + * @return An ASCII representation of the address. + */ + string Render(const threading::Value::subnet_t& subnet) const; protected: /** diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index e5bfc205be..0759e60a82 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -177,11 +177,11 @@ bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) break; case TYPE_SUBNET: - desc->Add(*val->val.subnet_val); + desc->Add(Render(val->val.subnet_val)); break; case TYPE_ADDR: - desc->Add(*val->val.addr_val); + desc->Add(Render(val->val.addr_val)); break; case TYPE_TIME: diff --git a/src/net_util.cc b/src/net_util.cc index f78246c634..5e403a349f 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -162,8 +162,8 @@ const char* fmt_conn_id(const IPAddr& src_addr, uint32 src_port, const char* fmt_conn_id(const uint32* src_addr, uint32 src_port, const uint32* dst_addr, uint32 dst_port) { - IPAddr src(IPAddr::IPv6, src_addr, IPAddr::Network); - IPAddr dst(IPAddr::IPv6, dst_addr, IPAddr::Network); + IPAddr src(IPv6, src_addr, IPAddr::Network); + IPAddr dst(IPv6, dst_addr, IPAddr::Network); return fmt_conn_id(src, src_port, dst, dst_port); } diff --git a/src/net_util.h b/src/net_util.h index 8787340328..f61340869a 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -5,6 +5,13 @@ #include "config.h" +// Define first. +typedef enum { + TRANSPORT_UNKNOWN, TRANSPORT_TCP, TRANSPORT_UDP, TRANSPORT_ICMP, +} TransportProto; + +typedef enum { IPv4, IPv6 } IPFamily; + #include #include @@ -21,7 +28,6 @@ #include #include "util.h" -#include "IPAddr.h" #ifdef HAVE_NETINET_IP6_H #include @@ -58,6 +64,8 @@ inline int seq_delta(uint32 a, uint32 b) return int(a-b); } +class IPAddr; + // Returns the ones-complement checksum of a chunk of b short-aligned bytes. 
extern int ones_complement_checksum(const void* p, int b, uint32 sum); extern int ones_complement_checksum(const IPAddr& a, uint32 sum); diff --git a/src/threading/SerialTypes.cc b/src/threading/SerialTypes.cc index d3735e34f3..32569a5442 100644 --- a/src/threading/SerialTypes.cc +++ b/src/threading/SerialTypes.cc @@ -30,12 +30,6 @@ Value::~Value() && present ) delete val.string_val; - if ( type == TYPE_ADDR && present ) - delete val.addr_val; - - if ( type == TYPE_SUBNET && present ) - delete val.subnet_val; - if ( type == TYPE_TABLE && present ) { for ( int i = 0; i < val.set_val.size; i++ ) @@ -130,8 +124,8 @@ bool Value::Read(SerializationFormat* fmt) if ( ! (fmt->Read(&val.port_val.port, "port") && fmt->Read(&proto, "proto") ) ) { return false; } - - switch (proto) { + + switch ( proto ) { case 0: val.port_val.proto = TRANSPORT_UNKNOWN; break; @@ -147,20 +141,35 @@ bool Value::Read(SerializationFormat* fmt) default: return false; } + return true; } - - case TYPE_SUBNET: - { - val.subnet_val = new IPPrefix; - return fmt->Read(val.subnet_val, "subnet"); - } - case TYPE_ADDR: { - val.addr_val = new IPAddr; - return fmt->Read(val.addr_val, "addr"); + int family; + + if ( ! fmt->Read(&family, "addr-family") ) + return false; + + switch ( family ) { + case 4: + val.addr_val.family = IPv4; + return fmt->Read(&val.addr_val.in.in4, "addr-in4"); + + case 6: + val.addr_val.family = IPv6; + return fmt->Read(&val.addr_val.in.in6, "addr-in6"); + + } + + // Can't be reached. + abort(); + } + + case TYPE_SUBNET: + { + // FIXME. } case TYPE_DOUBLE: @@ -239,13 +248,27 @@ bool Value::Write(SerializationFormat* fmt) const return fmt->Write(val.uint_val, "uint"); case TYPE_PORT: - return fmt->Write(val.port_val.port, "port") && fmt->Write(val.port_val.proto, "proto"); + return fmt->Write(val.port_val.port, "port") && fmt->Write(val.port_val.proto, "proto"); case TYPE_SUBNET: - return fmt->Write(*val.subnet_val, "subnet"); + return false; // FIXME. case TYPE_ADDR: - return fmt->Write(*val.addr_val, "addr"); + { + switch ( val.addr_val.family ) { + case IPv4: + return fmt->Write((int)4, "addr-family") + && fmt->Write(val.addr_val.in.in4, "addr-in4"); + + case IPv6: + return fmt->Write((int)6, "addr-family") + && fmt->Write(val.addr_val.in.in6, "addr-in6"); + break; + } + + // Can't be reached. + abort(); + } case TYPE_DOUBLE: case TYPE_TIME: diff --git a/src/threading/SerialTypes.h b/src/threading/SerialTypes.h index c53ca37dc0..adff2035d7 100644 --- a/src/threading/SerialTypes.h +++ b/src/threading/SerialTypes.h @@ -2,10 +2,13 @@ #ifndef THREADING_SERIALIZATIONTYPES_H #define THREADING_SERIALIZATIONTYPES_H -#include "../RemoteSerializer.h" - using namespace std; +#include "Type.h" +#include "net_util.h" + +class SerializationFormat; + namespace threading { /** @@ -62,6 +65,16 @@ struct Value { typedef set_t vec_t; struct port_t { bro_uint_t port; TransportProto proto; }; + struct addr_t { + IPFamily family; + union { + struct in_addr in4; + struct in6_addr in6; + } in; + }; + + struct subnet_t { addr_t prefix; uint8_t length; }; + /** * This union is a subset of BroValUnion, including only the types we * can log directly. See IsCompatibleType(). 
@@ -73,8 +86,8 @@ struct Value { double double_val; set_t set_val; vec_t vector_val; - IPAddr* addr_val; - IPPrefix* subnet_val; + addr_t addr_val; + subnet_t subnet_val; string* string_val; } val; @@ -120,6 +133,7 @@ struct Value { static bool IsCompatibleType(BroType* t, bool atomic_only=false); private: +friend class ::IPAddr; Value(const Value& other) { } // Disabled. }; From 6a3d0147a89aed473c9151d72cced6cbbb76a8e0 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 29 Feb 2012 11:29:28 -0600 Subject: [PATCH 111/651] Fix compile failure after merge from master --- src/ICMP.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ICMP.cc b/src/ICMP.cc index 7883686b00..b2b8f092f1 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -270,7 +270,7 @@ TransportProto ICMP_Analyzer::GetContextProtocol(const IP_Hdr* ip_hdr, uint32* s RecordVal* ICMP_Analyzer::ExtractICMP4Context(int len, const u_char*& data) { - const IP_Hdr ip_hdr_data((const struct ip*) data); + const IP_Hdr ip_hdr_data((const struct ip*) data, false); const IP_Hdr* ip_hdr = &ip_hdr_data; uint32 ip_hdr_len = ip_hdr->HdrLen(); @@ -338,7 +338,7 @@ RecordVal* ICMP_Analyzer::ExtractICMP4Context(int len, const u_char*& data) RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) { - const IP_Hdr ip_hdr_data((const struct ip6_hdr*) data); + const IP_Hdr ip_hdr_data((const struct ip6_hdr*) data, false); const IP_Hdr* ip_hdr = &ip_hdr_data; int DF = 0, MF = 0, bad_hdr_len = 0, bad_checksum = 0; TransportProto proto = TRANSPORT_UNKNOWN; From df874f0f6284ea8c916633754430a0e37f45c69b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 29 Feb 2012 14:32:29 -0800 Subject: [PATCH 112/651] A number of bugfixes for the recent threading updates. All tests pass now except one: scripts.base.frameworks.metrics.cluster-intermediate-update Couldn't figure out yet why that still fails. --- src/IPAddr.h | 6 ++--- src/SerializationFormat.cc | 15 +++++------ src/SerializationFormat.h | 12 ++++----- src/threading/Manager.cc | 7 +++-- src/threading/SerialTypes.cc | 51 +++++++++++++++++++++++++++++++----- 5 files changed, 63 insertions(+), 28 deletions(-) diff --git a/src/IPAddr.h b/src/IPAddr.h index 67d6f2112e..8e1921e07b 100644 --- a/src/IPAddr.h +++ b/src/IPAddr.h @@ -47,7 +47,7 @@ public: * * @param in6 The IPv6 address. */ - IPAddr(const in4_addr& in4) + explicit IPAddr(const in4_addr& in4) { memcpy(in6.s6_addr, v4_mapped_prefix, sizeof(v4_mapped_prefix)); memcpy(&in6.s6_addr[12], &in4.s_addr, sizeof(in4.s_addr)); @@ -58,7 +58,7 @@ public: * * @param in6 The IPv6 address. */ - IPAddr(const in6_addr& arg_in6) : in6(arg_in6) { } + explicit IPAddr(const in6_addr& arg_in6) : in6(arg_in6) { } /** * Constructs an address instance from a string representation. @@ -523,8 +523,6 @@ public: */ string AsString() const; - /** Converts the address into the type used internally by the inter-thread communicastion. 
- */ operator std::string() const { return AsString(); } /** diff --git a/src/SerializationFormat.cc b/src/SerializationFormat.cc index ef2dc80cd7..10dd4f29ea 100644 --- a/src/SerializationFormat.cc +++ b/src/SerializationFormat.cc @@ -295,7 +295,6 @@ bool BinarySerializationFormat::Read(struct in6_addr* addr, const char* tag) return true; } - bool BinarySerializationFormat::Write(char v, const char* tag) { DBG_LOG(DBG_SERIAL, "Write char %s [%s]", fmt_bytes(&v, 1), tag); @@ -389,10 +388,9 @@ bool BinarySerializationFormat::Write(const IPPrefix& prefix, const char* tag) return Write(prefix.Prefix(), "prefix") && Write(prefix.Length(), "width"); } -bool BinarySerializationFormat::Write(struct in_addr& addr, const char* tag) +bool BinarySerializationFormat::Write(const struct in_addr& addr, const char* tag) { - const uint32_t* bytes; - bytes = (uint32_t*) &addr.s_addr; + const uint32_t* bytes = (uint32_t*) &addr.s_addr; if ( ! Write(ntohl(bytes[0]), "addr4") ) return false; @@ -400,10 +398,9 @@ bool BinarySerializationFormat::Write(struct in_addr& addr, const char* tag) return true; } -bool BinarySerializationFormat::Write(struct in6_addr& addr, const char* tag) +bool BinarySerializationFormat::Write(const struct in6_addr& addr, const char* tag) { - const uint32_t* bytes; - bytes = (uint32_t*) &addr.s6_addr; + const uint32_t* bytes = (uint32_t*) &addr.s6_addr; for ( int i = 0; i < 4; ++i ) { @@ -620,13 +617,13 @@ bool XMLSerializationFormat::Write(const IPPrefix& prefix, const char* tag) return false; } -bool XMLSerializationFormat::Write(struct in_addr& addr, const char* tag) +bool XMLSerializationFormat::Write(const struct in_addr& addr, const char* tag) { reporter->InternalError("XML output of in_addr not implemented"); return false; } -bool XMLSerializationFormat::Write(struct in6_addr& addr, const char* tag) +bool XMLSerializationFormat::Write(const struct in6_addr& addr, const char* tag) { reporter->InternalError("XML output of in6_addr not implemented"); return false; diff --git a/src/SerializationFormat.h b/src/SerializationFormat.h index ba5ad195a2..f270b61bae 100644 --- a/src/SerializationFormat.h +++ b/src/SerializationFormat.h @@ -59,8 +59,8 @@ public: virtual bool Write(const string& s, const char* tag) = 0; virtual bool Write(const IPAddr& addr, const char* tag) = 0; virtual bool Write(const IPPrefix& prefix, const char* tag) = 0; - virtual bool Write(struct in_addr& addr, const char* tag) = 0; - virtual bool Write(struct in6_addr& addr, const char* tag) = 0; + virtual bool Write(const struct in_addr& addr, const char* tag) = 0; + virtual bool Write(const struct in6_addr& addr, const char* tag) = 0; virtual bool WriteOpenTag(const char* tag) = 0; virtual bool WriteCloseTag(const char* tag) = 0; @@ -118,8 +118,8 @@ public: virtual bool Write(const string& s, const char* tag); virtual bool Write(const IPAddr& addr, const char* tag); virtual bool Write(const IPPrefix& prefix, const char* tag); - virtual bool Write(struct in_addr& addr, const char* tag); - virtual bool Write(struct in6_addr& addr, const char* tag); + virtual bool Write(const struct in_addr& addr, const char* tag); + virtual bool Write(const struct in6_addr& addr, const char* tag); virtual bool WriteOpenTag(const char* tag); virtual bool WriteCloseTag(const char* tag); virtual bool WriteSeparator(); @@ -144,8 +144,8 @@ public: virtual bool Write(const string& s, const char* tag); virtual bool Write(const IPAddr& addr, const char* tag); virtual bool Write(const IPPrefix& prefix, const char* tag); - virtual bool 
Write(struct in_addr& addr, const char* tag); - virtual bool Write(struct in6_addr& addr, const char* tag); + virtual bool Write(const struct in_addr& addr, const char* tag); + virtual bool Write(const struct in6_addr& addr, const char* tag); virtual bool WriteOpenTag(const char* tag); virtual bool WriteCloseTag(const char* tag); virtual bool WriteSeparator(); diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index d008d2e5e8..24e100fe37 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -106,8 +106,11 @@ void Manager::Process() Message* msg = t->RetrieveOut(); - if ( msg->Process() && network_time ) - did_process = true; + if ( msg->Process() ) + { + if ( network_time ) + did_process = true; + } else { diff --git a/src/threading/SerialTypes.cc b/src/threading/SerialTypes.cc index 32569a5442..a5692b2ffd 100644 --- a/src/threading/SerialTypes.cc +++ b/src/threading/SerialTypes.cc @@ -147,7 +147,7 @@ bool Value::Read(SerializationFormat* fmt) case TYPE_ADDR: { - int family; + char family; if ( ! fmt->Read(&family, "addr-family") ) return false; @@ -169,7 +169,27 @@ bool Value::Read(SerializationFormat* fmt) case TYPE_SUBNET: { - // FIXME. + char length; + char family; + + if ( ! (fmt->Read(&length, "subnet-len") && fmt->Read(&family, "subnet-family")) ) + return false; + + switch ( family ) { + case 4: + val.subnet_val.length = (uint8_t)length; + val.subnet_val.prefix.family = IPv4; + return fmt->Read(&val.subnet_val.prefix.in.in4, "subnet-in4"); + + case 6: + val.subnet_val.length = (uint8_t)length; + val.subnet_val.prefix.family = IPv6; + return fmt->Read(&val.subnet_val.prefix.in.in6, "subnet-in6"); + + } + + // Can't be reached. + abort(); } case TYPE_DOUBLE: @@ -250,18 +270,15 @@ bool Value::Write(SerializationFormat* fmt) const case TYPE_PORT: return fmt->Write(val.port_val.port, "port") && fmt->Write(val.port_val.proto, "proto"); - case TYPE_SUBNET: - return false; // FIXME. - case TYPE_ADDR: { switch ( val.addr_val.family ) { case IPv4: - return fmt->Write((int)4, "addr-family") + return fmt->Write((char)4, "addr-family") && fmt->Write(val.addr_val.in.in4, "addr-in4"); case IPv6: - return fmt->Write((int)6, "addr-family") + return fmt->Write((char)6, "addr-family") && fmt->Write(val.addr_val.in.in6, "addr-in6"); break; } @@ -270,6 +287,26 @@ bool Value::Write(SerializationFormat* fmt) const abort(); } + case TYPE_SUBNET: + { + if ( ! fmt->Write((char)val.subnet_val.length, "subnet-length") ) + return false; + + switch ( val.subnet_val.prefix.family ) { + case IPv4: + return fmt->Write((char)4, "subnet-family") + && fmt->Write(val.subnet_val.prefix.in.in4, "subnet-in4"); + + case IPv6: + return fmt->Write((char)6, "subnet-family") + && fmt->Write(val.subnet_val.prefix.in.in6, "subnet-in6"); + break; + } + + // Can't be reached. + abort(); + } + case TYPE_DOUBLE: case TYPE_TIME: case TYPE_INTERVAL: From 6df9004423dc0ea06aebe8201f4ca4bb937eda09 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 29 Feb 2012 16:06:54 -0800 Subject: [PATCH 113/651] Updating submodule(s). 
[nomail] --- aux/broccoli | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broccoli b/aux/broccoli index 3b63c3f1e7..4d2dde5573 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 3b63c3f1e7d915b1bda16862bfa4a8593ffc38f6 +Subproject commit 4d2dde55733ed86ea3f2db8df5b78b0bcfbb54c4 From 355c447698dd86fc6a7e78694318a911bb7ee3e4 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 29 Feb 2012 16:08:37 -0800 Subject: [PATCH 114/651] Updating submodule(s). [nomail] --- aux/broccoli | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broccoli b/aux/broccoli index 4d2dde5573..d6e36c95e0 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 4d2dde55733ed86ea3f2db8df5b78b0bcfbb54c4 +Subproject commit d6e36c95e0335f7cc081191c8612085bd12706f9 From 56dd7918d0475df364b47b6133745bbb598536fc Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 29 Feb 2012 16:46:21 -0800 Subject: [PATCH 115/651] Adding missing includes needed on FreeBSD. --- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- src/threading/SerialTypes.h | 4 ++++ 5 files changed, 8 insertions(+), 4 deletions(-) diff --git a/aux/binpac b/aux/binpac index 43308aab47..3034da8f08 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 43308aab47a3357ca1885e1b6954154a2744d821 +Subproject commit 3034da8f082b61157e234237993ffd7a95be6e62 diff --git a/aux/bro-aux b/aux/bro-aux index 139cc2e1e0..f53bcb2b49 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 139cc2e1e049c4e1cc7e95f20866102be1d3d599 +Subproject commit f53bcb2b492cb0db3dd288384040abc2ab711767 diff --git a/aux/broccoli b/aux/broccoli index d6e36c95e0..2602eb53e7 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit d6e36c95e0335f7cc081191c8612085bd12706f9 +Subproject commit 2602eb53e70d7f0afae8fac58d7636b9291974a4 diff --git a/aux/broctl b/aux/broctl index e908ba686d..954538514d 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit e908ba686dceb56065bdf569c18dd0f67f662f6b +Subproject commit 954538514d71983e7ef3f0e109960466096e1c1d diff --git a/src/threading/SerialTypes.h b/src/threading/SerialTypes.h index adff2035d7..db7dc837bd 100644 --- a/src/threading/SerialTypes.h +++ b/src/threading/SerialTypes.h @@ -4,6 +4,10 @@ using namespace std; +#include +#include +#include + #include "Type.h" #include "net_util.h" From 6429d1248adf0caa275187d8e204b52638ab3332 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 1 Mar 2012 16:00:30 -0800 Subject: [PATCH 116/651] Prevent manager from busy looping. I saw this with the new threading code but I'm wondering if it also helps with the "high CPU usage with low traffic volume" problem. --- src/Net.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Net.cc b/src/Net.cc index d93f1e1a85..2ebbfb20ed 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -486,6 +486,8 @@ void net_run() // since Bro timers are not high-precision anyway.) if ( ! using_communication ) usleep(100000); + else + usleep(1000); // Flawfinder says about usleep: // From 554a29b3edd5e1504c5e611c7b7ff9287abae330 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 1 Mar 2012 16:04:34 -0800 Subject: [PATCH 117/651] Preventing busy looping when no threads have been spawned. 
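For illustration, here is the pattern that this change and the previous one aim at, as a minimal standalone sketch. This is not the actual Net.cc/Manager.cc code; main(), the BasicThread stand-in, and the loop bound are simplified assumptions, while the Manager/AddThread/idle names and the 1000-microsecond back-off mirror the patches below.

#include <unistd.h>
#include <vector>

struct BasicThread { };	// stand-in for the real thread class

class Manager {
public:
	Manager() : idle(true)	{ }

	// Only once a thread has been registered is there anything to poll.
	void AddThread(BasicThread* t)
		{
		all_threads.push_back(t);
		idle = false;
		}

	bool Idle() const	{ return idle; }

private:
	std::vector<BasicThread*> all_threads;
	bool idle;
};

int main()
	{
	Manager mgr;

	for ( int i = 0; i < 100; ++i )
		{
		bool did_work = false;

		if ( ! mgr.Idle() )
			{
			// Drain the per-thread output queues here and set
			// did_work only if a message was actually processed.
			}

		if ( ! did_work )
			usleep(1000);	// back off briefly rather than spin
		}

	return 0;
	}

Starting with idle = true matters because the manager is polled from the main loop: before any thread exists there is nothing to drain, so polling it without a short sleep only burns CPU.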
--- src/threading/Manager.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 24e100fe37..f5770e2dd8 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -10,7 +10,7 @@ Manager::Manager() did_process = true; next_beat = 0; terminating = false; - idle = false; + idle = true; } Manager::~Manager() @@ -58,6 +58,7 @@ void Manager::AddThread(BasicThread* thread) { DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name().c_str()); all_threads.push_back(thread); + idle = false; } void Manager::AddMsgThread(MsgThread* thread) From 6eb9f63e17845099e5c1137ba6e87b7f1821a700 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 2 Mar 2012 12:29:18 -0600 Subject: [PATCH 118/651] Add more icmpv6 events, and general code cleanup --- src/AnalyzerTags.h | 3 +- src/ICMP.cc | 59 ++++++++++---------- src/Sessions.cc | 18 +++---- src/Val.cc | 1 + src/Val.h | 1 - src/event.bif | 130 ++++++++++++++++++++++++++++++++++++++++++--- src/net_util.cc | 6 +-- 7 files changed, 164 insertions(+), 54 deletions(-) diff --git a/src/AnalyzerTags.h b/src/AnalyzerTags.h index 42d2f5626c..dc10a55f22 100644 --- a/src/AnalyzerTags.h +++ b/src/AnalyzerTags.h @@ -20,8 +20,7 @@ namespace AnalyzerTag { PIA_TCP, PIA_UDP, // Transport-layer analyzers. - ICMP, - TCP, UDP, + ICMP, TCP, UDP, // Application-layer analyzers (hand-written). BitTorrent, BitTorrentTracker, diff --git a/src/ICMP.cc b/src/ICMP.cc index b2b8f092f1..2fb1a9daab 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -52,10 +52,10 @@ void ICMP_Analyzer::DeliverPacket(int len, const u_char* data, assert(caplen >= len); // Should have been caught earlier already. if ( ! ignore_checksums ) - { - int chksum = 0; + { + int chksum = 0; - switch ( ip->NextProto() ) + switch ( ip->NextProto() ) { case IPPROTO_ICMP: chksum = icmp_checksum(icmpp, len); @@ -69,11 +69,11 @@ void ICMP_Analyzer::DeliverPacket(int len, const u_char* data, reporter->InternalError("unexpected IP proto in ICMP analyzer"); } - if ( chksum != 0xffff ) - { - Weird("bad_ICMP6_checksum"); - return; - } + if ( chksum != 0xffff ) + { + Weird("bad_ICMP_checksum"); + return; + } } Conn()->SetLastTime(current_timestamp); @@ -147,6 +147,8 @@ void ICMP_Analyzer::NextICMP6(double t, const struct icmp* icmpp, int len, int c break; // Router related messages. + case ND_NEIGHBOR_SOLICIT: + case ND_NEIGHBOR_ADVERT: case ND_REDIRECT: case ND_ROUTER_SOLICIT: case ICMP6_ROUTER_RENUMBERING: @@ -156,17 +158,9 @@ void ICMP_Analyzer::NextICMP6(double t, const struct icmp* icmpp, int len, int c #if 0 // Currently not specifically implemented. - case ICMP6_PARAM_PROB: case MLD_LISTENER_QUERY: case MLD_LISTENER_REPORT: case MLD_LISTENER_REDUCTION: - case ND_NEIGHBOR_SOLICIT: - case ND_NEIGHBOR_ADVERT: - case ND_REDIRECT: - case ICMP6_ROUTER_RENUMBERING: - case ND_NEIGHBOR_SOLICIT: - case ND_NEIGHBOR_ADVERT: - case ICMP6_TIME_EXCEEDED: #endif default: ICMPEvent(icmp_sent, icmpp, len, 1); @@ -221,7 +215,7 @@ TransportProto ICMP_Analyzer::GetContextProtocol(const IP_Hdr* ip_hdr, uint32* s case 1: proto = TRANSPORT_ICMP; break; case 6: proto = TRANSPORT_TCP; break; case 17: proto = TRANSPORT_UDP; break; - case 58: proto = TRANSPORT_ICMP; //TransportProto Hack // XXX What's this? 
+ case 58: proto = TRANSPORT_ICMP; break; default: proto = TRANSPORT_UNKNOWN; break; } @@ -386,15 +380,8 @@ RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) iprec->Assign(0, id_val); iprec->Assign(1, new Val(ip_len, TYPE_COUNT)); - //TransportProto Hack // XXX Likewise. - if ( ip_hdr->NextProto() == 58 || 17 ) //if the encap packet is ICMPv6 we force this... (cause there is no IGMP (by that name) for ICMPv6), rather ugly hack once more - { - iprec->Assign(2, new Val(58, TYPE_COUNT)); - } - else - { - iprec->Assign(2, new Val(proto, TYPE_COUNT)); - } + //if the encap packet is ICMPv6 we force this... (cause there is no IGMP (by that name) for ICMPv6), rather ugly hack once more + iprec->Assign(2, new Val(58, TYPE_COUNT)); iprec->Assign(3, new Val(bad_hdr_len, TYPE_BOOL)); @@ -509,12 +496,21 @@ void ICMP_Analyzer::Router(double t, const struct icmp* icmpp, int len, switch ( icmpp->icmp_type ) { + case ND_NEIGHBOR_ADVERT: + f = icmp_neighbor_advertisement; + break; + case ND_NEIGHBOR_SOLICIT: + f = icmp_neighbor_solicitation; + break; case ND_ROUTER_ADVERT: f = icmp_router_advertisement; break; - - case ND_REDIRECT: case ND_ROUTER_SOLICIT: + f = icmp_router_solicitation; + break; + case ND_REDIRECT: + f = icmp_redirect; + break; case ICMP6_ROUTER_RENUMBERING: default: ICMPEvent(icmp_sent, icmpp, len, 1); @@ -567,11 +563,14 @@ void ICMP_Analyzer::Context6(double t, const struct icmp* icmpp, case ICMP6_DST_UNREACH: f = icmp_unreachable; break; - case ICMP6_PARAM_PROB: + f = icmp_parameter_problem; + break; case ICMP6_TIME_EXCEEDED: + f = icmp_time_exceeded; + break; case ICMP6_PACKET_TOO_BIG: - f = icmp_error_message; + f = icmp_packet_too_big; break; } diff --git a/src/Sessions.cc b/src/Sessions.cc index cd9d7bb250..309e841a06 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -310,8 +310,6 @@ void NetSessions::NextPacketSecondary(double /* t */, const struct pcap_pkthdr* ++num_packets_processed; - - uint32 caplen = hdr->caplen - hdr_size; if ( caplen < sizeof(struct ip) ) { @@ -446,7 +444,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, int proto = ip_hdr->NextProto(); if ( proto != IPPROTO_TCP && proto != IPPROTO_UDP && - proto != IPPROTO_ICMP && proto != IPPROTO_ICMPV6) // Added ICMPV6, Matti + proto != IPPROTO_ICMP && proto != IPPROTO_ICMPV6) { dump_this_packet = 1; return; @@ -489,7 +487,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, caplen -= ip_hdr_len; uint32 min_hdr_len = (proto == IPPROTO_TCP) ? sizeof(struct tcphdr) : - (proto == IPPROTO_UDP ? sizeof(struct udphdr) : ICMP_MINLEN); //needs checking for ICMPV6?, Matti + (proto == IPPROTO_UDP ? sizeof(struct udphdr) : ICMP_MINLEN); if ( len < min_hdr_len ) { @@ -550,12 +548,11 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, break; } - case IPPROTO_ICMPV6: // new case, identical to ICMP, is this correct?? 
Matti + case IPPROTO_ICMPV6: { const struct icmp* icmpp = (const struct icmp *) data; id.src_port = icmpp->icmp_type; - //printf("TYPE: %d\n", id.src_port); //testing, Matti id.dst_port = ICMP6_counterpart(icmpp->icmp_type, icmpp->icmp_code, id.is_one_way); @@ -565,8 +562,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, d = &icmp_conns; break; - } + default: Weird(fmt("unknown_protocol %d", proto), hdr, pkt); return; @@ -735,13 +732,11 @@ Val* NetSessions::BuildHeader(const struct ip* ip) break; } - case IPPROTO_ICMPV6: //Added, Matti + case IPPROTO_ICMPV6: { const struct icmp* icmpp = (const struct icmp *) data; RecordVal* icmp_hdr = new RecordVal(icmp_hdr_type); - //printf("datalen:%d",data_len); //Testing, Matti - icmp_hdr->Assign(0, new Val(icmpp->icmp_type, TYPE_COUNT)); pkt_hdr->Assign(3, icmp_hdr); @@ -1065,7 +1060,7 @@ Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, case IPPROTO_UDP: tproto = TRANSPORT_UDP; break; - case IPPROTO_ICMPV6: //TransportProto Hack + case IPPROTO_ICMPV6: tproto = TRANSPORT_ICMP; break; default: @@ -1150,6 +1145,7 @@ bool NetSessions::IsLikelyServerPort(uint32 port, TransportProto proto) const port |= UDP_PORT_MASK; else if ( proto == TRANSPORT_ICMP ) port |= ICMP_PORT_MASK; + return port_cache.find(port) != port_cache.end(); } diff --git a/src/Val.cc b/src/Val.cc index e19fffa379..db6e9eb23a 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -788,6 +788,7 @@ PortVal::PortVal(uint32 p, TransportProto port_type) : Val(TYPE_PORT) case TRANSPORT_ICMP: p |= ICMP_PORT_MASK; break; + default: break; // "other" } diff --git a/src/Val.h b/src/Val.h index fe6164e48f..ae8c53308e 100644 --- a/src/Val.h +++ b/src/Val.h @@ -534,7 +534,6 @@ public: int IsUDP() const; int IsICMP() const; - TransportProto PortType() const { if ( IsTCP() ) diff --git a/src/event.bif b/src/event.bif index db7464db55..d4148a2611 100644 --- a/src/event.bif +++ b/src/event.bif @@ -798,6 +798,24 @@ event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, ## icmp_time_exceeded icmp_unreachable event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); +## Generated for all ICMP error messages that are not handled separately with dedicated +## ICMP events. Bro's ICMP analyzer handles a number of ICMP messages directly +## with dedicated events. This handler acts as a fallback for those it doesn't. +## The *icmp* record provides more information about the message. +## +## See `Wikipedia +## `__ for more +## information about the ICMP protocol. +## +## c: The connection record for the corresponding ICMP flow. +## +## icmp: Additional ICMP-specific information augmenting the standard +## connection record *c*. +## +## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect +## icmp_time_exceeded icmp_unreachable +event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); + ## Generated for ICMP *destination unreachable* messages. 
## ## See `Wikipedia @@ -821,13 +839,28 @@ event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, pa ## icmp_time_exceeded event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); -event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); -event icmp_router_advertisement%(c: connection, icmp: icmp_conn%); - - - -event icmp6_placeholder%(c: connection, icmp: icmp_conn, ICMP6: bool%); - +## Generated for ICMP *packet too big* messages. +## +## See `Wikipedia +## `__ for more +## information about the ICMP protocol. +## +## c: The connection record for the corresponding ICMP flow. +## +## icmp: Additional ICMP-specific information augmenting the standard connection +## record *c*. +## +## code: The ICMP code of the *too big* message. +## +## context: A record with specifics of the original packet that the message refers +## to. *Too big* messages should include the original IP header from the packet +## that triggered them, and Bro parses that into the *context* structure. Note +## that if the *too big* includes only a partial IP header for some reason, no +## fields of *context* will be filled out. +## +## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent +## icmp_time_exceeded +event icmp_packet_too_big%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); ## Generated for ICMP *time exceeded* messages. ## @@ -852,6 +885,89 @@ event icmp6_placeholder%(c: connection, icmp: icmp_conn, ICMP6: bool%); ## icmp_unreachable event icmp_time_exceeded%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); +## Generated for ICMP *parameter problem* messages. +## +## See `Wikipedia +## `__ for more +## information about the ICMP protocol. +## +## c: The connection record for the corresponding ICMP flow. +## +## icmp: Additional ICMP-specific information augmenting the standard connection +## record *c*. +## +## code: The ICMP code of the *parameter problem* message. +## +## context: A record with specifics of the original packet that the message refers +## to. *Parameter problem* messages should include the original IP header from the packet +## that triggered them, and Bro parses that into the *context* structure. Note that +## if the *parameter problem* includes only a partial IP header for some reason, no fields +## of *context* will be filled out. +## +## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent +## icmp_unreachable +event icmp_parameter_problem%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); + +## Generated for ICMP *router solicitation* messages. +## +## See `Wikipedia +## `__ for more +## information about the ICMP protocol. +## +## c: The connection record for the corresponding ICMP flow. +## +## icmp: Additional ICMP-specific information augmenting the standard connection +## record *c*. +## +## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent +## icmp_time_exceeded icmp_unreachable +event icmp_router_solicitation%(c: connection, icmp: icmp_conn%); + +## Generated for ICMP *router advertisement* messages. +## +## See `Wikipedia +## `__ for more +## information about the ICMP protocol. +## +## c: The connection record for the corresponding ICMP flow. +## +## icmp: Additional ICMP-specific information augmenting the standard connection +## record *c*. +## +## .. 
bro:see:: icmp_echo_reply icmp_echo_request icmp_sent +## icmp_time_exceeded icmp_unreachable +event icmp_router_advertisement%(c: connection, icmp: icmp_conn%); + +## Generated for ICMP *neighbor solicitation* messages. +## +## See `Wikipedia +## `__ for more +## information about the ICMP protocol. +## +## c: The connection record for the corresponding ICMP flow. +## +## icmp: Additional ICMP-specific information augmenting the standard connection +## record *c*. +## +## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent +## icmp_time_exceeded icmp_unreachable +event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn%); + +## Generated for ICMP *neighbor advertisement* messages. +## +## See `Wikipedia +## `__ for more +## information about the ICMP protocol. +## +## c: The connection record for the corresponding ICMP flow. +## +## icmp: Additional ICMP-specific information augmenting the standard connection +## record *c*. +## +## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent +## icmp_time_exceeded icmp_unreachable +event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn%); + ## Generated for ICMP *redirect* messages. ## ## See `Wikipedia diff --git a/src/net_util.cc b/src/net_util.cc index d66e56711f..a367429257 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -90,7 +90,7 @@ int udp_checksum(const struct ip* ip, const struct udphdr* up, int len) int udp6_checksum(const struct ip6_hdr* ip6, const struct udphdr* up, int len) { // UDP over IPv6 uses the same checksum function as over IPv4 but a - // different pseuod-header over which it is computed. + // different pseudo-header over which it is computed. uint32 sum; if ( len % 2 == 1 ) @@ -116,8 +116,8 @@ int udp6_checksum(const struct ip6_hdr* ip6, const struct udphdr* up, int len) int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, int len) { - // ICMP6 uses the same checksum function as over ICMP4 but a different - // pseuod-header over which it is computed. + // ICMP6 uses the same checksum function as ICMP4 but a different + // pseudo-header over which it is computed. 
uint32 sum; if ( len % 2 == 1 ) From 9d1e51a91e162f6d8144aae094983db3dfc36bfa Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 2 Mar 2012 13:52:45 -0600 Subject: [PATCH 119/651] More code cleanup --- src/ICMP.cc | 15 ++++++++------- src/Val.h | 6 +++--- src/net_util.cc | 2 +- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/ICMP.cc b/src/ICMP.cc index 2fb1a9daab..76b19398fa 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -67,6 +67,7 @@ void ICMP_Analyzer::DeliverPacket(int len, const u_char* data, default: reporter->InternalError("unexpected IP proto in ICMP analyzer"); + break; } if ( chksum != 0xffff ) @@ -108,7 +109,7 @@ void ICMP_Analyzer::DeliverPacket(int len, const u_char* data, void ICMP_Analyzer::NextICMP4(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr ) - { + { switch ( icmpp->icmp_type ) { case ICMP_ECHO: @@ -119,10 +120,11 @@ void ICMP_Analyzer::NextICMP4(double t, const struct icmp* icmpp, int len, int c case ICMP_UNREACH: case ICMP_TIMXCEED: Context4(t, icmpp, len, caplen, data, ip_hdr); - break; + break; default: - ICMPEvent(icmp_sent, icmpp, len, 0); break; + ICMPEvent(icmp_sent, icmpp, len, 0); + break; } } @@ -257,6 +259,7 @@ TransportProto ICMP_Analyzer::GetContextProtocol(const IP_Hdr* ip_hdr, uint32* s default: *src_port = *dst_port = ntohs(0); + break; } return proto; @@ -350,7 +353,6 @@ RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) src_addr = dst_addr = 0; src_port = dst_port = 0; } - else { ip_len = ip_hdr->TotalLen(); @@ -394,7 +396,6 @@ RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) return iprec; } - bool ICMP_Analyzer::IsReuse(double /* t */, const u_char* /* pkt */) { return 0; @@ -534,11 +535,11 @@ void ICMP_Analyzer::Context4(double t, const struct icmp* icmpp, { case ICMP_UNREACH: f = icmp_unreachable; - break; + break; case ICMP_TIMXCEED: f = icmp_error_message; - break; + break; } if ( f ) diff --git a/src/Val.h b/src/Val.h index ae8c53308e..64c19b2d92 100644 --- a/src/Val.h +++ b/src/Val.h @@ -509,9 +509,9 @@ protected: #define NUM_PORT_SPACES 4 #define PORT_SPACE_MASK 0x30000 -#define TCP_PORT_MASK 0x10000 -#define UDP_PORT_MASK 0x20000 -#define ICMP_PORT_MASK 0x30000 +#define TCP_PORT_MASK 0x10000 +#define UDP_PORT_MASK 0x20000 +#define ICMP_PORT_MASK 0x30000 typedef enum { diff --git a/src/net_util.cc b/src/net_util.cc index a367429257..856e351138 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -122,7 +122,7 @@ int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, int len) if ( len % 2 == 1 ) // Add in pad byte. - sum += htons(((const u_char*) icmpp)[len - 1] << 8); + sum = htons(((const u_char*) icmpp)[len - 1] << 8); else sum = 0; From eb9f686bb20fc1fe5021cd0b92eea3b5a147a1cd Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 2 Mar 2012 20:01:01 -0600 Subject: [PATCH 120/651] Add handling for IPv6 extension header chains (addresses #531) - The script-layer 'pkt_hdr' type is extended with a new 'ip6' field representing the full IPv6 header chain. - The 'new_packet' event is now raised for IPv6 packets (addresses #523) - A new event called 'ipv6_ext_header' is raised for any IPv6 packet containing extension headers. - A new event called 'esp_packet' is raised for any packets using ESP ('new_packet' and 'ipv6_ext_header' events provide connection info, but that info can't be provided here since the upper-layer payload is encrypted). 
- The 'unknown_protocol' weird is now raised more reliably when Bro sees a transport protocol or IPv6 extension header it can't handle. (addresses #522) Still need to do IPv6 fragment reassembly and needs more testing. --- scripts/base/init-bare.bro | 156 ++++++++++++++++++++- src/CMakeLists.txt | 1 + src/Frag.cc | 13 +- src/Frag.h | 5 +- src/IP.cc | 273 +++++++++++++++++++++++++++++++++++++ src/IP.h | 196 +++++++++++++++++++++++--- src/PacketSort.cc | 2 +- src/Sessions.cc | 151 ++++++++++---------- src/Sessions.h | 10 +- src/TCP.cc | 6 +- src/event.bif | 21 ++- 11 files changed, 724 insertions(+), 110 deletions(-) create mode 100644 src/IP.cc diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index c4ae4b134a..91c6b7856d 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -939,11 +939,154 @@ const IPPROTO_IGMP = 2; ##< Group management protocol. const IPPROTO_IPIP = 4; ##< IP encapsulation in IP. const IPPROTO_TCP = 6; ##< TCP. const IPPROTO_UDP = 17; ##< User datagram protocol. +const IPPROTO_IPV6 = 41; ##< IPv6 header. const IPPROTO_RAW = 255; ##< Raw IP packet. -## Values extracted from an IP header. +# Definitions for IPv6 extension headers. +const IPPROTO_HOPOPTS = 0; ##< IPv6 hop-by-hop-options header. +const IPPROTO_ROUTING = 43; ##< IPv6 routing header. +const IPPROTO_FRAGMENT = 44; ##< IPv6 fragment header. +const IPPROTO_ESP = 50; ##< IPv6 encapsulating security payload header. +const IPPROTO_AH = 51; ##< IPv6 authentication header. +const IPPROTO_NONE = 59; ##< IPv6 no next header. +const IPPROTO_DSTOPTS = 60; ##< IPv6 destination options header. + +## Values extracted from an IPv6 header. ## -## .. bro:see:: pkt_hdr discarder_check_ip +## .. bro:see:: pkt_hdr ip_hdr ip6_hdr_chain ip6_hopopts ip6_dstopts ip6_routing +## ip6_fragment ip6_ah ip6_esp +type ip6_hdr: record { + class: count; ##< Traffic class. + flow: count; ##< Flow label. + len: count; ##< Payload length. + nxt: count; ##< Next header (RFC 1700 assigned number). + hlim: count; ##< Hop limit. + src: addr; ##< Source address. + dst: addr; ##< Destination address. +}; + +## Values extracted from an IPv6 extension header's (e.g. hop-by-hop or +## destination option headers) option field. +## +## .. bro:see:: ip6_hdr ip6_hdr_chain ip6_hopopts ip6_dstopts +type ip6_option: record { + otype: count; ##< Option type. + len: count; ##< Option data length. + data: string; ##< Option data. +}; + +## Values extracted from an IPv6 Hop-by-Hop options extension header. +## +## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain ip6_option +type ip6_hopopts: record { + ## Next header (RFC 1700 assigned number). + nxt: count; + ## Length of header in 8-octet units, excluding first unit. + len: count; + ## The TLV encoded options; + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Destination options extension header. +## +## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain ip6_option +type ip6_dstopts: record { + ## Next header (RFC 1700 assigned number). + nxt: count; + ## Length of header in 8-octet units, excluding first unit. + len: count; + ## The TLV encoded options; + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Routing extension header. +## +## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain +type ip6_routing: record { + ## Next header (RFC 1700 assigned number). + nxt: count; + ## Length of header in 8-octet units, excluding first unit. + len: count; + ## Routing type. + rtype: count; + ## Segments left. 
+ segleft: count; + ## Type-specific data. + data: string; +}; + +## Values extracted from an IPv6 Fragment extension header. +## +## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain +type ip6_fragment: record { + ## Next header (RFC 1700 assigned number). + nxt: count; + ## 8-bit reserved field. + rsv1: count; + ## Fragmentation offset. + offset: count; + ## 2-bit reserved field. + rsv2: count; + ## More fragments. + more: bool; + ## Fragment identification. + id: count; +}; + +## Values extracted from an IPv6 Authentication extension header. +## +## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain +type ip6_ah: record { + ## Next header (RFC 1700 assigned number). + nxt: count; + ## Length of header in 4-octet units, excluding first two units. + len: count; + ## Reserved field. + rsv: count; + ## Security Parameter Index. + spi: count; + ## Sequence number. + seq: count; + ## Authentication data. + data: string; +}; + +## Values extracted from an IPv6 ESP extension header. +## +## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain +type ip6_esp: record { + ## Security Parameters Index. + spi: count; + ## Sequence number. + seq: count; +}; + +## An IPv6 header chain. +## +## .. bro:see:: pkt_hdr ip_hdr +type ip6_hdr_chain: record { + ## The main IPv6 header. + hdr: ip6_hdr; + ## Hop-by-hop option extension header. + hopopts: vector of ip6_hopopts; + ## Destination option extension headers. + dstopts: vector of ip6_dstopts; + ## Routing extension headers. + routing: vector of ip6_routing; + ## Fragment headers. + fragment: vector of ip6_fragment; + ## Authentication extension headers. + ah: vector of ip6_ah; + ## Encapsulating security payload headers. + esp: vector of ip6_esp; + + ## Order of extension headers identified by RFC 1700 assigned numbers. + ext_order: vector of count; +}; + +## Values extracted from an IPv4 header. +## +## .. bro:see:: pkt_hdr ip6_hdr discarder_check_ip type ip_hdr: record { hl: count; ##< Header length in bytes. tos: count; ##< Type of service. @@ -1000,10 +1143,11 @@ type icmp_hdr: record { ## ## .. bro:see:: new_packet type pkt_hdr: record { - ip: ip_hdr; ##< The IP header. - tcp: tcp_hdr &optional; ##< The TCP header if a TCP packet. - udp: udp_hdr &optional; ##< The UDP header if a UDP packet. - icmp: icmp_hdr &optional; ##< The ICMP header if an ICMP packet. + ip: ip_hdr &optional; ##< The IPv4 header if an IPv4 packet. + ip6: ip6_hdr_chain &optional; ##< The IPv6 header chain if an IPv6 packet. + tcp: tcp_hdr &optional; ##< The TCP header if a TCP packet. + udp: udp_hdr &optional; ##< The UDP header if a UDP packet. + icmp: icmp_hdr &optional; ##< The ICMP header if an ICMP packet. }; ## Definition of "secondary filters". 
A secondary filter is a BPF filter given as diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d51211f0d1..26807a184f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -330,6 +330,7 @@ set(bro_SRCS IntSet.cc InterConn.cc IOSource.cc + IP.cc IPAddr.cc IRC.cc List.cc diff --git a/src/Frag.cc b/src/Frag.cc index 21abc324f8..b5c5e371d4 100644 --- a/src/Frag.cc +++ b/src/Frag.cc @@ -27,7 +27,7 @@ void FragTimer::Dispatch(double t, int /* is_expire */) FragReassembler::FragReassembler(NetSessions* arg_s, const IP_Hdr* ip, const u_char* pkt, - uint32 frag_field, HashKey* k, double t) + HashKey* k, double t) : Reassembler(0, ip->DstAddr(), REASSEM_IP) { s = arg_s; @@ -41,7 +41,7 @@ FragReassembler::FragReassembler(NetSessions* arg_s, reassembled_pkt = 0; frag_size = 0; // flag meaning "not known" - AddFragment(t, ip, pkt, frag_field); + AddFragment(t, ip, pkt); if ( frag_timeout != 0.0 ) { @@ -60,8 +60,7 @@ FragReassembler::~FragReassembler() delete key; } -void FragReassembler::AddFragment(double t, const IP_Hdr* ip, const u_char* pkt, - uint32 frag_field) +void FragReassembler::AddFragment(double t, const IP_Hdr* ip, const u_char* pkt) { const struct ip* ip4 = ip->IP4_Hdr(); @@ -72,16 +71,16 @@ void FragReassembler::AddFragment(double t, const IP_Hdr* ip, const u_char* pkt, // attack. s->Weird("fragment_protocol_inconsistency", ip); - if ( frag_field & 0x4000 ) + if ( ip->DF() ) // Linux MTU discovery for UDP can do this, for example. s->Weird("fragment_with_DF", ip); - int offset = (ntohs(ip4->ip_off) & 0x1fff) * 8; + int offset = ip->FragOffset(); int len = ntohs(ip4->ip_len); int hdr_len = proto_hdr->ip_hl * 4; int upper_seq = offset + len - hdr_len; - if ( (frag_field & 0x2000) == 0 ) + if ( ! ip->MF() ) { // Last fragment. if ( frag_size == 0 ) diff --git a/src/Frag.h b/src/Frag.h index 92bf1b3bbd..4c9886faa2 100644 --- a/src/Frag.h +++ b/src/Frag.h @@ -20,11 +20,10 @@ typedef void (FragReassembler::*frag_timer_func)(double t); class FragReassembler : public Reassembler { public: FragReassembler(NetSessions* s, const IP_Hdr* ip, const u_char* pkt, - uint32 frag_field, HashKey* k, double t); + HashKey* k, double t); ~FragReassembler(); - void AddFragment(double t, const IP_Hdr* ip, const u_char* pkt, - uint32 frag_field); + void AddFragment(double t, const IP_Hdr* ip, const u_char* pkt); void Expire(double t); void DeleteTimer(); diff --git a/src/IP.cc b/src/IP.cc new file mode 100644 index 0000000000..826ae544f6 --- /dev/null +++ b/src/IP.cc @@ -0,0 +1,273 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "IP.h" +#include "Type.h" +#include "Val.h" +#include "Var.h" + +static RecordType* ip_hdr_type = 0; +static RecordType* ip6_hdr_type = 0; +static RecordType* ip6_hdr_chain_type = 0; +static RecordType* ip6_option_type = 0; +static RecordType* ip6_hopopts_type = 0; +static RecordType* ip6_dstopts_type = 0; +static RecordType* ip6_routing_type = 0; +static RecordType* ip6_fragment_type = 0; +static RecordType* ip6_ah_type = 0; +static RecordType* ip6_esp_type = 0; + +static inline RecordType* hdrType(RecordType*& type, const char* name) + { + if ( ! 
type ) type = internal_type(name)->AsRecordType(); + return type; + } + +RecordVal* IPv6_Hdr::BuildRecordVal() const + { + RecordVal* rv = new RecordVal(hdrType(ip6_hdr_type, "ip6_hdr")); + const struct ip6_hdr* ip6 = (const struct ip6_hdr*)data; + rv->Assign(0, new Val(ntohl(ip6->ip6_flow) & 0x0ff00000, TYPE_COUNT)); + rv->Assign(1, new Val(ntohl(ip6->ip6_flow) & 0x000fffff, TYPE_COUNT)); + rv->Assign(2, new Val(ntohs(ip6->ip6_plen), TYPE_COUNT)); + rv->Assign(3, new Val(ip6->ip6_nxt, TYPE_COUNT)); + rv->Assign(4, new Val(ip6->ip6_hlim, TYPE_COUNT)); + rv->Assign(5, new AddrVal(ip6->ip6_src)); + rv->Assign(6, new AddrVal(ip6->ip6_dst)); + return rv; + } + +static VectorVal* BuildOptionsVal(const u_char* data, uint16 len) + { + VectorVal* vv = new VectorVal(new VectorType(ip6_option_type->Ref())); + while ( len > 0 ) + { + const struct ip6_opt* opt = (const struct ip6_opt*) data; + RecordVal* rv = new RecordVal(hdrType(ip6_option_type, "ip6_option")); + rv->Assign(0, new Val(opt->ip6o_type, TYPE_COUNT)); + rv->Assign(1, new Val(opt->ip6o_len, TYPE_COUNT)); + uint16 off = 2 * sizeof(uint8); + rv->Assign(2, new StringVal( + new BroString(data + off, opt->ip6o_len - off, 1))); + data += opt->ip6o_len + off; + len -= opt->ip6o_len + off; + vv->Assign(vv->Size(), rv, 0); + } + return vv; + } + +RecordVal* IPv6_HopOpts::BuildRecordVal() const + { + RecordVal* rv = new RecordVal(hdrType(ip6_hopopts_type, "ip6_hopopts")); + const struct ip6_hbh* hbh = (const struct ip6_hbh*)data; + rv->Assign(0, new Val(hbh->ip6h_nxt, TYPE_COUNT)); + rv->Assign(1, new Val(hbh->ip6h_len, TYPE_COUNT)); + uint16 off = 2 * sizeof(uint8); + rv->Assign(2, BuildOptionsVal(data + off, Length() - off)); + return rv; + } + +RecordVal* IPv6_DstOpts::BuildRecordVal() const + { + RecordVal* rv = new RecordVal(hdrType(ip6_dstopts_type, "ip6_dstopts")); + const struct ip6_dest* dst = (const struct ip6_dest*)data; + rv->Assign(0, new Val(dst->ip6d_nxt, TYPE_COUNT)); + rv->Assign(1, new Val(dst->ip6d_len, TYPE_COUNT)); + uint16 off = 2 * sizeof(uint8); + rv->Assign(2, BuildOptionsVal(data + off, Length() - off)); + return rv; + } + +RecordVal* IPv6_Routing::BuildRecordVal() const + { + RecordVal* rv = new RecordVal(hdrType(ip6_routing_type, "ip6_routing")); + const struct ip6_rthdr* rt = (const struct ip6_rthdr*)data; + rv->Assign(0, new Val(rt->ip6r_nxt, TYPE_COUNT)); + rv->Assign(1, new Val(rt->ip6r_len, TYPE_COUNT)); + rv->Assign(2, new Val(rt->ip6r_type, TYPE_COUNT)); + rv->Assign(3, new Val(rt->ip6r_segleft, TYPE_COUNT)); + uint16 off = 4 * sizeof(uint8); + rv->Assign(4, new StringVal(new BroString(data + off, Length() - off, 1))); + return rv; + } + +RecordVal* IPv6_Fragment::BuildRecordVal() const + { + RecordVal* rv = new RecordVal(hdrType(ip6_fragment_type, "ip6_fragment")); + const struct ip6_frag* frag = (const struct ip6_frag*)data; + rv->Assign(0, new Val(frag->ip6f_nxt, TYPE_COUNT)); + rv->Assign(1, new Val(frag->ip6f_reserved, TYPE_COUNT)); + rv->Assign(2, new Val(ntohs(frag->ip6f_offlg) & 0xfff8, TYPE_COUNT)); + rv->Assign(3, new Val(ntohs(frag->ip6f_offlg) & 0x0006, TYPE_COUNT)); + rv->Assign(4, new Val(ntohs(frag->ip6f_offlg) & 0x0001, TYPE_BOOL)); + rv->Assign(5, new Val(ntohl(frag->ip6f_ident), TYPE_COUNT)); + return rv; + } + +RecordVal* IPv6_AH::BuildRecordVal() const + { + RecordVal* rv = new RecordVal(hdrType(ip6_ah_type, "ip6_ah")); + rv->Assign(0, new Val(((ip6_ext*)data)->ip6e_nxt, TYPE_COUNT)); + rv->Assign(1, new Val(((ip6_ext*)data)->ip6e_len, TYPE_COUNT)); + rv->Assign(2, new 
Val(ntohs(((uint16*)data)[1]), TYPE_COUNT)); + rv->Assign(3, new Val(ntohl(((uint32*)data)[1]), TYPE_COUNT)); + rv->Assign(4, new Val(ntohl(((uint32*)data)[2]), TYPE_COUNT)); + uint16 off = 3 * sizeof(uint32); + rv->Assign(5, new StringVal(new BroString(data + off, Length() - off, 1))); + return rv; + } + +RecordVal* IPv6_ESP::BuildRecordVal() const + { + RecordVal* rv = new RecordVal(hdrType(ip6_esp_type, "ip6_esp")); + const uint32* esp = (const uint32*)data; + rv->Assign(0, new Val(ntohl(esp[0]), TYPE_COUNT)); + rv->Assign(1, new Val(ntohl(esp[1]), TYPE_COUNT)); + return rv; + } + +RecordVal* IP_Hdr::BuildRecordVal() const + { + RecordVal* rval = 0; + + if ( ! ip_hdr_type ) + { + ip_hdr_type = internal_type("ip_hdr")->AsRecordType(); + ip6_hdr_type = internal_type("ip6_hdr")->AsRecordType(); + ip6_hdr_chain_type = internal_type("ip6_hdr_chain")->AsRecordType(); + ip6_hopopts_type = internal_type("ip6_hopopts")->AsRecordType(); + ip6_dstopts_type = internal_type("ip6_dstopts")->AsRecordType(); + ip6_routing_type = internal_type("ip6_routing")->AsRecordType(); + ip6_fragment_type = internal_type("ip6_fragment")->AsRecordType(); + ip6_ah_type = internal_type("ip6_ah")->AsRecordType(); + ip6_esp_type = internal_type("ip6_esp")->AsRecordType(); + } + + if ( ip4 ) + { + rval = new RecordVal(ip_hdr_type); + rval->Assign(0, new Val(ip4->ip_hl * 4, TYPE_COUNT)); + rval->Assign(1, new Val(ip4->ip_tos, TYPE_COUNT)); + rval->Assign(2, new Val(ntohs(ip4->ip_len), TYPE_COUNT)); + rval->Assign(3, new Val(ntohs(ip4->ip_id), TYPE_COUNT)); + rval->Assign(4, new Val(ip4->ip_ttl, TYPE_COUNT)); + rval->Assign(5, new Val(ip4->ip_p, TYPE_COUNT)); + rval->Assign(6, new AddrVal(ip4->ip_src.s_addr)); + rval->Assign(7, new AddrVal(ip4->ip_dst.s_addr)); + } + else + { + rval = new RecordVal(ip6_hdr_chain_type); + + VectorVal* hopopts = new VectorVal(new VectorType(ip6_hopopts_type->Ref())); + VectorVal* dstopts = new VectorVal(new VectorType(ip6_dstopts_type->Ref())); + VectorVal* routing = new VectorVal(new VectorType(ip6_routing_type->Ref())); + VectorVal* fragment = new VectorVal(new VectorType(ip6_fragment_type->Ref())); + VectorVal* ah = new VectorVal(new VectorType(ip6_ah_type->Ref())); + VectorVal* esp = new VectorVal(new VectorType(ip6_esp_type->Ref())); + VectorVal* order = new VectorVal(new VectorType(base_type(TYPE_COUNT))); + + for ( size_t i = 1; i < ip6_hdrs->Size(); ++i ) + { + RecordVal* v = ((*ip6_hdrs)[i])->BuildRecordVal(); + uint8 type = ((*ip6_hdrs)[i])->Type(); + switch (type) { + case IPPROTO_HOPOPTS: + hopopts->Assign(hopopts->Size(), v, 0); + break; + case IPPROTO_ROUTING: + routing->Assign(routing->Size(), v, 0); + break; + case IPPROTO_DSTOPTS: + dstopts->Assign(dstopts->Size(), v, 0); + break; + case IPPROTO_FRAGMENT: + fragment->Assign(fragment->Size(), v, 0); + break; + case IPPROTO_AH: + ah->Assign(ah->Size(), v, 0); + break; + case IPPROTO_ESP: + esp->Assign(esp->Size(), v, 0); + break; + case IPPROTO_IPV6: + default: + reporter->InternalError("pkt_hdr assigned bad header %d", type); + break; + } + order->Assign(i, new Val(type, TYPE_COUNT), 0); + } + + rval->Assign(0, ((*ip6_hdrs)[0])->BuildRecordVal()); + rval->Assign(1, hopopts); + rval->Assign(2, dstopts); + rval->Assign(3, routing); + rval->Assign(4, fragment); + rval->Assign(5, ah); + rval->Assign(6, esp); + rval->Assign(7, order); + } + + return rval; + } + +static inline IPv6_Hdr* getIPv6Header(uint8 type, const u_char* d) + { + switch (type) { + case IPPROTO_IPV6: + return new IPv6_Hdr(d); + case IPPROTO_HOPOPTS: + return 
new IPv6_HopOpts(d); + case IPPROTO_ROUTING: + return new IPv6_Routing(d); + case IPPROTO_DSTOPTS: + return new IPv6_DstOpts(d); + case IPPROTO_FRAGMENT: + return new IPv6_Fragment(d); + case IPPROTO_AH: + return new IPv6_AH(d); + case IPPROTO_ESP: + return new IPv6_ESP(d); + default: + // should never get here if calls are protected by isIPv6ExtHeader() + reporter->InternalError("Unknown IPv6 header type: %d", type); + break; + } + // can't be reached + assert(false); + return 0; + } + +static inline bool isIPv6ExtHeader(uint8 type) + { + switch (type) { + case IPPROTO_HOPOPTS: + case IPPROTO_ROUTING: + case IPPROTO_DSTOPTS: + case IPPROTO_FRAGMENT: + case IPPROTO_AH: + case IPPROTO_ESP: + return true; + default: + return false; + } + } + +IPv6_Hdr_Chain::IPv6_Hdr_Chain(const struct ip6_hdr* ip6) + { + length = 0; + uint8 current_type, next_type; + next_type = IPPROTO_IPV6; + const u_char* hdrs = (const u_char*) ip6; + + do + { + current_type = next_type; + chain.push_back(getIPv6Header(current_type, hdrs)); + next_type = chain[chain.size()-1]->NextHdr(); + uint16 len = chain[chain.size()-1]->Length(); + hdrs += len; + length += len; + } while ( current_type != IPPROTO_FRAGMENT && + current_type != IPPROTO_ESP && + isIPv6ExtHeader(next_type) ); + } diff --git a/src/IP.h b/src/IP.h index 36e8634912..09640f47b9 100644 --- a/src/IP.h +++ b/src/IP.h @@ -4,8 +4,139 @@ #define ip_h #include "config.h" +#include "net_util.h" #include "IPAddr.h" -#include +#include "Reporter.h" +#include "Val.h" +#include "Type.h" +#include +#include +#include +#include + +/** + * Base class for IPv6 header/extensions. + */ +class IPv6_Hdr { +public: + IPv6_Hdr() : type(0), data(0) {} + + /** + * Construct the main IPv6 header. + */ + IPv6_Hdr(const u_char* d) : type(IPPROTO_IPV6), data(d) {} + + /** + * Construct an IPv6 header or extension header from assigned type number. + */ + IPv6_Hdr(uint8 t, const u_char* d) : type(t), data(d) {} + + virtual ~IPv6_Hdr() {} + + /** + * Returns the assigned IPv6 extension header type number of the header + * that immediately follows this one. + */ + virtual uint8 NextHdr() const { return ((ip6_hdr*)data)->ip6_nxt; } + + /** + * Returns the length of the header in bytes. + */ + virtual uint16 Length() const { return 40; } + + /** + * Returns the RFC 1700 assigned number indicating the header type. + */ + uint8 Type() const { return type; } + + /** + * Returns the script-layer record representation of the header. 
+ */ + virtual RecordVal* BuildRecordVal() const; + +protected: + uint8 type; + const u_char* data; +}; + +class IPv6_HopOpts : public IPv6_Hdr { +public: + IPv6_HopOpts(const u_char* d) : IPv6_Hdr(IPPROTO_HOPOPTS, d) {} + uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + uint16 Length() const { return 8 + 8 * ((ip6_ext*)data)->ip6e_len; } + RecordVal* BuildRecordVal() const; +}; + +class IPv6_DstOpts : public IPv6_Hdr { +public: + IPv6_DstOpts(const u_char* d) : IPv6_Hdr(IPPROTO_DSTOPTS, d) {} + uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + uint16 Length() const { return 8 + 8 * ((ip6_ext*)data)->ip6e_len; } + RecordVal* BuildRecordVal() const; +}; + +class IPv6_Routing : public IPv6_Hdr { +public: + IPv6_Routing(const u_char* d) : IPv6_Hdr(IPPROTO_ROUTING, d) {} + uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + uint16 Length() const { return 8 + 8 * ((ip6_ext*)data)->ip6e_len; } + RecordVal* BuildRecordVal() const; +}; + +class IPv6_Fragment : public IPv6_Hdr { +public: + IPv6_Fragment(const u_char* d) : IPv6_Hdr(IPPROTO_FRAGMENT, d) {} + uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + uint16 Length() const { return 8; } + RecordVal* BuildRecordVal() const; +}; + +class IPv6_AH : public IPv6_Hdr { +public: + IPv6_AH(const u_char* d) : IPv6_Hdr(IPPROTO_AH, d) {} + uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + uint16 Length() const { return 8 + 4 * ((ip6_ext*)data)->ip6e_len; } + RecordVal* BuildRecordVal() const; +}; + +class IPv6_ESP : public IPv6_Hdr { +public: + IPv6_ESP(const u_char* d) : IPv6_Hdr(IPPROTO_ESP, d) {} + uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + // encrypted payload begins after 8 bytes + uint16 Length() const { return 8; } + RecordVal* BuildRecordVal() const; +}; + +class IPv6_Hdr_Chain { +public: + /** + * Initializes the header chain from an IPv6 header structure. + */ + IPv6_Hdr_Chain(const struct ip6_hdr* ip6); + + ~IPv6_Hdr_Chain() + { for ( size_t i = 0; i < chain.size(); ++i ) delete chain[i]; } + + /** + * Returns the number of headers in the chain. + */ + size_t Size() const { return chain.size(); } + + /** + * Returns the sum of the length of all headers in the chain in bytes. + */ + uint16 TotalLength() const { return length; } + + /** + * Accesses the header at the given location in the chain. + */ + const IPv6_Hdr* operator[](const size_t i) const { return chain[i]; } + +protected: + vector chain; + uint16 length; // The summation of all header lengths in the chain in bytes. +}; class IP_Hdr { public: @@ -17,10 +148,12 @@ public: IP_Hdr(const struct ip6_hdr* arg_ip6, bool arg_del) : ip4(0), ip6(arg_ip6), del(arg_del) { + ip6_hdrs = new IPv6_Hdr_Chain(ip6); } ~IP_Hdr() { + if ( ip6 ) delete ip6_hdrs; if ( del ) { if ( ip4 ) @@ -30,23 +163,23 @@ public: } } + //TODO: audit usages of this for correct IPv6 support or IPv4 assumptions const struct ip* IP4_Hdr() const { return ip4; } + const struct ip6_hdr* IP6_Hdr() const { return ip6; } IPAddr SrcAddr() const { return ip4 ? IPAddr(ip4->ip_src) : IPAddr(ip6->ip6_src); } + IPAddr DstAddr() const { return ip4 ? IPAddr(ip4->ip_dst) : IPAddr(ip6->ip6_dst); } - //TODO: needs adapting/replacement for IPv6 support - uint16 ID4() const { return ip4 ? 
ip4->ip_id : 0; } - const u_char* Payload() const { if ( ip4 ) return ((const u_char*) ip4) + ip4->ip_hl * 4; else - return ((const u_char*) ip6) + 40; + return ((const u_char*) ip6) + ip6_hdrs->TotalLength(); } uint16 PayloadLen() const @@ -54,33 +187,60 @@ public: if ( ip4 ) return ntohs(ip4->ip_len) - ip4->ip_hl * 4; else - return ntohs(ip6->ip6_plen); + return ntohs(ip6->ip6_plen) - ip6_hdrs->TotalLength(); } uint16 TotalLen() const - { - if ( ip4 ) - return ntohs(ip4->ip_len); - else - return ntohs(ip6->ip6_plen) + 40; - } + { return ip4 ? ntohs(ip4->ip_len) : ntohs(ip6->ip6_plen) + 40; } + + uint16 HdrLen() const + { return ip4 ? ip4->ip_hl * 4 : ip6_hdrs->TotalLength(); } + + uint8 LastHeader() const + { return ip4 ? IPPROTO_RAW : + ((*ip6_hdrs)[ip6_hdrs->Size()-1])->Type(); } - uint16 HdrLen() const { return ip4 ? ip4->ip_hl * 4 : 40; } unsigned char NextProto() const - { return ip4 ? ip4->ip_p : ip6->ip6_nxt; } + { return ip4 ? ip4->ip_p : + ((*ip6_hdrs)[ip6_hdrs->Size()-1])->NextHdr(); } + unsigned char TTL() const { return ip4 ? ip4->ip_ttl : ip6->ip6_hlim; } + + //TODO: check for IPv6 Fragment ext. header + bool IsFragment() const + { return ip4 ? (ntohs(ip4->ip_off) & 0x3fff) != 0 : false; } + + //TODO: check for IPv6 Fragment ext. header + uint16 FragOffset() const + { return ip4 ? (ntohs(ip4->ip_off) & 0x1fff) * 8 : 0; } + + //TODO: check for IPv6 Fragment ext. header uint16 FragField() const - { return ntohs(ip4 ? ip4->ip_off : 0); } + { return ip4 ? ntohs(ip4->ip_off) : 0; } + + //TODO: check for IPv6 Fragment ext. header + uint16 ID() const + { return ip4 ? ntohs(ip4->ip_id) : 0; } + + //TODO: check for IPv6 Fragment ext. header + int MF() const + { return ip4 ? (ntohs(ip4->ip_off) & 0x2000) != 0 : 0; } + + // IPv6 has no "Don't Fragment" flag. int DF() const - { return ip4 ? ((ntohs(ip4->ip_off) & IP_DF) != 0) : 0; } - uint16 IP_ID() const - { return ip4 ? (ntohs(ip4->ip_id)) : 0; } + { return ip4 ? ((ntohs(ip4->ip_off) & 0x4000) != 0) : 0; } + + size_t NumHeaders() const + { return ip4 ? 1 : ip6_hdrs->Size(); } + + RecordVal* BuildRecordVal() const; private: const struct ip* ip4; const struct ip6_hdr* ip6; bool del; + IPv6_Hdr_Chain* ip6_hdrs; }; #endif diff --git a/src/PacketSort.cc b/src/PacketSort.cc index d0e04a37ea..7bfdaba9a0 100644 --- a/src/PacketSort.cc +++ b/src/PacketSort.cc @@ -33,7 +33,7 @@ PacketSortElement::PacketSortElement(PktSrc* arg_src, if ( ip_hdr->NextProto() == IPPROTO_TCP && // Note: can't sort fragmented packets - (ip_hdr->FragField() & 0x3fff) == 0 ) + ( ! ip_hdr->IsFragment() ) ) { tcp_offset = hdr_size + ip_hdr->HdrLen(); if ( caplen >= tcp_offset + sizeof(struct tcphdr) ) diff --git a/src/Sessions.cc b/src/Sessions.cc index 04b877dfe0..e8cece9e46 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -332,7 +332,8 @@ void NetSessions::NextPacketSecondary(double /* t */, const struct pcap_pkthdr* StringVal* cmd_val = new StringVal(sp->Event()->Filter()); args->append(cmd_val); - args->append(BuildHeader(ip)); + IP_Hdr ip_hdr(ip, false); + args->append(BuildHeader(&ip_hdr)); // ### Need to queue event here. 
try { @@ -400,18 +401,6 @@ int NetSessions::CheckConnectionTag(Connection* conn) return 1; } - -static bool looks_like_IPv4_packet(int len, const struct ip* ip_hdr) - { - if ( (unsigned int) len < sizeof(struct ip) ) - return false; - - if ( ip_hdr->ip_v == 4 && ntohs(ip_hdr->ip_len) == len ) - return true; - else - return false; - } - void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, const IP_Hdr* ip_hdr, const u_char* const pkt, int hdr_size) @@ -441,18 +430,9 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( discarder && discarder->NextPacket(ip_hdr, len, caplen) ) return; - int proto = ip_hdr->NextProto(); - if ( proto != IPPROTO_TCP && proto != IPPROTO_UDP && - proto != IPPROTO_ICMP ) - { - dump_this_packet = 1; - return; - } - FragReassembler* f = 0; - uint32 frag_field = ip_hdr->FragField(); - if ( (frag_field & 0x3fff) != 0 ) + if ( ip_hdr->IsFragment() ) { dump_this_packet = 1; // always record fragments @@ -463,12 +443,12 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, // Don't try to reassemble, that's doomed. // Discard all except the first fragment (which // is useful in analyzing header-only traces) - if ( (frag_field & 0x1fff) != 0 ) + if ( ip_hdr->FragOffset() != 0 ) return; } else { - f = NextFragment(t, ip_hdr, pkt + hdr_size, frag_field); + f = NextFragment(t, ip_hdr, pkt + hdr_size); const IP_Hdr* ih = f->ReassembledPkt(); if ( ! ih ) // It didn't reassemble into anything yet. @@ -485,21 +465,24 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, len -= ip_hdr_len; // remove IP header caplen -= ip_hdr_len; - uint32 min_hdr_len = (proto == IPPROTO_TCP) ? sizeof(struct tcphdr) : - (proto == IPPROTO_UDP ? sizeof(struct udphdr) : ICMP_MINLEN); - - if ( len < min_hdr_len ) + if ( ip_hdr->LastHeader() == IPPROTO_ESP ) { - Weird("truncated_header", hdr, pkt); - if ( f ) - Remove(f); // ### + if ( esp_packet ) + { + val_list* vl = new val_list(); + vl->append(ip_hdr->BuildRecordVal()); + mgr.QueueEvent(esp_packet, vl); + } + Remove(f); + // Can't do more since upper-layer payloads are going to be encrypted return; } - if ( caplen < min_hdr_len ) + + int proto = ip_hdr->NextProto(); + + if ( CheckHeaderTrunc(proto, len, caplen, hdr, pkt) ) { - Weird("internally_truncated_header", hdr, pkt); - if ( f ) - Remove(f); // ### + Remove(f); return; } @@ -549,6 +532,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, default: Weird(fmt("unknown_protocol %d", proto), hdr, pkt); + Remove(f); return; } @@ -574,6 +558,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( consistent < 0 ) { delete h; + Remove(f); return; } @@ -592,10 +577,11 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } if ( ! conn ) + { delete h; - - if ( ! conn ) + Remove(f); return; + } int record_packet = 1; // whether to record the packet at all int record_content = 1; // whether to record its data @@ -603,8 +589,17 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, int is_orig = (id.src_addr == conn->OrigAddr()) && (id.src_port == conn->OrigPort()); - if ( new_packet && ip4 ) - conn->Event(new_packet, 0, BuildHeader(ip4)); + Val* pkt_hdr_val = 0; + + if ( ipv6_ext_headers && ip_hdr->NumHeaders() > 1 ) + { + pkt_hdr_val = BuildHeader(ip_hdr); + conn->Event(new_packet, 0, pkt_hdr_val); + } + + if ( new_packet ) + conn->Event(new_packet, 0, + pkt_hdr_val ? 
pkt_hdr_val->Ref() : BuildHeader(ip_hdr)); conn->NextPacket(t, is_orig, ip_hdr, len, caplen, data, record_packet, record_content, @@ -614,7 +609,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, { // Above we already recorded the fragment in its entirety. f->DeleteTimer(); - Remove(f); // ### + Remove(f); } else if ( record_packet ) @@ -630,10 +625,39 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } } -Val* NetSessions::BuildHeader(const struct ip* ip) +bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, + const struct pcap_pkthdr* h, const u_char* p) + { + uint32 min_hdr_len = 0; + switch ( proto ) { + case IPPROTO_TCP: + min_hdr_len = sizeof(struct tcphdr); + break; + case IPPROTO_UDP: + min_hdr_len = sizeof(struct udphdr); + break; + case IPPROTO_ICMP: + default: + min_hdr_len = ICMP_MINLEN; + break; + } + if ( len < min_hdr_len ) + { + Weird("truncated_header", h, p); + return true; + } + if ( caplen < min_hdr_len ) + { + Weird("internally_truncated_header", h, p); + return true; + } + return false; + } + + +Val* NetSessions::BuildHeader(const IP_Hdr* ip) { static RecordType* pkt_hdr_type = 0; - static RecordType* ip_hdr_type = 0; static RecordType* tcp_hdr_type = 0; static RecordType* udp_hdr_type = 0; static RecordType* icmp_hdr_type; @@ -641,7 +665,6 @@ Val* NetSessions::BuildHeader(const struct ip* ip) if ( ! pkt_hdr_type ) { pkt_hdr_type = internal_type("pkt_hdr")->AsRecordType(); - ip_hdr_type = internal_type("ip_hdr")->AsRecordType(); tcp_hdr_type = internal_type("tcp_hdr")->AsRecordType(); udp_hdr_type = internal_type("udp_hdr")->AsRecordType(); icmp_hdr_type = internal_type("icmp_hdr")->AsRecordType(); @@ -649,26 +672,15 @@ Val* NetSessions::BuildHeader(const struct ip* ip) RecordVal* pkt_hdr = new RecordVal(pkt_hdr_type); - RecordVal* ip_hdr = new RecordVal(ip_hdr_type); - - int ip_hdr_len = ip->ip_hl * 4; - int ip_pkt_len = ntohs(ip->ip_len); - - ip_hdr->Assign(0, new Val(ip->ip_hl * 4, TYPE_COUNT)); - ip_hdr->Assign(1, new Val(ip->ip_tos, TYPE_COUNT)); - ip_hdr->Assign(2, new Val(ip_pkt_len, TYPE_COUNT)); - ip_hdr->Assign(3, new Val(ntohs(ip->ip_id), TYPE_COUNT)); - ip_hdr->Assign(4, new Val(ip->ip_ttl, TYPE_COUNT)); - ip_hdr->Assign(5, new Val(ip->ip_p, TYPE_COUNT)); - ip_hdr->Assign(6, new AddrVal(ip->ip_src.s_addr)); - ip_hdr->Assign(7, new AddrVal(ip->ip_dst.s_addr)); - - pkt_hdr->Assign(0, ip_hdr); + if ( ip->IP4_Hdr() ) + pkt_hdr->Assign(0, ip->BuildRecordVal()); + else + pkt_hdr->Assign(1, ip->BuildRecordVal()); // L4 header. 
- const u_char* data = ((const u_char*) ip) + ip_hdr_len; + const u_char* data = ip->Payload(); - int proto = ip->ip_p; + int proto = ip->NextProto(); switch ( proto ) { case IPPROTO_TCP: { @@ -676,7 +688,7 @@ Val* NetSessions::BuildHeader(const struct ip* ip) RecordVal* tcp_hdr = new RecordVal(tcp_hdr_type); int tcp_hdr_len = tp->th_off * 4; - int data_len = ip_pkt_len - ip_hdr_len - tcp_hdr_len; + int data_len = ip->PayloadLen() - tcp_hdr_len; tcp_hdr->Assign(0, new PortVal(ntohs(tp->th_sport), TRANSPORT_TCP)); tcp_hdr->Assign(1, new PortVal(ntohs(tp->th_dport), TRANSPORT_TCP)); @@ -687,7 +699,7 @@ Val* NetSessions::BuildHeader(const struct ip* ip) tcp_hdr->Assign(6, new Val(tp->th_flags, TYPE_COUNT)); tcp_hdr->Assign(7, new Val(ntohs(tp->th_win), TYPE_COUNT)); - pkt_hdr->Assign(1, tcp_hdr); + pkt_hdr->Assign(2, tcp_hdr); break; } @@ -700,7 +712,7 @@ Val* NetSessions::BuildHeader(const struct ip* ip) udp_hdr->Assign(1, new PortVal(ntohs(up->uh_dport), TRANSPORT_UDP)); udp_hdr->Assign(2, new Val(ntohs(up->uh_ulen), TYPE_COUNT)); - pkt_hdr->Assign(2, udp_hdr); + pkt_hdr->Assign(3, udp_hdr); break; } @@ -711,7 +723,7 @@ Val* NetSessions::BuildHeader(const struct ip* ip) icmp_hdr->Assign(0, new Val(icmpp->icmp_type, TYPE_COUNT)); - pkt_hdr->Assign(3, icmp_hdr); + pkt_hdr->Assign(4, icmp_hdr); break; } @@ -725,9 +737,9 @@ Val* NetSessions::BuildHeader(const struct ip* ip) } FragReassembler* NetSessions::NextFragment(double t, const IP_Hdr* ip, - const u_char* pkt, uint32 frag_field) + const u_char* pkt) { - uint32 frag_id = ntohs(ip->ID4()); // we actually could skip conv. + uint32 frag_id = ip->ID(); ListVal* key = new ListVal(TYPE_ANY); key->Append(new AddrVal(ip->SrcAddr())); @@ -741,7 +753,7 @@ FragReassembler* NetSessions::NextFragment(double t, const IP_Hdr* ip, FragReassembler* f = fragments.Lookup(h); if ( ! f ) { - f = new FragReassembler(this, ip, pkt, frag_field, h, t); + f = new FragReassembler(this, ip, pkt, h, t); fragments.Insert(h, f); Unref(key); return f; @@ -750,7 +762,7 @@ FragReassembler* NetSessions::NextFragment(double t, const IP_Hdr* ip, delete h; Unref(key); - f->AddFragment(t, ip, pkt, frag_field); + f->AddFragment(t, ip, pkt); return f; } @@ -909,6 +921,7 @@ void NetSessions::Remove(Connection* c) void NetSessions::Remove(FragReassembler* f) { + if ( ! f ) return; HashKey* k = f->Key(); if ( ! k ) reporter->InternalError("fragment block not in dictionary"); diff --git a/src/Sessions.h b/src/Sessions.h index 0a6338899b..ac5fcacfb5 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -79,7 +79,7 @@ public: // Returns a reassembled packet, or nil if there are still // some missing fragments. FragReassembler* NextFragment(double t, const IP_Hdr* ip, - const u_char* pkt, uint32 frag_field); + const u_char* pkt); int Get_OS_From_SYN(struct os_type* retval, uint16 tot, uint8 DF_flag, uint8 TTL, uint16 WSS, @@ -193,7 +193,13 @@ protected: // Builds a record encapsulating a packet. This should be more // general, including the equivalent of a union of tcp/udp/icmp // headers . - Val* BuildHeader(const struct ip* ip); + Val* BuildHeader(const IP_Hdr* ip); + + // For a given protocol, checks whether the header's length as derived + // from lower-level headers or the length actually captured is less + // than that protocol's minimum header size. 
+ bool CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, + const struct pcap_pkthdr* hdr, const u_char* pkt); CompositeHash* ch; PDict(Connection) tcp_conns; diff --git a/src/TCP.cc b/src/TCP.cc index 3315db79f3..57e4449bf8 100644 --- a/src/TCP.cc +++ b/src/TCP.cc @@ -1203,7 +1203,7 @@ RecordVal* TCP_Analyzer::BuildOSVal(int is_orig, const IP_Hdr* ip, if ( ip->HdrLen() > 20 ) quirks |= QUIRK_IPOPT; - if ( ip->IP_ID() == 0 ) + if ( ip->ID() == 0 ) quirks |= QUIRK_ZEROID; if ( tcp->th_seq == 0 ) @@ -1942,11 +1942,11 @@ int TCPStats_Endpoint::DataSent(double /* t */, int seq, int len, int caplen, { if ( ++num_pkts == 1 ) { // First packet. - last_id = ntohs(ip->ID4()); + last_id = ip->ID(); return 0; } - int id = ntohs(ip->ID4()); + int id = ip->ID(); if ( id == last_id ) { diff --git a/src/event.bif b/src/event.bif index 1423750f29..1745139f11 100644 --- a/src/event.bif +++ b/src/event.bif @@ -454,11 +454,30 @@ event expected_connection_seen%(c: connection, a: count%); ## ## c: The connection the packet is part of. ## -## p: Informattion from the header of the packet that triggered the event. +## p: Information from the header of the packet that triggered the event. ## ## .. bro:see:: tcp_packet packet_contents event new_packet%(c: connection, p: pkt_hdr%); +## Generated for every IPv6 packet that contains extension headers. +## This is potentially an expensive event to handle if analyzing IPv6 traffic +## that happens to utilize extension headers frequently. +## +## c: The connection the packet is part of. +## +## p: Information from the header of the packet that triggered the event. +## +## .. bro:see:: new_packet tcp_packet packet_contents esp_packet +event ipv6_ext_headers%(c: connection, p: pkt_hdr%); + +## Generated for any packets using the IPv6 Encapsulating Security Payload (ESP) +## extension header. +## +## p: Information from the header of the packet that triggered the event. +## +## .. bro:see:: new_packet tcp_packet ipv6_ext_headers +event esp_packet%(p: pkt_hdr%); + ## Generated for every packet that has non-empty transport-layer payload. This is a ## very low-level and expensive event that should be avoided when at all possible. ## It's usually infeasible to handle when processing even medium volumes of From 9d590456b00628ba258cb0a3eedbe167dbeaf39c Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 6 Mar 2012 16:08:28 -0600 Subject: [PATCH 121/651] Add IPv6 fragment reassembly. --- src/Frag.cc | 57 +++++++++++++++---- src/Frag.h | 3 +- src/IP.cc | 27 ++++----- src/IP.h | 154 +++++++++++++++++++++++++++++++++++++++------------- 4 files changed, 180 insertions(+), 61 deletions(-) diff --git a/src/Frag.cc b/src/Frag.cc index b5c5e371d4..cbdae92883 100644 --- a/src/Frag.cc +++ b/src/Frag.cc @@ -33,13 +33,23 @@ FragReassembler::FragReassembler(NetSessions* arg_s, s = arg_s; key = k; const struct ip* ip4 = ip->IP4_Hdr(); - proto_hdr_len = ip4->ip_hl * 4; - proto_hdr = (struct ip*) new u_char[64]; // max IP header + slop - // Don't do a structure copy - need to pick up options, too. - memcpy((void*) proto_hdr, (const void*) ip4, proto_hdr_len); + if ( ip4 ) + { + proto_hdr_len = ip4->ip_hl * 4; + proto_hdr = new u_char[64]; // max IP header + slop + // Don't do a structure copy - need to pick up options, too.
+ memcpy((void*) proto_hdr, (const void*) ip4, proto_hdr_len); + } + else + { + proto_hdr_len = ip->HdrLen() - 8; // minus length of fragment header + proto_hdr = new u_char[proto_hdr_len]; + memcpy(proto_hdr, ip->IP6_Hdr(), proto_hdr_len); + } reassembled_pkt = 0; frag_size = 0; // flag meaning "not known" + next_proto = ip->NextProto(); AddFragment(t, ip, pkt); @@ -64,22 +74,37 @@ void FragReassembler::AddFragment(double t, const IP_Hdr* ip, const u_char* pkt) { const struct ip* ip4 = ip->IP4_Hdr(); - if ( ip4->ip_p != proto_hdr->ip_p || ip4->ip_hl != proto_hdr->ip_hl ) + if ( ip4 ) + { + if ( ip4->ip_p != ((const struct ip*)proto_hdr)->ip_p || + ip4->ip_hl != ((const struct ip*)proto_hdr)->ip_hl ) // || ip4->ip_tos != proto_hdr->ip_tos // don't check TOS, there's at least one stack that actually // uses different values, and it's hard to see an associated // attack. s->Weird("fragment_protocol_inconsistency", ip); + } + else + { + if ( ip->NextProto() != next_proto || + ip->HdrLen() - 8 != proto_hdr_len ) + s->Weird("fragment_protocol_inconsistency", ip); + //TODO: more detailed unfrag header consistency checks? + } if ( ip->DF() ) // Linux MTU discovery for UDP can do this, for example. s->Weird("fragment_with_DF", ip); int offset = ip->FragOffset(); - int len = ntohs(ip4->ip_len); - int hdr_len = proto_hdr->ip_hl * 4; + int len = ip->TotalLen(); + int hdr_len = ip->HdrLen(); int upper_seq = offset + len - hdr_len; + if ( ! offset ) + // Make sure to use the first fragment header's next field. + next_proto = ip->NextProto(); + if ( ! ip->MF() ) { // Last fragment. @@ -192,8 +217,7 @@ void FragReassembler::BlockInserted(DataBlock* /* start_block */) u_char* pkt = new u_char[n]; memcpy((void*) pkt, (const void*) proto_hdr, proto_hdr_len); - struct ip* reassem4 = (struct ip*) pkt; - reassem4->ip_len = htons(frag_size + proto_hdr_len); + u_char* pkt_start = pkt; pkt += proto_hdr_len; @@ -213,7 +237,20 @@ void FragReassembler::BlockInserted(DataBlock* /* start_block */) } delete reassembled_pkt; - reassembled_pkt = new IP_Hdr(reassem4, true); + + if ( ((const struct ip*)pkt_start)->ip_v == 4 ) + { + struct ip* reassem4 = (struct ip*) pkt_start; + reassem4->ip_len = htons(frag_size + proto_hdr_len); + reassembled_pkt = new IP_Hdr(reassem4, true); + } + else + { + struct ip6_hdr* reassem6 = (struct ip6_hdr*) pkt_start; + reassem6->ip6_plen = htons(frag_size + proto_hdr_len - 40); + const IPv6_Hdr_Chain* chain = new IPv6_Hdr_Chain(reassem6, next_proto); + reassembled_pkt = new IP_Hdr(reassem6, true, chain); + } DeleteTimer(); } diff --git a/src/Frag.h b/src/Frag.h index 4c9886faa2..86cf3a9dd4 100644 --- a/src/Frag.h +++ b/src/Frag.h @@ -36,11 +36,12 @@ protected: void BlockInserted(DataBlock* start_block); void Overlap(const u_char* b1, const u_char* b2, int n); - struct ip* proto_hdr; + u_char* proto_hdr; IP_Hdr* reassembled_pkt; int proto_hdr_len; NetSessions* s; int frag_size; // size of fully reassembled fragment + uint16 next_proto; // first IPv6 fragment header's next proto field HashKey* key; FragTimer* expire_timer; diff --git a/src/IP.cc b/src/IP.cc index 826ae544f6..ce8514519a 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -26,7 +26,7 @@ RecordVal* IPv6_Hdr::BuildRecordVal() const { RecordVal* rv = new RecordVal(hdrType(ip6_hdr_type, "ip6_hdr")); const struct ip6_hdr* ip6 = (const struct ip6_hdr*)data; - rv->Assign(0, new Val(ntohl(ip6->ip6_flow) & 0x0ff00000, TYPE_COUNT)); + rv->Assign(0, new Val((ntohl(ip6->ip6_flow) & 0x0ff00000)>>20, TYPE_COUNT)); rv->Assign(1, new 
Val(ntohl(ip6->ip6_flow) & 0x000fffff, TYPE_COUNT)); rv->Assign(2, new Val(ntohs(ip6->ip6_plen), TYPE_COUNT)); rv->Assign(3, new Val(ip6->ip6_nxt, TYPE_COUNT)); @@ -96,8 +96,8 @@ RecordVal* IPv6_Fragment::BuildRecordVal() const const struct ip6_frag* frag = (const struct ip6_frag*)data; rv->Assign(0, new Val(frag->ip6f_nxt, TYPE_COUNT)); rv->Assign(1, new Val(frag->ip6f_reserved, TYPE_COUNT)); - rv->Assign(2, new Val(ntohs(frag->ip6f_offlg) & 0xfff8, TYPE_COUNT)); - rv->Assign(3, new Val(ntohs(frag->ip6f_offlg) & 0x0006, TYPE_COUNT)); + rv->Assign(2, new Val((ntohs(frag->ip6f_offlg) & 0xfff8)>>3, TYPE_COUNT)); + rv->Assign(3, new Val((ntohs(frag->ip6f_offlg) & 0x0006)>>1, TYPE_COUNT)); rv->Assign(4, new Val(ntohs(frag->ip6f_offlg) & 0x0001, TYPE_BOOL)); rv->Assign(5, new Val(ntohl(frag->ip6f_ident), TYPE_COUNT)); return rv; @@ -210,23 +210,24 @@ RecordVal* IP_Hdr::BuildRecordVal() const return rval; } -static inline IPv6_Hdr* getIPv6Header(uint8 type, const u_char* d) +static inline IPv6_Hdr* getIPv6Header(uint8 type, const u_char* d, + bool set_next = false, uint16 nxt = 0) { switch (type) { case IPPROTO_IPV6: - return new IPv6_Hdr(d); + return set_next ? new IPv6_Hdr(d, nxt) : new IPv6_Hdr(d); case IPPROTO_HOPOPTS: - return new IPv6_HopOpts(d); + return set_next ? new IPv6_HopOpts(d, nxt) : new IPv6_HopOpts(d); case IPPROTO_ROUTING: - return new IPv6_Routing(d); + return set_next ? new IPv6_Routing(d, nxt) : new IPv6_Routing(d); case IPPROTO_DSTOPTS: - return new IPv6_DstOpts(d); + return set_next ? new IPv6_DstOpts(d, nxt) : new IPv6_DstOpts(d); case IPPROTO_FRAGMENT: - return new IPv6_Fragment(d); + return set_next ? new IPv6_Fragment(d, nxt) : new IPv6_Fragment(d); case IPPROTO_AH: - return new IPv6_AH(d); + return set_next ? new IPv6_AH(d, nxt) : new IPv6_AH(d); case IPPROTO_ESP: - return new IPv6_ESP(d); + return new IPv6_ESP(d); // never able to set ESP header's next default: // should never get here if calls are protected by isIPv6ExtHeader() reporter->InternalError("Unknown IPv6 header type: %d", type); @@ -252,7 +253,7 @@ static inline bool isIPv6ExtHeader(uint8 type) } } -IPv6_Hdr_Chain::IPv6_Hdr_Chain(const struct ip6_hdr* ip6) +void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) { length = 0; uint8 current_type, next_type; @@ -262,7 +263,7 @@ IPv6_Hdr_Chain::IPv6_Hdr_Chain(const struct ip6_hdr* ip6) do { current_type = next_type; - chain.push_back(getIPv6Header(current_type, hdrs)); + chain.push_back(getIPv6Header(current_type, hdrs, set_next, next)); next_type = chain[chain.size()-1]->NextHdr(); uint16 len = chain[chain.size()-1]->Length(); hdrs += len; diff --git a/src/IP.h b/src/IP.h index 09640f47b9..be3d568375 100644 --- a/src/IP.h +++ b/src/IP.h @@ -26,6 +26,16 @@ public: */ IPv6_Hdr(const u_char* d) : type(IPPROTO_IPV6), data(d) {} + /** + * Construct the main IPv6 header, but replace the next protocol field + * if it points to a fragment. + */ + IPv6_Hdr(const u_char* d, uint16 nxt) : type(IPPROTO_IPV6), data(d) + { + if ( ((ip6_hdr*)data)->ip6_nxt == IPPROTO_FRAGMENT ) + ((ip6_hdr*)data)->ip6_nxt = nxt; + } + /** * Construct an IPv6 header or extension header from assigned type number. */ @@ -49,6 +59,11 @@ public: */ uint8 Type() const { return type; } + /** + * Returns pointer to the start of where header structure resides in memory. + */ + const u_char* Data() const { return data; } + /** * Returns the script-layer record representation of the header. 
*/ @@ -59,50 +74,63 @@ protected: const u_char* data; }; -class IPv6_HopOpts : public IPv6_Hdr { +class IPv6_Ext : public IPv6_Hdr { public: - IPv6_HopOpts(const u_char* d) : IPv6_Hdr(IPPROTO_HOPOPTS, d) {} + IPv6_Ext(uint16 type, const u_char* d) : IPv6_Hdr(type, d) {} + IPv6_Ext(uint16 type, const u_char* d, uint16 nxt) : IPv6_Hdr(type, d) + { + if ( ((ip6_ext*)data)->ip6e_nxt == IPPROTO_FRAGMENT ) + ((ip6_ext*)data)->ip6e_nxt = nxt; + } uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + virtual uint16 Length() const = 0; + virtual RecordVal* BuildRecordVal() const = 0; +}; + +class IPv6_HopOpts : public IPv6_Ext { +public: + IPv6_HopOpts(const u_char* d) : IPv6_Ext(IPPROTO_HOPOPTS, d) {} + IPv6_HopOpts(const u_char* d, uint16 n) : IPv6_Ext(IPPROTO_HOPOPTS, d, n) {} uint16 Length() const { return 8 + 8 * ((ip6_ext*)data)->ip6e_len; } RecordVal* BuildRecordVal() const; }; -class IPv6_DstOpts : public IPv6_Hdr { +class IPv6_DstOpts : public IPv6_Ext { public: - IPv6_DstOpts(const u_char* d) : IPv6_Hdr(IPPROTO_DSTOPTS, d) {} - uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + IPv6_DstOpts(const u_char* d) : IPv6_Ext(IPPROTO_DSTOPTS, d) {} + IPv6_DstOpts(const u_char* d, uint16 n) : IPv6_Ext(IPPROTO_DSTOPTS, d, n) {} uint16 Length() const { return 8 + 8 * ((ip6_ext*)data)->ip6e_len; } RecordVal* BuildRecordVal() const; }; -class IPv6_Routing : public IPv6_Hdr { +class IPv6_Routing : public IPv6_Ext { public: - IPv6_Routing(const u_char* d) : IPv6_Hdr(IPPROTO_ROUTING, d) {} - uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + IPv6_Routing(const u_char* d) : IPv6_Ext(IPPROTO_ROUTING, d) {} + IPv6_Routing(const u_char* d, uint16 n) : IPv6_Ext(IPPROTO_ROUTING, d, n) {} uint16 Length() const { return 8 + 8 * ((ip6_ext*)data)->ip6e_len; } RecordVal* BuildRecordVal() const; }; -class IPv6_Fragment : public IPv6_Hdr { +class IPv6_Fragment : public IPv6_Ext { public: - IPv6_Fragment(const u_char* d) : IPv6_Hdr(IPPROTO_FRAGMENT, d) {} - uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + IPv6_Fragment(const u_char* d) : IPv6_Ext(IPPROTO_FRAGMENT, d) {} + IPv6_Fragment(const u_char* d, uint16 n) : IPv6_Ext(IPPROTO_FRAGMENT, d, n) + {} uint16 Length() const { return 8; } RecordVal* BuildRecordVal() const; }; -class IPv6_AH : public IPv6_Hdr { +class IPv6_AH : public IPv6_Ext { public: - IPv6_AH(const u_char* d) : IPv6_Hdr(IPPROTO_AH, d) {} - uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + IPv6_AH(const u_char* d) : IPv6_Ext(IPPROTO_AH, d) {} + IPv6_AH(const u_char* d, uint16 n) : IPv6_Ext(IPPROTO_AH, d, n) {} uint16 Length() const { return 8 + 4 * ((ip6_ext*)data)->ip6e_len; } RecordVal* BuildRecordVal() const; }; -class IPv6_ESP : public IPv6_Hdr { +class IPv6_ESP : public IPv6_Ext { public: - IPv6_ESP(const u_char* d) : IPv6_Hdr(IPPROTO_ESP, d) {} - uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } + IPv6_ESP(const u_char* d) : IPv6_Ext(IPPROTO_ESP, d) {} // encrypted payload begins after 8 bytes uint16 Length() const { return 8; } RecordVal* BuildRecordVal() const; @@ -113,7 +141,14 @@ public: /** * Initializes the header chain from an IPv6 header structure. */ - IPv6_Hdr_Chain(const struct ip6_hdr* ip6); + IPv6_Hdr_Chain(const struct ip6_hdr* ip6) { Init(ip6, false); } + + /** + * Initializes the header chain from an IPv6 header structure, and replaces + * the first next protocol pointer field that points to a fragment header. 
+ */ + IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next) + { Init(ip6, true, next); } ~IPv6_Hdr_Chain() { for ( size_t i = 0; i < chain.size(); ++i ) delete chain[i]; } @@ -133,22 +168,73 @@ public: */ const IPv6_Hdr* operator[](const size_t i) const { return chain[i]; } + /** + * Returns whether the header chain indicates a fragmented packet. + */ + bool IsFragment() const + { return chain[chain.size()-1]->Type() == IPPROTO_FRAGMENT; } + + /** + * Returns pointer to fragment header structure if the chain contains one. + */ + const struct ip6_frag* GetFragHdr() const + { return IsFragment() ? + (const struct ip6_frag*)chain[chain.size()-1]->Data(): 0; } + + /** + * If the header chain is a fragment, returns the offset in number of bytes + * relative to the start of the Fragmentable Part of the original packet. + */ + uint16 FragOffset() const + { return IsFragment() ? + (ntohs(GetFragHdr()->ip6f_offlg) & 0xfff8) : 0; } + + /** + * If the header chain is a fragment, returns the identification field. + */ + uint32 ID() const + { return IsFragment() ? ntohl(GetFragHdr()->ip6f_ident) : 0; } + + /** + * If the header chain is a fragment, returns the M (more fragments) flag. + */ + int MF() const + { return IsFragment() ? + (ntohs(GetFragHdr()->ip6f_offlg) & 0x0001) != 0 : 0; } + protected: + void Init(const struct ip6_hdr* ip6, bool set_next, uint16 next = 0); + vector chain; uint16 length; // The summation of all header lengths in the chain in bytes. }; class IP_Hdr { public: + IP_Hdr(const u_char* p, bool arg_del) + : ip4(0), ip6(0), del(arg_del), ip6_hdrs(0) + { + if ( ((const struct ip*)p)->ip_v == 4 ) + ip4 = (const struct ip*)p; + else if ( ((const struct ip*)p)->ip_v == 6 ) + { + ip6 = (const struct ip6_hdr*)p; + ip6_hdrs = new IPv6_Hdr_Chain(ip6); + } + else if ( arg_del ) + delete [] p; + } + IP_Hdr(const struct ip* arg_ip4, bool arg_del) - : ip4(arg_ip4), ip6(0), del(arg_del) + : ip4(arg_ip4), ip6(0), del(arg_del), ip6_hdrs(0) { } - IP_Hdr(const struct ip6_hdr* arg_ip6, bool arg_del) - : ip4(0), ip6(arg_ip6), del(arg_del) + IP_Hdr(const struct ip6_hdr* arg_ip6, bool arg_del, + const IPv6_Hdr_Chain* c = 0) + : ip4(0), ip6(arg_ip6), del(arg_del), + ip6_hdrs(c ? c : new IPv6_Hdr_Chain(ip6)) { - ip6_hdrs = new IPv6_Hdr_Chain(ip6); } ~IP_Hdr() @@ -190,7 +276,7 @@ public: return ntohs(ip6->ip6_plen) - ip6_hdrs->TotalLength(); } - uint16 TotalLen() const + uint32 TotalLen() const { return ip4 ? ntohs(ip4->ip_len) : ntohs(ip6->ip6_plen) + 40; } uint16 HdrLen() const @@ -207,25 +293,19 @@ public: unsigned char TTL() const { return ip4 ? ip4->ip_ttl : ip6->ip6_hlim; } - //TODO: check for IPv6 Fragment ext. header bool IsFragment() const - { return ip4 ? (ntohs(ip4->ip_off) & 0x3fff) != 0 : false; } + { return ip4 ? (ntohs(ip4->ip_off) & 0x3fff) != 0 : + ip6_hdrs->IsFragment(); } - //TODO: check for IPv6 Fragment ext. header uint16 FragOffset() const - { return ip4 ? (ntohs(ip4->ip_off) & 0x1fff) * 8 : 0; } + { return ip4 ? (ntohs(ip4->ip_off) & 0x1fff) * 8 : + ip6_hdrs->FragOffset(); } - //TODO: check for IPv6 Fragment ext. header - uint16 FragField() const - { return ip4 ? ntohs(ip4->ip_off) : 0; } + uint32 ID() const + { return ip4 ? ntohs(ip4->ip_id) : ip6_hdrs->ID(); } - //TODO: check for IPv6 Fragment ext. header - uint16 ID() const - { return ip4 ? ntohs(ip4->ip_id) : 0; } - - //TODO: check for IPv6 Fragment ext. header int MF() const - { return ip4 ? (ntohs(ip4->ip_off) & 0x2000) != 0 : 0; } + { return ip4 ? 
(ntohs(ip4->ip_off) & 0x2000) != 0 : ip6_hdrs->MF(); } // IPv6 has no "Don't Fragment" flag. int DF() const @@ -240,7 +320,7 @@ private: const struct ip* ip4; const struct ip6_hdr* ip6; bool del; - IPv6_Hdr_Chain* ip6_hdrs; + const IPv6_Hdr_Chain* ip6_hdrs; }; #endif From 65307764f476c9f06804f26712b3adde3450ef8e Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 7 Mar 2012 12:40:01 -0600 Subject: [PATCH 122/651] Fix some IPv6 header related bugs. - IPv6 payload length calculation didn't count main 40 byte IPv6 header. - Fix how IPv6 headers that use TLV options are built. - Fix ip6_hdr_chain$ext_order starting index at 1 instead of 0. --- src/IP.cc | 34 +++++++++++++++++++++++++--------- src/IP.h | 2 +- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/src/IP.cc b/src/IP.cc index ce8514519a..8a102e8542 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -38,18 +38,34 @@ RecordVal* IPv6_Hdr::BuildRecordVal() const static VectorVal* BuildOptionsVal(const u_char* data, uint16 len) { - VectorVal* vv = new VectorVal(new VectorType(ip6_option_type->Ref())); + VectorVal* vv = new VectorVal(new VectorType( + hdrType(ip6_option_type, "ip6_option")->Ref())); + while ( len > 0 ) { const struct ip6_opt* opt = (const struct ip6_opt*) data; - RecordVal* rv = new RecordVal(hdrType(ip6_option_type, "ip6_option")); + RecordVal* rv = new RecordVal(ip6_option_type); rv->Assign(0, new Val(opt->ip6o_type, TYPE_COUNT)); - rv->Assign(1, new Val(opt->ip6o_len, TYPE_COUNT)); - uint16 off = 2 * sizeof(uint8); - rv->Assign(2, new StringVal( - new BroString(data + off, opt->ip6o_len - off, 1))); - data += opt->ip6o_len + off; - len -= opt->ip6o_len + off; + + if ( opt->ip6o_type == 0 ) + { + // Pad1 option + rv->Assign(1, new Val(0, TYPE_COUNT)); + rv->Assign(2, new StringVal("")); + data += sizeof(uint8); + len -= sizeof(uint8); + } + else + { + // PadN or other option + uint16 off = 2 * sizeof(uint8); + rv->Assign(1, new Val(opt->ip6o_len, TYPE_COUNT)); + rv->Assign(2, new StringVal( + new BroString(data + off, opt->ip6o_len, 1))); + data += opt->ip6o_len + off; + len -= opt->ip6o_len + off; + } + vv->Assign(vv->Size(), rv, 0); } return vv; @@ -194,7 +210,7 @@ RecordVal* IP_Hdr::BuildRecordVal() const reporter->InternalError("pkt_hdr assigned bad header %d", type); break; } - order->Assign(i, new Val(type, TYPE_COUNT), 0); + order->Assign(i-1, new Val(type, TYPE_COUNT), 0); } rval->Assign(0, ((*ip6_hdrs)[0])->BuildRecordVal()); diff --git a/src/IP.h b/src/IP.h index be3d568375..8918f9da5b 100644 --- a/src/IP.h +++ b/src/IP.h @@ -273,7 +273,7 @@ public: if ( ip4 ) return ntohs(ip4->ip_len) - ip4->ip_hl * 4; else - return ntohs(ip6->ip6_plen) - ip6_hdrs->TotalLength(); + return ntohs(ip6->ip6_plen) + 40 - ip6_hdrs->TotalLength(); } uint32 TotalLen() const From 76ef36e048c5ccf6298da06b3aee63e5a2864720 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 7 Mar 2012 14:17:56 -0600 Subject: [PATCH 123/651] Add a few comments to IP.h --- src/IP.h | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/src/IP.h b/src/IP.h index 8918f9da5b..62391ca8fc 100644 --- a/src/IP.h +++ b/src/IP.h @@ -260,6 +260,10 @@ public: IPAddr DstAddr() const { return ip4 ? IPAddr(ip4->ip_dst) : IPAddr(ip6->ip6_dst); } + /** + * Returns a pointer to the payload of the IP packet, usually an + * upper-layer protocol. 
+ */ const u_char* Payload() const { if ( ip4 ) @@ -268,6 +272,10 @@ public: return ((const u_char*) ip6) + ip6_hdrs->TotalLength(); } + /** + * Returns the length of the IP packet's payload (length of packet minus + * header length or, for IPv6, also minus length of all extension headers). + */ uint16 PayloadLen() const { if ( ip4 ) @@ -276,16 +284,30 @@ public: return ntohs(ip6->ip6_plen) + 40 - ip6_hdrs->TotalLength(); } + /** + * Returns the length of the IP packet (length of headers and payload). + */ uint32 TotalLen() const { return ip4 ? ntohs(ip4->ip_len) : ntohs(ip6->ip6_plen) + 40; } + /** + * Returns length of IP packet header (includes extension headers for IPv6). + */ uint16 HdrLen() const { return ip4 ? ip4->ip_hl * 4 : ip6_hdrs->TotalLength(); } + /** + * For IPv6 header chains, returns the type of the last header in the chain. + */ uint8 LastHeader() const { return ip4 ? IPPROTO_RAW : ((*ip6_hdrs)[ip6_hdrs->Size()-1])->Type(); } + /** + * Returns the protocol type of the IP packet's payload, usually an + * upper-layer protocol. For IPv6, this returns the last (extension) + * header's Next Header value. + */ unsigned char NextProto() const { return ip4 ? ip4->ip_p : ((*ip6_hdrs)[ip6_hdrs->Size()-1])->NextHdr(); } @@ -297,23 +319,42 @@ public: { return ip4 ? (ntohs(ip4->ip_off) & 0x3fff) != 0 : ip6_hdrs->IsFragment(); } + /** + * Returns the fragment packet's offset in relation to the original + * packet in bytes. + */ uint16 FragOffset() const { return ip4 ? (ntohs(ip4->ip_off) & 0x1fff) * 8 : ip6_hdrs->FragOffset(); } + /** + * Returns the fragment packet's identification field. + */ uint32 ID() const { return ip4 ? ntohs(ip4->ip_id) : ip6_hdrs->ID(); } + /** + * Returns whether a fragment packet's "More Fragments" field is set. + */ int MF() const { return ip4 ? (ntohs(ip4->ip_off) & 0x2000) != 0 : ip6_hdrs->MF(); } - // IPv6 has no "Don't Fragment" flag. + /** + * Returns whether a fragment packet's "Don't Fragment" field is set. + * Note that IPv6 has no such field. + */ int DF() const { return ip4 ? ((ntohs(ip4->ip_off) & 0x4000) != 0) : 0; } + /** + * Returns number of IP headers in packet (includes IPv6 extension headers). + */ size_t NumHeaders() const { return ip4 ? 1 : ip6_hdrs->Size(); } + /** + * Returns an ip_hdr or ip6_hdr_chain RecordVal. + */ RecordVal* BuildRecordVal() const; private: From 0b32c980bf6117d3149d4ce8d41aa46df11c27e4 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 8 Mar 2012 13:12:04 -0600 Subject: [PATCH 124/651] Update PacketFilter/Discarder code for IP version independence. The signatures of script-layer functions 'discarder_check_ip', 'discarder_check_tcp', 'discarder_check_udp', and 'discarder_check_icmp' were changed to use the more general 'pkt_hdr' type as a parameter instead of individual header types. 
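As a quick sketch of the new interface (assuming only the pkt_hdr record introduced earlier in this series, whose ip/ip6/tcp/udp/icmp fields are all &optional), a script-level discarder now matches on the packet header record as a whole, for example:

	function discarder_check_ip(p: pkt_hdr): bool
		{
		# Returning T tells Bro to skip the packet entirely.
		# Here: discard IPv4 traffic and analyze only IPv6.
		return p?$ip;
		}

discarder_check_tcp and discarder_check_udp follow the same pattern, additionally receiving up to discarder_maxlen bytes of payload as a string argument.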
--- scripts/base/init-bare.bro | 21 ++-- src/Discard.cc | 83 ++-------------- src/Discard.h | 9 -- src/IP.cc | 84 +++++++++++++++- src/IP.h | 9 +- src/PacketFilter.cc | 7 +- src/Sessions.cc | 90 +---------------- src/Sessions.h | 5 - .../bifs.install_src_addr_filter/output | 8 ++ testing/btest/Baseline/core.discarder/output | 24 +++++ testing/btest/Traces/icmp-unreach.trace | Bin 0 -> 234 bytes .../btest/bifs/install_src_addr_filter.test | 13 +++ testing/btest/core/discarder.bro | 92 ++++++++++++++++++ 13 files changed, 251 insertions(+), 194 deletions(-) create mode 100644 testing/btest/Baseline/bifs.install_src_addr_filter/output create mode 100644 testing/btest/Baseline/core.discarder/output create mode 100644 testing/btest/Traces/icmp-unreach.trace create mode 100644 testing/btest/bifs/install_src_addr_filter.test create mode 100644 testing/btest/core/discarder.bro diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index c007bd8262..a031080f0e 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -1167,7 +1167,7 @@ global discarder_maxlen = 128 &redef; ## analysis. If the function signals to discard a packet, no further processing ## will be performed on it. ## -## i: The IP header of the considered packet. +## p: The IP header of the considered packet. ## ## Returns: True if the packet should not be analyzed any further. ## @@ -1176,15 +1176,15 @@ global discarder_maxlen = 128 &redef; ## ## .. note:: This is very low-level functionality and potentially expensive. ## Avoid using it. -global discarder_check_ip: function(i: ip_hdr): bool; +global discarder_check_ip: function(p: pkt_hdr): bool; ## Function for skipping packets based on their TCP header. If defined, this ## function will be called for all TCP packets before Bro performs any further ## analysis. If the function signals to discard a packet, no further processing ## will be performed on it. ## -## i: The IP header of the considered packet. -## t: The TCP header. +## p: The IP and TCP headers of the considered packet. +## ## d: Up to :bro:see:`discarder_maxlen` bytes of the TCP payload. ## ## Returns: True if the packet should not be analyzed any further. @@ -1194,15 +1194,15 @@ global discarder_check_ip: function(i: ip_hdr): bool; ## ## .. note:: This is very low-level functionality and potentially expensive. ## Avoid using it. -global discarder_check_tcp: function(i: ip_hdr, t: tcp_hdr, d: string): bool; +global discarder_check_tcp: function(p: pkt_hdr, d: string): bool; ## Function for skipping packets based on their UDP header. If defined, this ## function will be called for all UDP packets before Bro performs any further ## analysis. If the function signals to discard a packet, no further processing ## will be performed on it. ## -## i: The IP header of the considered packet. -## t: The UDP header. +## p: The IP and UDP headers of the considered packet. +## ## d: Up to :bro:see:`discarder_maxlen` bytes of the UDP payload. ## ## Returns: True if the packet should not be analyzed any further. @@ -1212,15 +1212,14 @@ global discarder_check_tcp: function(i: ip_hdr, t: tcp_hdr, d: string): bool; ## ## .. note:: This is very low-level functionality and potentially expensive. ## Avoid using it. -global discarder_check_udp: function(i: ip_hdr, u: udp_hdr, d: string): bool; +global discarder_check_udp: function(p: pkt_hdr, d: string): bool; ## Function for skipping packets based on their ICMP header. 
If defined, this ## function will be called for all ICMP packets before Bro performs any further ## analysis. If the function signals to discard a packet, no further processing ## will be performed on it. ## -## i: The IP header of the considered packet. -## ih: The ICMP header. +## p: The IP and ICMP headers of the considered packet. ## ## Returns: True if the packet should not be analyzed any further. ## @@ -1229,7 +1228,7 @@ global discarder_check_udp: function(i: ip_hdr, u: udp_hdr, d: string): bool; ## ## .. note:: This is very low-level functionality and potentially expensive. ## Avoid using it. -global discarder_check_icmp: function(i: ip_hdr, ih: icmp_hdr): bool; +global discarder_check_icmp: function(p: pkt_hdr): bool; ## Bro's watchdog interval. const watchdog_interval = 10 sec &redef; diff --git a/src/Discard.cc b/src/Discard.cc index a71b810601..edfeea1408 100644 --- a/src/Discard.cc +++ b/src/Discard.cc @@ -10,11 +10,6 @@ Discarder::Discarder() { - ip_hdr = internal_type("ip_hdr")->AsRecordType(); - tcp_hdr = internal_type("tcp_hdr")->AsRecordType(); - udp_hdr = internal_type("udp_hdr")->AsRecordType(); - icmp_hdr = internal_type("icmp_hdr")->AsRecordType(); - check_ip = internal_func("discarder_check_ip"); check_tcp = internal_func("discarder_check_tcp"); check_udp = internal_func("discarder_check_udp"); @@ -36,12 +31,10 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) { int discard_packet = 0; - const struct ip* ip4 = ip->IP4_Hdr(); - if ( check_ip ) { val_list* args = new val_list; - args->append(BuildHeader(ip4)); + args->append(ip->BuildPktHdrVal()); try { @@ -59,19 +52,18 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) return discard_packet; } - int proto = ip4->ip_p; + int proto = ip->NextProto(); if ( proto != IPPROTO_TCP && proto != IPPROTO_UDP && proto != IPPROTO_ICMP ) // This is not a protocol we understand. return 0; // XXX shall we only check the first packet??? - uint32 frag_field = ntohs(ip4->ip_off); - if ( (frag_field & 0x3fff) != 0 ) + if ( ip->IsFragment() ) // Never check any fragment. return 0; - int ip_hdr_len = ip4->ip_hl * 4; + int ip_hdr_len = ip->HdrLen(); len -= ip_hdr_len; // remove IP header caplen -= ip_hdr_len; @@ -87,7 +79,7 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) // Where the data starts - if this is a protocol we know about, // this gets advanced past the transport header. 
- const u_char* data = ((u_char*) ip4 + ip_hdr_len); + const u_char* data = ip->Payload(); if ( is_tcp ) { @@ -97,8 +89,7 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) int th_len = tp->th_off * 4; val_list* args = new val_list; - args->append(BuildHeader(ip4)); - args->append(BuildHeader(tp, len)); + args->append(ip->BuildPktHdrVal()); args->append(BuildData(data, th_len, len, caplen)); try @@ -123,8 +114,7 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) int uh_len = sizeof (struct udphdr); val_list* args = new val_list; - args->append(BuildHeader(ip4)); - args->append(BuildHeader(up)); + args->append(ip->BuildPktHdrVal()); args->append(BuildData(data, uh_len, len, caplen)); try @@ -148,8 +138,7 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) const struct icmp* ih = (const struct icmp*) data; val_list* args = new val_list; - args->append(BuildHeader(ip4)); - args->append(BuildHeader(ih)); + args->append(ip->BuildPktHdrVal()); try { @@ -168,62 +157,6 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) return discard_packet; } -Val* Discarder::BuildHeader(const struct ip* ip) - { - RecordVal* hdr = new RecordVal(ip_hdr); - - hdr->Assign(0, new Val(ip->ip_hl * 4, TYPE_COUNT)); - hdr->Assign(1, new Val(ip->ip_tos, TYPE_COUNT)); - hdr->Assign(2, new Val(ntohs(ip->ip_len), TYPE_COUNT)); - hdr->Assign(3, new Val(ntohs(ip->ip_id), TYPE_COUNT)); - hdr->Assign(4, new Val(ip->ip_ttl, TYPE_COUNT)); - hdr->Assign(5, new Val(ip->ip_p, TYPE_COUNT)); - hdr->Assign(6, new AddrVal(ip->ip_src.s_addr)); - hdr->Assign(7, new AddrVal(ip->ip_dst.s_addr)); - - return hdr; - } - -Val* Discarder::BuildHeader(const struct tcphdr* tp, int tcp_len) - { - RecordVal* hdr = new RecordVal(tcp_hdr); - - hdr->Assign(0, new PortVal(ntohs(tp->th_sport), TRANSPORT_TCP)); - hdr->Assign(1, new PortVal(ntohs(tp->th_dport), TRANSPORT_TCP)); - hdr->Assign(2, new Val(uint32(ntohl(tp->th_seq)), TYPE_COUNT)); - hdr->Assign(3, new Val(uint32(ntohl(tp->th_ack)), TYPE_COUNT)); - - int tcp_hdr_len = tp->th_off * 4; - - hdr->Assign(4, new Val(tcp_hdr_len, TYPE_COUNT)); - hdr->Assign(5, new Val(tcp_len - tcp_hdr_len, TYPE_COUNT)); - - hdr->Assign(6, new Val(tp->th_flags, TYPE_COUNT)); - hdr->Assign(7, new Val(ntohs(tp->th_win), TYPE_COUNT)); - - return hdr; - } - -Val* Discarder::BuildHeader(const struct udphdr* up) - { - RecordVal* hdr = new RecordVal(udp_hdr); - - hdr->Assign(0, new PortVal(ntohs(up->uh_sport), TRANSPORT_UDP)); - hdr->Assign(1, new PortVal(ntohs(up->uh_dport), TRANSPORT_UDP)); - hdr->Assign(2, new Val(ntohs(up->uh_ulen), TYPE_COUNT)); - - return hdr; - } - -Val* Discarder::BuildHeader(const struct icmp* icmp) - { - RecordVal* hdr = new RecordVal(icmp_hdr); - - hdr->Assign(0, new Val(icmp->icmp_type, TYPE_COUNT)); - - return hdr; - } - Val* Discarder::BuildData(const u_char* data, int hdrlen, int len, int caplen) { len -= hdrlen; diff --git a/src/Discard.h b/src/Discard.h index 16f7a58e6e..f4daabefa7 100644 --- a/src/Discard.h +++ b/src/Discard.h @@ -25,17 +25,8 @@ public: int NextPacket(const IP_Hdr* ip, int len, int caplen); protected: - Val* BuildHeader(const struct ip* ip); - Val* BuildHeader(const struct tcphdr* tp, int tcp_len); - Val* BuildHeader(const struct udphdr* up); - Val* BuildHeader(const struct icmp* icmp); Val* BuildData(const u_char* data, int hdrlen, int len, int caplen); - RecordType* ip_hdr; - RecordType* tcp_hdr; - RecordType* udp_hdr; - RecordType* icmp_hdr; - Func* check_ip; Func* check_tcp; Func* check_udp; diff --git 
a/src/IP.cc b/src/IP.cc index 8a102e8542..77797ece8f 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -141,7 +141,7 @@ RecordVal* IPv6_ESP::BuildRecordVal() const return rv; } -RecordVal* IP_Hdr::BuildRecordVal() const +RecordVal* IP_Hdr::BuildIPHdrVal() const { RecordVal* rval = 0; @@ -226,6 +226,88 @@ RecordVal* IP_Hdr::BuildRecordVal() const return rval; } +RecordVal* IP_Hdr::BuildPktHdrVal() const + { + static RecordType* pkt_hdr_type = 0; + static RecordType* tcp_hdr_type = 0; + static RecordType* udp_hdr_type = 0; + static RecordType* icmp_hdr_type = 0; + + if ( ! pkt_hdr_type ) + { + pkt_hdr_type = internal_type("pkt_hdr")->AsRecordType(); + tcp_hdr_type = internal_type("tcp_hdr")->AsRecordType(); + udp_hdr_type = internal_type("udp_hdr")->AsRecordType(); + icmp_hdr_type = internal_type("icmp_hdr")->AsRecordType(); + } + + RecordVal* pkt_hdr = new RecordVal(pkt_hdr_type); + + if ( ip4 ) + pkt_hdr->Assign(0, BuildIPHdrVal()); + else + pkt_hdr->Assign(1, BuildIPHdrVal()); + + // L4 header. + const u_char* data = Payload(); + + int proto = NextProto(); + switch ( proto ) { + case IPPROTO_TCP: + { + const struct tcphdr* tp = (const struct tcphdr*) data; + RecordVal* tcp_hdr = new RecordVal(tcp_hdr_type); + + int tcp_hdr_len = tp->th_off * 4; + int data_len = PayloadLen() - tcp_hdr_len; + + tcp_hdr->Assign(0, new PortVal(ntohs(tp->th_sport), TRANSPORT_TCP)); + tcp_hdr->Assign(1, new PortVal(ntohs(tp->th_dport), TRANSPORT_TCP)); + tcp_hdr->Assign(2, new Val(uint32(ntohl(tp->th_seq)), TYPE_COUNT)); + tcp_hdr->Assign(3, new Val(uint32(ntohl(tp->th_ack)), TYPE_COUNT)); + tcp_hdr->Assign(4, new Val(tcp_hdr_len, TYPE_COUNT)); + tcp_hdr->Assign(5, new Val(data_len, TYPE_COUNT)); + tcp_hdr->Assign(6, new Val(tp->th_flags, TYPE_COUNT)); + tcp_hdr->Assign(7, new Val(ntohs(tp->th_win), TYPE_COUNT)); + + pkt_hdr->Assign(2, tcp_hdr); + break; + } + + case IPPROTO_UDP: + { + const struct udphdr* up = (const struct udphdr*) data; + RecordVal* udp_hdr = new RecordVal(udp_hdr_type); + + udp_hdr->Assign(0, new PortVal(ntohs(up->uh_sport), TRANSPORT_UDP)); + udp_hdr->Assign(1, new PortVal(ntohs(up->uh_dport), TRANSPORT_UDP)); + udp_hdr->Assign(2, new Val(ntohs(up->uh_ulen), TYPE_COUNT)); + + pkt_hdr->Assign(3, udp_hdr); + break; + } + + case IPPROTO_ICMP: + { + const struct icmp* icmpp = (const struct icmp *) data; + RecordVal* icmp_hdr = new RecordVal(icmp_hdr_type); + + icmp_hdr->Assign(0, new Val(icmpp->icmp_type, TYPE_COUNT)); + + pkt_hdr->Assign(4, icmp_hdr); + break; + } + + default: + { + // This is not a protocol we understand. + break; + } + } + + return pkt_hdr; + } + static inline IPv6_Hdr* getIPv6Header(uint8 type, const u_char* d, bool set_next = false, uint16 nxt = 0) { diff --git a/src/IP.h b/src/IP.h index 62391ca8fc..53fe1daf84 100644 --- a/src/IP.h +++ b/src/IP.h @@ -249,7 +249,6 @@ public: } } - //TODO: audit usages of this for correct IPv6 support or IPv4 assumptions const struct ip* IP4_Hdr() const { return ip4; } const struct ip6_hdr* IP6_Hdr() const { return ip6; } @@ -355,7 +354,13 @@ public: /** * Returns an ip_hdr or ip6_hdr_chain RecordVal. */ - RecordVal* BuildRecordVal() const; + RecordVal* BuildIPHdrVal() const; + + /** + * Returns a pkt_hdr RecordVal, which includes not only the IP header, but + * also upper-layer (tcp/udp/icmp) headers. 
+ */ + RecordVal* BuildPktHdrVal() const; private: const struct ip* ip4; diff --git a/src/PacketFilter.cc b/src/PacketFilter.cc index 93a452482f..4fb3b1c8f7 100644 --- a/src/PacketFilter.cc +++ b/src/PacketFilter.cc @@ -71,9 +71,7 @@ bool PacketFilter::MatchFilter(const Filter& f, const IP_Hdr& ip, if ( ip.NextProto() == IPPROTO_TCP && f.tcp_flags ) { // Caution! The packet sanity checks have not been performed yet - const struct ip* ip4 = ip.IP4_Hdr(); - - int ip_hdr_len = ip4->ip_hl * 4; + int ip_hdr_len = ip.HdrLen(); len -= ip_hdr_len; // remove IP header caplen -= ip_hdr_len; @@ -82,8 +80,7 @@ bool PacketFilter::MatchFilter(const Filter& f, const IP_Hdr& ip, // Packet too short, will be dropped anyway. return false; - const struct tcphdr* tp = - (const struct tcphdr*) ((u_char*) ip4 + ip_hdr_len); + const struct tcphdr* tp = (const struct tcphdr*) ip.Payload(); if ( tp->th_flags & f.tcp_flags ) // At least one of the flags is set, so don't drop diff --git a/src/Sessions.cc b/src/Sessions.cc index e8cece9e46..b4115f5c16 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -333,7 +333,7 @@ void NetSessions::NextPacketSecondary(double /* t */, const struct pcap_pkthdr* new StringVal(sp->Event()->Filter()); args->append(cmd_val); IP_Hdr ip_hdr(ip, false); - args->append(BuildHeader(&ip_hdr)); + args->append(ip_hdr.BuildPktHdrVal()); // ### Need to queue event here. try { @@ -470,7 +470,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( esp_packet ) { val_list* vl = new val_list(); - vl->append(ip_hdr->BuildRecordVal()); + vl->append(ip_hdr->BuildPktHdrVal()); mgr.QueueEvent(esp_packet, vl); } Remove(f); @@ -593,13 +593,13 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( ipv6_ext_headers && ip_hdr->NumHeaders() > 1 ) { - pkt_hdr_val = BuildHeader(ip_hdr); + pkt_hdr_val = ip_hdr->BuildPktHdrVal(); conn->Event(new_packet, 0, pkt_hdr_val); } if ( new_packet ) conn->Event(new_packet, 0, - pkt_hdr_val ? pkt_hdr_val->Ref() : BuildHeader(ip_hdr)); + pkt_hdr_val ? pkt_hdr_val->Ref() : ip_hdr->BuildPktHdrVal()); conn->NextPacket(t, is_orig, ip_hdr, len, caplen, data, record_packet, record_content, @@ -654,88 +654,6 @@ bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, return false; } - -Val* NetSessions::BuildHeader(const IP_Hdr* ip) - { - static RecordType* pkt_hdr_type = 0; - static RecordType* tcp_hdr_type = 0; - static RecordType* udp_hdr_type = 0; - static RecordType* icmp_hdr_type; - - if ( ! pkt_hdr_type ) - { - pkt_hdr_type = internal_type("pkt_hdr")->AsRecordType(); - tcp_hdr_type = internal_type("tcp_hdr")->AsRecordType(); - udp_hdr_type = internal_type("udp_hdr")->AsRecordType(); - icmp_hdr_type = internal_type("icmp_hdr")->AsRecordType(); - } - - RecordVal* pkt_hdr = new RecordVal(pkt_hdr_type); - - if ( ip->IP4_Hdr() ) - pkt_hdr->Assign(0, ip->BuildRecordVal()); - else - pkt_hdr->Assign(1, ip->BuildRecordVal()); - - // L4 header. 
- const u_char* data = ip->Payload(); - - int proto = ip->NextProto(); - switch ( proto ) { - case IPPROTO_TCP: - { - const struct tcphdr* tp = (const struct tcphdr*) data; - RecordVal* tcp_hdr = new RecordVal(tcp_hdr_type); - - int tcp_hdr_len = tp->th_off * 4; - int data_len = ip->PayloadLen() - tcp_hdr_len; - - tcp_hdr->Assign(0, new PortVal(ntohs(tp->th_sport), TRANSPORT_TCP)); - tcp_hdr->Assign(1, new PortVal(ntohs(tp->th_dport), TRANSPORT_TCP)); - tcp_hdr->Assign(2, new Val(uint32(ntohl(tp->th_seq)), TYPE_COUNT)); - tcp_hdr->Assign(3, new Val(uint32(ntohl(tp->th_ack)), TYPE_COUNT)); - tcp_hdr->Assign(4, new Val(tcp_hdr_len, TYPE_COUNT)); - tcp_hdr->Assign(5, new Val(data_len, TYPE_COUNT)); - tcp_hdr->Assign(6, new Val(tp->th_flags, TYPE_COUNT)); - tcp_hdr->Assign(7, new Val(ntohs(tp->th_win), TYPE_COUNT)); - - pkt_hdr->Assign(2, tcp_hdr); - break; - } - - case IPPROTO_UDP: - { - const struct udphdr* up = (const struct udphdr*) data; - RecordVal* udp_hdr = new RecordVal(udp_hdr_type); - - udp_hdr->Assign(0, new PortVal(ntohs(up->uh_sport), TRANSPORT_UDP)); - udp_hdr->Assign(1, new PortVal(ntohs(up->uh_dport), TRANSPORT_UDP)); - udp_hdr->Assign(2, new Val(ntohs(up->uh_ulen), TYPE_COUNT)); - - pkt_hdr->Assign(3, udp_hdr); - break; - } - - case IPPROTO_ICMP: - { - const struct icmp* icmpp = (const struct icmp *) data; - RecordVal* icmp_hdr = new RecordVal(icmp_hdr_type); - - icmp_hdr->Assign(0, new Val(icmpp->icmp_type, TYPE_COUNT)); - - pkt_hdr->Assign(4, icmp_hdr); - break; - } - - default: - { - // This is not a protocol we understand. - } - } - - return pkt_hdr; - } - FragReassembler* NetSessions::NextFragment(double t, const IP_Hdr* ip, const u_char* pkt) { diff --git a/src/Sessions.h b/src/Sessions.h index ac5fcacfb5..06c6057dbf 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -190,11 +190,6 @@ protected: void Internal(const char* msg, const struct pcap_pkthdr* hdr, const u_char* pkt); - // Builds a record encapsulating a packet. This should be more - // general, including the equivalent of a union of tcp/udp/icmp - // headers . - Val* BuildHeader(const IP_Hdr* ip); - // For a given protocol, checks whether the header's length as derived // from lower-level headers or the length actually captured is less // than that protocol's minimum header size. 
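A quick illustration of the refactoring above: the per-protocol Discarder::BuildHeader() helpers are gone, and each script-level discarder hook now receives a single pkt_hdr record built by IP_Hdr::BuildPktHdrVal(). The following minimal sketch shows the new signatures from a user script's point of view; the address is only a placeholder, and the authoritative examples are the discarder.bro test cases added below in this patch.

function discarder_check_ip(p: pkt_hdr): bool
	{
	# Keep IPv4 traffic from one placeholder host; discard everything else.
	if ( p?$ip && p$ip$src == 192.0.2.1 )
		return F;
	return T;
	}

function discarder_check_tcp(p: pkt_hdr, d: string): bool
	{
	# Discard every TCP packet that is not a pure SYN.
	return p$tcp$flags != TH_SYN;
	}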
diff --git a/testing/btest/Baseline/bifs.install_src_addr_filter/output b/testing/btest/Baseline/bifs.install_src_addr_filter/output new file mode 100644 index 0000000000..bf99083391 --- /dev/null +++ b/testing/btest/Baseline/bifs.install_src_addr_filter/output @@ -0,0 +1,8 @@ +[orig_h=141.142.220.118, orig_p=48649/tcp, resp_h=208.80.152.118, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=49996/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=49997/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=49998/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=49999/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=50000/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=50001/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=35642/tcp, resp_h=208.80.152.2, resp_p=80/tcp] diff --git a/testing/btest/Baseline/core.discarder/output b/testing/btest/Baseline/core.discarder/output new file mode 100644 index 0000000000..82b4b3e622 --- /dev/null +++ b/testing/btest/Baseline/core.discarder/output @@ -0,0 +1,24 @@ +################ IP Discarder ################ +[orig_h=141.142.220.118, orig_p=35634/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=35634/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=35642/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=35642/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=35642/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=35642/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +################ TCP Discarder ################ +[orig_h=141.142.220.118, orig_p=48649/tcp, resp_h=208.80.152.118, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=49996/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=49997/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=49998/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=49999/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=50000/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=50001/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +[orig_h=141.142.220.118, orig_p=35642/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +################ UDP Discarder ################ +[orig_h=fe80::217:f2ff:fed7:cf65, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp] +[orig_h=fe80::3074:17d5:2052:c324, orig_p=65373/udp, resp_h=ff02::1:3, resp_p=5355/udp] +[orig_h=fe80::3074:17d5:2052:c324, orig_p=65373/udp, resp_h=ff02::1:3, resp_p=5355/udp] +[orig_h=fe80::3074:17d5:2052:c324, orig_p=54213/udp, resp_h=ff02::1:3, resp_p=5355/udp] +[orig_h=fe80::3074:17d5:2052:c324, orig_p=54213/udp, resp_h=ff02::1:3, resp_p=5355/udp] +################ ICMP Discarder ################ +Discard icmp packet: [icmp_type=3] diff --git a/testing/btest/Traces/icmp-unreach.trace b/testing/btest/Traces/icmp-unreach.trace new file mode 100644 index 0000000000000000000000000000000000000000..60137bb6fe9a7c942ca6039b194277bb9e0bdca3 GIT binary patch literal 234 zcmca|c+)~A1{MYw`2U}Qff2|_v1ai$7Gh#J1Z0CSgXH7qvC0hGx0555aWJ?tFl^9} zV_;}t>?=R8f-wz<8JU@xcY_oHLh)|9+r10%N7iu{z) Y;sfGHQXF6@!vjtjVkS_rlOp-L0Av?6tN;K2 literal 0 HcmV?d00001 diff --git a/testing/btest/bifs/install_src_addr_filter.test b/testing/btest/bifs/install_src_addr_filter.test new file mode 100644 index 0000000000..5b387832de --- /dev/null +++ 
b/testing/btest/bifs/install_src_addr_filter.test @@ -0,0 +1,13 @@ +# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +event bro_init() + { + install_src_addr_filter(141.142.220.118, TH_SYN, 100.0); + } + +event new_packet(c: connection, p: pkt_hdr) + { + if ( p?$tcp && p$ip$src == 141.142.220.118 ) + print c$id; + } diff --git a/testing/btest/core/discarder.bro b/testing/btest/core/discarder.bro new file mode 100644 index 0000000000..9dfa9a2cea --- /dev/null +++ b/testing/btest/core/discarder.bro @@ -0,0 +1,92 @@ +# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace discarder-ip.bro >output +# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace discarder-tcp.bro >>output +# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace discarder-udp.bro >>output +# @TEST-EXEC: bro -C -r $TRACES/icmp-unreach.trace discarder-icmp.bro >>output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE discarder-ip.bro + +event bro_init() + { + print "################ IP Discarder ################"; + } + +function discarder_check_ip(p: pkt_hdr): bool + { + if ( p?$ip && p$ip$src == 141.142.220.118 && p$ip$dst == 208.80.152.2 ) + return F; + return T; + } + + +event new_packet(c: connection, p: pkt_hdr) + { + print c$id; + } + +@TEST-END-FILE + +@TEST-START-FILE discarder-tcp.bro + +event bro_init() + { + print "################ TCP Discarder ################"; + } + +function discarder_check_tcp(p: pkt_hdr, d: string): bool + { + if ( p$tcp$flags == TH_SYN ) + return F; + return T; + } + +event new_packet(c: connection, p: pkt_hdr) + { + if ( p?$tcp ) + print c$id; + } + +@TEST-END-FILE + +@TEST-START-FILE discarder-udp.bro + +event bro_init() + { + print "################ UDP Discarder ################"; + } + +function discarder_check_udp(p: pkt_hdr, d: string): bool + { + if ( p?$ip6 ) + return F; + return T; + } + +event new_packet(c: connection, p: pkt_hdr) + { + if ( p?$udp ) + print c$id; + } + +@TEST-END-FILE + +@TEST-START-FILE discarder-icmp.bro + +event bro_init() + { + print "################ ICMP Discarder ################"; + } + +function discarder_check_icmp(p: pkt_hdr): bool + { + print fmt("Discard icmp packet: %s", p$icmp); + return T; + } + +event new_packet(c: connection, p: pkt_hdr) + { + if ( p?$icmp ) + print c$id; + } + +@TEST-END-FILE From c0678e7e1f649d4c69701eb7d03fd28893e12dd4 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 8 Mar 2012 17:14:58 -0800 Subject: [PATCH 125/651] Fixing problem logging remotely when local logging was turned off. For that, moved the remote logging from the Manager to the WriterFrontend. That also simplifies the Manager a bit. --- src/RemoteSerializer.cc | 13 +++++- src/logging/Manager.cc | 63 ++++++---------------------- src/logging/Manager.h | 3 +- src/logging/WriterFrontend.cc | 79 ++++++++++++++++++++++++++++++----- src/logging/WriterFrontend.h | 18 +++++++- src/threading/BasicThread.cc | 15 ++++--- 6 files changed, 116 insertions(+), 75 deletions(-) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 4b8f527f2b..c6b9623096 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -234,7 +234,7 @@ static const int PRINT_BUFFER_SIZE = 10 * 1024; static const int SOCKBUF_SIZE = 1024 * 1024; // Buffer size for remote-log data. 
-static const int LOG_BUFFER_SIZE = 50 * 1024; +static const int LOG_BUFFER_SIZE = 512; struct ping_args { uint32 seq; @@ -2587,7 +2587,10 @@ bool RemoteSerializer::SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, st if ( len > (LOG_BUFFER_SIZE - peer->log_buffer_used) || (network_time - last_flush > 1.0) ) { if ( ! FlushLogBuffer(peer) ) + { + delete [] data; return false; + } } // If the data is actually larger than our complete buffer, just send it out. @@ -2631,6 +2634,12 @@ bool RemoteSerializer::ProcessLogCreateWriter() if ( current_peer->state == Peer::CLOSING ) return false; +#ifdef USE_PERFTOOLS + // Don't track allocations here, they'll be released only after the + // main loop exists. And it's just a tiny amount anyway. + HeapLeakChecker::Disabler disabler; +#endif + assert(current_args); EnumVal* id_val = 0; @@ -2666,7 +2675,7 @@ bool RemoteSerializer::ProcessLogCreateWriter() id_val = new EnumVal(id, BifType::Enum::Log::ID); writer_val = new EnumVal(writer, BifType::Enum::Log::Writer); - if ( ! log_mgr->CreateWriter(id_val, writer_val, path, num_fields, fields) ) + if ( ! log_mgr->CreateWriter(id_val, writer_val, path, num_fields, fields, true, false) ) goto error; Unref(id_val); diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 0753296cb4..14fb3428fe 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -753,64 +753,25 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) for ( int j = 0; j < filter->num_fields; ++j ) arg_fields[j] = new Field(*filter->fields[j]); - if ( filter->remote ) - remote_serializer->SendLogCreateWriter(stream->id, - filter->writer, - path, - filter->num_fields, - arg_fields); + writer = CreateWriter(stream->id, filter->writer, + path, filter->num_fields, + arg_fields, filter->local, filter->remote); - if ( filter->local ) + if ( ! writer ) { - writer = CreateWriter(stream->id, filter->writer, - path, filter->num_fields, - arg_fields); - - if ( ! writer ) - { - Unref(columns); - return false; - } + Unref(columns); + return false; } - else - { - // Insert a null pointer into the map to make - // sure we don't try creating it again. - stream->writers.insert(Stream::WriterMap::value_type( - Stream::WriterPathPair(filter->writer->AsEnum(), path), 0)); - for( int i = 0; i < filter->num_fields; ++i) - delete arg_fields[i]; - - delete [] arg_fields; - } } // Alright, can do the write now. - if ( filter->local || filter->remote ) - { - threading::Value** vals = RecordToFilterVals(stream, filter, columns); - - if ( filter->remote ) - remote_serializer->SendLogWrite(stream->id, - filter->writer, - path, - filter->num_fields, - vals); - - if ( filter->local ) - { - // Write takes ownership of vals. - assert(writer); - writer->Write(filter->num_fields, vals); - } - - else - DeleteVals(filter->num_fields, vals); - - } + threading::Value** vals = RecordToFilterVals(stream, filter, columns); + // Write takes ownership of vals. + assert(writer); + writer->Write(filter->num_fields, vals); #ifdef DEBUG DBG_LOG(DBG_LOGGING, "Wrote record to filter '%s' on stream '%s'", @@ -976,7 +937,7 @@ Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, } WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, - int num_fields, const Field* const* fields) + int num_fields, const Field* const* fields, bool local, bool remote) { Stream* stream = FindStream(id); @@ -992,7 +953,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, // return it. 
return w->second->writer; - WriterFrontend* writer_obj = new WriterFrontend(writer->AsEnum()); + WriterFrontend* writer_obj = new WriterFrontend(id, writer, local, remote); assert(writer_obj); writer_obj->Init(path, num_fields, fields); diff --git a/src/logging/Manager.h b/src/logging/Manager.h index d931bfaef8..bf097c5e1a 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -159,7 +159,8 @@ protected: // Takes ownership of fields. WriterFrontend* CreateWriter(EnumVal* id, EnumVal* writer, string path, - int num_fields, const threading::Field* const* fields); + int num_fields, const threading::Field* const* fields, + bool local, bool remote); // Takes ownership of values.. bool Write(EnumVal* id, EnumVal* writer, string path, diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 02f1a188d8..26e8eaf22e 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -99,21 +99,36 @@ public: using namespace logging; -WriterFrontend::WriterFrontend(bro_int_t type) +WriterFrontend::WriterFrontend(EnumVal* arg_stream, EnumVal* arg_writer, bool arg_local, bool arg_remote) { + stream = arg_stream; + writer = arg_writer; + Ref(stream); + Ref(writer); + disabled = initialized = false; buf = true; + local = arg_local; + remote = arg_remote; write_buffer = 0; write_buffer_pos = 0; ty_name = ""; - backend = log_mgr->CreateBackend(this, type); - assert(backend); - backend->Start(); + if ( local ) + { + backend = log_mgr->CreateBackend(this, writer->AsEnum()); + assert(backend); + backend->Start(); + } + + else + backend = 0; } WriterFrontend::~WriterFrontend() { + Unref(stream); + Unref(writer); } string WriterFrontend::Name() const @@ -128,7 +143,9 @@ void WriterFrontend::Stop() { FlushWriteBuffer(); SetDisable(); - backend->Stop(); + + if ( backend ) + backend->Stop(); } void WriterFrontend::Init(string arg_path, int arg_num_fields, const Field* const * arg_fields) @@ -144,7 +161,17 @@ void WriterFrontend::Init(string arg_path, int arg_num_fields, const Field* cons fields = arg_fields; initialized = true; - backend->SendIn(new InitMessage(backend, arg_path, arg_num_fields, arg_fields)); + + if ( backend ) + backend->SendIn(new InitMessage(backend, arg_path, arg_num_fields, arg_fields)); + + if ( remote ) + remote_serializer->SendLogCreateWriter(stream, + writer, + arg_path, + arg_num_fields, + arg_fields); + } void WriterFrontend::Write(int num_fields, Value** vals) @@ -152,6 +179,19 @@ void WriterFrontend::Write(int num_fields, Value** vals) if ( disabled ) return; + if ( remote ) + remote_serializer->SendLogWrite(stream, + writer, + path, + num_fields, + vals); + + if ( ! backend ) + { + DeleteVals(vals); + return; + } + if ( ! write_buffer ) { // Need new buffer. @@ -173,7 +213,8 @@ void WriterFrontend::FlushWriteBuffer() // Nothing to do. return; - backend->SendIn(new WriteMessage(backend, num_fields, write_buffer_pos, write_buffer)); + if ( backend ) + backend->SendIn(new WriteMessage(backend, num_fields, write_buffer_pos, write_buffer)); // Clear buffer (no delete, we pass ownership to child thread.) write_buffer = 0; @@ -187,7 +228,8 @@ void WriterFrontend::SetBuf(bool enabled) buf = enabled; - backend->SendIn(new SetBufMessage(backend, enabled)); + if ( backend ) + backend->SendIn(new SetBufMessage(backend, enabled)); if ( ! buf ) // Make sure no longer buffer any still queued data. 
@@ -200,7 +242,9 @@ void WriterFrontend::Flush() return; FlushWriteBuffer(); - backend->SendIn(new FlushMessage(backend)); + + if ( backend ) + backend->SendIn(new FlushMessage(backend)); } void WriterFrontend::Rotate(string rotated_path, double open, double close, bool terminating) @@ -209,7 +253,9 @@ void WriterFrontend::Rotate(string rotated_path, double open, double close, bool return; FlushWriteBuffer(); - backend->SendIn(new RotateMessage(backend, this, rotated_path, open, close, terminating)); + + if ( backend ) + backend->SendIn(new RotateMessage(backend, this, rotated_path, open, close, terminating)); } void WriterFrontend::Finish() @@ -218,7 +264,18 @@ void WriterFrontend::Finish() return; FlushWriteBuffer(); - backend->SendIn(new FinishMessage(backend)); + + if ( backend ) + backend->SendIn(new FinishMessage(backend)); + } + +void WriterFrontend::DeleteVals(Value** vals) + { + // Note this code is duplicated in Manager::DeleteVals(). + for ( int i = 0; i < num_fields; i++ ) + delete vals[i]; + + delete [] vals; } diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index 4386a15f64..3e05d17c9e 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -25,14 +25,21 @@ public: /** * Constructor. * - * type: The backend writer type, with the value corresponding to the + * stream: The logging stream. + * + * writer: The backend writer type, with the value corresponding to the * script-level \c Log::Writer enum (e.g., \a WRITER_ASCII). The * frontend will internally instantiate a WriterBackend of the * corresponding type. + * + * local: If true, the writer will instantiate a local backend. + * + * remote: If true, the writer will forward all data to remote + * clients. * * Frontends must only be instantiated by the main thread. */ - WriterFrontend(bro_int_t type); + WriterFrontend(EnumVal* stream, EnumVal* writer, bool local, bool remote); /** * Destructor. @@ -187,10 +194,17 @@ public: protected: friend class Manager; + void DeleteVals(threading::Value** vals); + + EnumVal* stream; + EnumVal* writer; + WriterBackend* backend; // The backend we have instanatiated. bool disabled; // True if disabled. bool initialized; // True if initialized. bool buf; // True if buffering is enabled (default). + bool local; // True if logging locally. + bool remote; // True if loggin remotely. string ty_name; // Name of the backend type. Set by the manager. string path; // The log path. diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index 51c4f7a3bc..e590b13434 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -20,8 +20,8 @@ BasicThread::BasicThread() terminating = false; pthread = 0; - buf = 0; - buf_len = 1024; + buf_len = 2048; + buf = (char*) malloc(buf_len); name = Fmt("thread-%d", ++thread_counter); @@ -57,9 +57,6 @@ void BasicThread::SetOSName(const string& name) const char* BasicThread::Fmt(const char* format, ...) { - if ( ! buf ) - buf = (char*) malloc(buf_len); - va_list al; va_start(al, format); int n = safe_vsnprintf(buf, buf_len, format, al); @@ -67,13 +64,15 @@ const char* BasicThread::Fmt(const char* format, ...) if ( (unsigned int) n >= buf_len ) { // Not enough room, grow the buffer. - buf_len = n + 32; - buf = (char*) realloc(buf, buf_len); + int tmp_len = n + 32; + char* tmp = (char*) malloc(tmp_len); // Is it portable to restart? 
va_start(al, format); - n = safe_vsnprintf(buf, buf_len, format, al); + n = safe_vsnprintf(tmp, tmp_len, format, al); va_end(al); + + free(tmp); } return buf; From bf14bd91d7332384931ebd1b42401b8dd0c14754 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 8 Mar 2012 17:16:51 -0800 Subject: [PATCH 126/651] Removing some no longer needed checks. --- src/logging/Manager.cc | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 14fb3428fe..4e97351e57 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -105,9 +105,6 @@ Manager::Stream::~Stream() { WriterInfo* winfo = i->second; - if ( ! winfo ) - continue; - if ( winfo->rotation_timer ) timer_mgr->Cancel(winfo->rotation_timer); @@ -207,7 +204,7 @@ Manager::WriterInfo* Manager::FindWriter(WriterFrontend* writer) { WriterInfo* winfo = i->second; - if ( winfo && winfo->writer == writer ) + if ( winfo->writer == writer ) return winfo; } } @@ -221,7 +218,7 @@ void Manager::RemoveDisabledWriters(Stream* stream) for ( Stream::WriterMap::iterator j = stream->writers.begin(); j != stream->writers.end(); j++ ) { - if ( j->second && j->second->writer->Disabled() ) + if ( j->second->writer->Disabled() ) { j->second->writer->Stop(); delete j->second; @@ -740,7 +737,7 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) if ( w != stream->writers.end() ) // We know this writer already. - writer = w->second ? w->second->writer : 0; + writer = w->second->writer; else { @@ -948,7 +945,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, Stream::WriterMap::iterator w = stream->writers.find(Stream::WriterPathPair(writer->AsEnum(), path)); - if ( w != stream->writers.end() && w->second ) + if ( w != stream->writers.end() ) // If we already have a writer for this. That's fine, we just // return it. return w->second->writer; @@ -1050,8 +1047,7 @@ bool Manager::Write(EnumVal* id, EnumVal* writer, string path, int num_fields, return false; } - if ( w->second ) - w->second->writer->Write(num_fields, vals); + w->second->writer->Write(num_fields, vals); DBG_LOG(DBG_LOGGING, "Wrote pre-filtered record to path '%s' on stream '%s'", @@ -1072,9 +1068,6 @@ void Manager::SendAllWritersTo(RemoteSerializer::PeerID peer) for ( Stream::WriterMap::iterator i = stream->writers.begin(); i != stream->writers.end(); i++ ) { - if ( ! i->second ) - continue; - WriterFrontend* writer = i->second->writer; EnumVal writer_val(i->first.first, BifType::Enum::Log::Writer); @@ -1095,10 +1088,7 @@ bool Manager::SetBuf(EnumVal* id, bool enabled) for ( Stream::WriterMap::iterator i = stream->writers.begin(); i != stream->writers.end(); i++ ) - { - if ( i->second ) - i->second->writer->SetBuf(enabled); - } + i->second->writer->SetBuf(enabled); RemoveDisabledWriters(stream); @@ -1116,10 +1106,7 @@ bool Manager::Flush(EnumVal* id) for ( Stream::WriterMap::iterator i = stream->writers.begin(); i != stream->writers.end(); i++ ) - { - if ( i->second ) - i->second->writer->Flush(); - } + i->second->writer->Flush(); RemoveDisabledWriters(stream); From 83038d78e0511a0fbed649e514a27904f5071c2f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 8 Mar 2012 17:29:23 -0800 Subject: [PATCH 127/651] Adding new leak tests involving remote logging. 
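These tests exercise the local/remote split introduced in the previous commits: log writes are now forwarded by the WriterFrontend itself, even when no local backend exists. As a rough sketch, the receiving side only needs to request the peer's log writes (this mirrors the receiver.bro snippet in the diff below, which is the authoritative version):

redef Communication::nodes += {
	# Ask the sending peer (listening on the loopback address in this
	# test setup) to forward all of its log writes to us.
	["foo"] = [$host = 127.0.0.1, $connect=T, $request_logs=T]
};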
--- .../manager-1.metrics.log | 10 +++ .../core.leaks.remote/sender.test.failure.log | 10 +++ .../core.leaks.remote/sender.test.log | 12 +++ .../core.leaks.remote/sender.test.success.log | 9 +++ testing/btest/core/leaks/basic-cluster.bro | 39 +++++++++ testing/btest/core/leaks/remote.bro | 79 +++++++++++++++++++ .../external/scripts/perftools-adapt-paths | 2 +- 7 files changed, 160 insertions(+), 1 deletion(-) create mode 100644 testing/btest/Baseline/core.leaks.basic-cluster/manager-1.metrics.log create mode 100644 testing/btest/Baseline/core.leaks.remote/sender.test.failure.log create mode 100644 testing/btest/Baseline/core.leaks.remote/sender.test.log create mode 100644 testing/btest/Baseline/core.leaks.remote/sender.test.success.log create mode 100644 testing/btest/core/leaks/basic-cluster.bro create mode 100644 testing/btest/core/leaks/remote.bro diff --git a/testing/btest/Baseline/core.leaks.basic-cluster/manager-1.metrics.log b/testing/btest/Baseline/core.leaks.basic-cluster/manager-1.metrics.log new file mode 100644 index 0000000000..42fcd6a526 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.basic-cluster/manager-1.metrics.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path metrics +#fields ts metric_id filter_name index.host index.str index.network value +#types time enum string addr string subnet count +1331256494.591966 TEST_METRIC foo-bar 6.5.4.3 - - 4 +1331256494.591966 TEST_METRIC foo-bar 7.2.1.5 - - 2 +1331256494.591966 TEST_METRIC foo-bar 1.2.3.4 - - 6 diff --git a/testing/btest/Baseline/core.leaks.remote/sender.test.failure.log b/testing/btest/Baseline/core.leaks.remote/sender.test.failure.log new file mode 100644 index 0000000000..5a26f322f4 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.remote/sender.test.failure.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path test.failure +#fields t id.orig_h id.orig_p id.resp_h id.resp_p status country +#types time addr port addr port string string +1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure US +1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure UK +1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure MX diff --git a/testing/btest/Baseline/core.leaks.remote/sender.test.log b/testing/btest/Baseline/core.leaks.remote/sender.test.log new file mode 100644 index 0000000000..9d2ba26f48 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.remote/sender.test.log @@ -0,0 +1,12 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path test +#fields t id.orig_h id.orig_p id.resp_h id.resp_p status country +#types time addr port addr port string string +1331256472.375609 1.2.3.4 1234 2.3.4.5 80 success unknown +1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure US +1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure UK +1331256472.375609 1.2.3.4 1234 2.3.4.5 80 success BR +1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure MX diff --git a/testing/btest/Baseline/core.leaks.remote/sender.test.success.log b/testing/btest/Baseline/core.leaks.remote/sender.test.success.log new file mode 100644 index 0000000000..1b2ed452a0 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.remote/sender.test.success.log @@ -0,0 +1,9 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path test.success +#fields t id.orig_h id.orig_p id.resp_h id.resp_p status country +#types time addr port addr port string string +1331256472.375609 1.2.3.4 1234 2.3.4.5 80 success unknown +1331256472.375609 1.2.3.4 1234 
2.3.4.5 80 success BR diff --git a/testing/btest/core/leaks/basic-cluster.bro b/testing/btest/core/leaks/basic-cluster.bro new file mode 100644 index 0000000000..a82f52c8b2 --- /dev/null +++ b/testing/btest/core/leaks/basic-cluster.bro @@ -0,0 +1,39 @@ +# Needs perftools support. +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-EXEC: btest-bg-run manager-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro -m %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT +# @TEST-EXEC: sleep 1 +# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT +# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT +# @TEST-EXEC: btest-bg-wait -k 30 +# @TEST-EXEC: btest-diff manager-1/metrics.log + +@TEST-START-FILE cluster-layout.bro +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1")], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1")], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +redef enum Metrics::ID += { + TEST_METRIC, +}; + +event bro_init() &priority=5 + { + Metrics::add_filter(TEST_METRIC, + [$name="foo-bar", + $break_interval=3secs]); + + if ( Cluster::local_node_type() == Cluster::WORKER ) + { + Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3); + Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2); + Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1); + } + } diff --git a/testing/btest/core/leaks/remote.bro b/testing/btest/core/leaks/remote.bro new file mode 100644 index 0000000000..fa72ce6024 --- /dev/null +++ b/testing/btest/core/leaks/remote.bro @@ -0,0 +1,79 @@ +# Needs perftools support. +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: btest-bg-run sender HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m --pseudo-realtime %INPUT ../sender.bro +# @TEST-EXEC: sleep 1 +# @TEST-EXEC: btest-bg-run receiver HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m --pseudo-realtime %INPUT ../receiver.bro +# @TEST-EXEC: sleep 1 +# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-diff sender/test.log +# @TEST-EXEC: btest-diff sender/test.failure.log +# @TEST-EXEC: btest-diff sender/test.success.log +# @TEST-EXEC: cmp receiver/test.log sender/test.log +# @TEST-EXEC: cmp receiver/test.failure.log sender/test.failure.log +# @TEST-EXEC: cmp receiver/test.success.log sender/test.success.log + +# This is the common part loaded by both sender and receiver. +module Test; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event bro_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); + Log::add_filter(Test::LOG, [$name="f1", $path="test.success", $pred=function(rec: Log): bool { return rec$status == "success"; }]); +} + +##### + +@TEST-START-FILE sender.bro + +module Test; + +@load frameworks/communication/listen + +function fail(rec: Log): bool + { + return rec$status != "success"; + } + +event remote_connection_handshake_done(p: event_peer) + { + Log::add_filter(Test::LOG, [$name="f2", $path="test.failure", $pred=fail]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + local r: Log = [$t=network_time(), $id=cid, $status="success"]; + + # Log something. + Log::write(Test::LOG, r); + Log::write(Test::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(Test::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(Test::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(Test::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + disconnect(p); + } +@TEST-END-FILE + +@TEST-START-FILE receiver.bro + +##### + +redef Communication::nodes += { + ["foo"] = [$host = 127.0.0.1, $connect=T, $request_logs=T] +}; + +@TEST-END-FILE diff --git a/testing/external/scripts/perftools-adapt-paths b/testing/external/scripts/perftools-adapt-paths index 2eda2477c7..cfecd39993 100755 --- a/testing/external/scripts/perftools-adapt-paths +++ b/testing/external/scripts/perftools-adapt-paths @@ -7,4 +7,4 @@ cat $1 | sed "s#bro *\"\./#../../../build/src/bro \".tmp/$TEST_NAME/#g" | sed 's/ *--gv//g' >$1.tmp && mv $1.tmp $1 -grep -q "No leaks found" $1 +grep -qv "detected leaks of" $1 From 51009b73bcd23c812f8244f55ee5c6e5172598fd Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 8 Mar 2012 18:13:17 -0800 Subject: [PATCH 128/651] Finetuning communication CPU usage. --- .../base/frameworks/cluster/setup-connections.bro | 2 +- src/RemoteSerializer.cc | 14 ++++++++++++++ src/RemoteSerializer.h | 1 + 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/scripts/base/frameworks/cluster/setup-connections.bro b/scripts/base/frameworks/cluster/setup-connections.bro index b5a0d25e1f..20646525be 100644 --- a/scripts/base/frameworks/cluster/setup-connections.bro +++ b/scripts/base/frameworks/cluster/setup-connections.bro @@ -44,7 +44,7 @@ event bro_init() &priority=9 { if ( n$node_type == WORKER && n$proxy == node ) Communication::nodes[i] = - [$host=n$ip, $connect=F, $class=i, $sync=T, $auth=T, $events=worker2proxy_events]; + [$host=n$ip, $connect=F, $class=i, $sync=F, $auth=T, $events=worker2proxy_events]; # accepts connections from the previous one. # (This is not ideal for setups with many proxies) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index c6b9623096..56e27c2104 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -532,6 +532,7 @@ RemoteSerializer::RemoteSerializer() terminating = false; in_sync = 0; last_flush = 0; + received_logs = 0; } RemoteSerializer::~RemoteSerializer() @@ -1353,6 +1354,14 @@ double RemoteSerializer::NextTimestamp(double* local_network_time) { Poll(false); + if ( received_logs > 0 ) + { + // If we processed logs last time, assume there's more. + idle = false; + received_logs = 0; + return timer_mgr->Time(); + } + double et = events.length() ? events[0]->time : -1; double pt = packets.length() ? 
packets[0]->time : -1; @@ -2744,6 +2753,8 @@ bool RemoteSerializer::ProcessLogWrite() fmt.EndRead(); + ++received_logs; + return true; error: @@ -3385,6 +3396,9 @@ void SocketComm::Run() small_timeout.tv_usec = io->CanWrite() || io->CanRead() ? 1 : 10; + if ( ! io->CanWrite() ) + usleep(10); + int a = select(max_fd + 1, &fd_read, &fd_write, &fd_except, &small_timeout); diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index eabcb18a38..05d25ca525 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -338,6 +338,7 @@ private: int propagate_accesses; bool ignore_accesses; bool terminating; + int received_logs; Peer* source_peer; PeerID id_counter; // Keeps track of assigned IDs. uint32 current_sync_point; From 8eaf40ec18222d54ae4a76f535008951a73d7fca Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 8 Mar 2012 20:24:12 -0800 Subject: [PATCH 129/651] Reverting accidental commit. Thanks, Seth! --- scripts/base/frameworks/cluster/setup-connections.bro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/base/frameworks/cluster/setup-connections.bro b/scripts/base/frameworks/cluster/setup-connections.bro index 20646525be..b5a0d25e1f 100644 --- a/scripts/base/frameworks/cluster/setup-connections.bro +++ b/scripts/base/frameworks/cluster/setup-connections.bro @@ -44,7 +44,7 @@ event bro_init() &priority=9 { if ( n$node_type == WORKER && n$proxy == node ) Communication::nodes[i] = - [$host=n$ip, $connect=F, $class=i, $sync=F, $auth=T, $events=worker2proxy_events]; + [$host=n$ip, $connect=F, $class=i, $sync=T, $auth=T, $events=worker2proxy_events]; # accepts connections from the previous one. # (This is not ideal for setups with many proxies) From faf5c95752c2cb32f269b624262676902a22426f Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 11 Mar 2012 19:41:41 -0700 Subject: [PATCH 130/651] a couple of small fixes ( default values, all null lines) --- src/input/Manager.cc | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index a647b3c945..27580e0e82 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -104,7 +104,6 @@ struct Manager::ReaderInfo { EnumVal* type; ReaderFrontend* reader; - //list events; // events we fire when "something" happens map filters; // filters that can prevent our actions bool HasFilter(int id); @@ -160,7 +159,7 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) while ( true ) { if ( ir->type == BifEnum::Input::READER_DEFAULT ) { - reporter->Error("unknown reader when creating reader"); + reporter->Error("The reader that was requested was not found and could not be initialized."); return 0; } @@ -181,7 +180,7 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) } else { // ohok. init failed, kill factory for all eternity ir->factory = 0; - DBG_LOG(DBG_LOGGING, "failed to init input class %s", ir->name); + DBG_LOG(DBG_LOGGING, "Failed to init input class %s", ir->name); return 0; } @@ -225,8 +224,11 @@ ReaderFrontend* Manager::CreateStream(EnumVal* id, RecordVal* description) assert(reader_obj); // get the source... 
- const BroString* bsource = description->Lookup(rtype->FieldOffset("source"))->AsString(); + Val* sourceval = description->LookupWithDefault(rtype->FieldOffset("source")); + assert ( sourceval != 0 ); + const BroString* bsource = sourceval->AsString(); string source((const char*) bsource->Bytes(), bsource->Len()); + Unref(sourceval); ReaderInfo* info = new ReaderInfo; info->reader = reader_obj; @@ -255,13 +257,14 @@ bool Manager::AddEventFilter(EnumVal *id, RecordVal* fval) { return false; } - Val* name = fval->Lookup(rtype->FieldOffset("name")); - RecordType *fields = fval->Lookup(rtype->FieldOffset("fields"))->AsType()->AsTypeType()->Type()->AsRecordType(); + Val* name = fval->LookupWithDefault(rtype->FieldOffset("name")); + RecordType *fields = fval->LookupWithDefault(rtype->FieldOffset("fields"))->AsType()->AsTypeType()->Type()->AsRecordType(); Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); - Val* event_val = fval->Lookup(rtype->FieldOffset("ev")); + Val* event_val = fval->LookupWithDefault(rtype->FieldOffset("ev")); Func* event = event_val->AsFunc(); + Unref(event_val); { FuncType* etype = event->FType()->AsFuncType(); @@ -330,8 +333,10 @@ bool Manager::AddEventFilter(EnumVal *id, RecordVal* fval) { logf[i] = fieldsV[i]; } + Unref(fields); // ref'd by lookupwithdefault EventFilter* filter = new EventFilter(); filter->name = name->AsString()->CheckString(); + Unref(name); // ref'd by lookupwithdefault filter->id = id->Ref()->AsEnumVal(); filter->num_fields = fieldsV.size(); filter->fields = fields->Ref()->AsRecordType(); @@ -369,8 +374,9 @@ bool Manager::AddTableFilter(EnumVal *id, RecordVal* fval) { RecordType *idx = fval->LookupWithDefault(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); RecordType *val = 0; - if ( fval->Lookup(rtype->FieldOffset("val")) != 0 ) { + if ( fval->LookupWithDefault(rtype->FieldOffset("val")) != 0 ) { val = fval->LookupWithDefault(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); + Unref(val); // The lookupwithdefault in the if-clause ref'ed val. } TableVal *dst = fval->LookupWithDefault(rtype->FieldOffset("destination"))->AsTableVal(); @@ -780,10 +786,12 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va assert(i->filters[id]->filter_type == TABLE_FILTER); TableFilter* filter = (TableFilter*) i->filters[id]; - //reporter->Error("Hashing %d index fields", i->num_idx_fields); HashKey* idxhash = HashValues(filter->num_idx_fields, vals); - //reporter->Error("Result: %d\n", (uint64_t) idxhash->Hash()); - //reporter->Error("Hashing %d val fields", i->num_val_fields); + + if ( idxhash == 0 ) { + reporter->Error("Could not hash line. Ignoring"); + return filter->num_val_fields + filter->num_idx_fields; + } hash_t valhash = 0; if ( filter->num_val_fields > 0 ) { @@ -792,10 +800,6 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va delete(valhashkey); } - //reporter->Error("Result: %d", (uint64_t) valhash->Hash()); - - //reporter->Error("received entry with idxhash %d and valhash %d", (uint64_t) idxhash->Hash(), (uint64_t) valhash->Hash()); - InputHash *h = filter->lastDict->Lookup(idxhash); if ( h != 0 ) { // seen before @@ -1609,14 +1613,16 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { length += GetValueLength(val); } - //reporter->Error("Length: %d", length); + if ( length == 0 ) { + reporter->Error("Input reader sent line where all elements are null values. 
Ignoring line"); + return NULL; + } int position = 0; char *data = (char*) malloc(length); if ( data == 0 ) { reporter->InternalError("Could not malloc?"); } - //memset(data, 0, length); for ( int i = 0; i < num_elements; i++ ) { const Value* val = vals[i]; if ( val->present ) From 92555badd4d6bca6a3f9ed96c3dab1087aac9940 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 11 Mar 2012 20:43:26 -0700 Subject: [PATCH 131/651] cleanup, more sanity tests, a little bit more documentation --- doc/input.rst | 27 +++-- scripts/base/frameworks/input/main.bro | 3 - src/input/Manager.cc | 159 ++++++++++++++----------- src/input/Manager.h | 3 + 4 files changed, 110 insertions(+), 82 deletions(-) diff --git a/doc/input.rst b/doc/input.rst index 78e96fe06e..e201af9fed 100644 --- a/doc/input.rst +++ b/doc/input.rst @@ -34,9 +34,11 @@ very similar to the abstracts used in the logging framework: Readers A reader defines the input format for the specific input stream. - At the moment, Bro comes with only one type of reader, which can - read the tab seperated ASCII logfiles that were generated by the + At the moment, Bro comes with two types of reader. The default reader is READER_ASCII, + which can read the tab seperated ASCII logfiles that were generated by the logging framework. + READER_RAW can files containing records separated by a character(like e.g. newline) and send + one event per line. Basics @@ -68,7 +70,21 @@ The fields that can be set when creating a stream are: ``reader`` The reader used for this stream. Default is ``READER_ASCII``. - + + ``mode`` + The mode in which the stream is opened. Possible values are ``MANUAL``, ``REREAD`` and ``STREAM``. + Default is ``MANUAL``. + ``MANUAL`` means, that the files is not updated after it has been read. Changes to the file will not + be reflected in the data bro knows. + ``REREAD`` means that the whole file is read again each time a change is found. This should be used for + files that are mapped to a table where individual lines can change. + ``STREAM`` means that the data from the file is streamed. Events / table entries will be generated as new + data is added to the file. + + ``autostart`` + If set to yes, the first update operation is triggered automatically after the first filter has been added to the stream. + This has to be set to false if several filters are added to the input source. + In this case Input::force_update has to be called manually once after all filters have been added. Filters ======= @@ -101,9 +117,6 @@ could be defined as follows: ... 
Input::add_eventfilter(Foo::INPUT, [$name="input", $fields=Val, $ev=line]); - - # read the file after all filters have been set - Input::force_update(Foo::INPUT); } The fields that can be set for an event filter are: @@ -156,7 +169,7 @@ an approach similar to this: Input::add_tablefilter(Foo::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=conn_attempts]); - # read the file after all filters have been set + # read the file after all filters have been set (only needed if autostart is set to false) Input::force_update(Foo::INPUT); } diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 445f947106..c6995121bd 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -182,9 +182,6 @@ function read_table(description: Input::StreamDescription, filter: Input::TableF if ( ok ) { ok = add_tablefilter(id, filter); } - if ( ok ) { - ok = force_update(id); - } if ( ok ) { ok = remove_stream(id); } else { diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 27580e0e82..db98cb7a33 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -327,7 +327,6 @@ bool Manager::AddEventFilter(EnumVal *id, RecordVal* fval) { return false; } - Field** logf = new Field*[fieldsV.size()]; for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { logf[i] = fieldsV[i]; @@ -380,6 +379,30 @@ bool Manager::AddTableFilter(EnumVal *id, RecordVal* fval) { } TableVal *dst = fval->LookupWithDefault(rtype->FieldOffset("destination"))->AsTableVal(); + // check if index fields match tabla description + { + int num = idx->NumFields(); + const type_list* tl = dst->Type()->AsTableType()->IndexTypes(); + + loop_over_list(*tl, j) + { + if ( j >= num ) { + reporter->Error("Table type has more indexes than index definition"); + return false; + } + + if ( !same_type(idx->FieldType(j), (*tl)[j]) ) { + reporter->Error("Table type does not match index type"); + return false; + } + } + + if ( num != j ) { + reporter->Error("Table has less elements than index definition"); + return false; + } + } + Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); Val* event_val = fval->LookupWithDefault(rtype->FieldOffset("ev")); @@ -571,7 +594,6 @@ bool Manager::RemoveStreamContinuation(const ReaderFrontend* reader) { reporter->Error("Stream not found in RemoveStreamContinuation"); return false; - } bool Manager::UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend) { @@ -738,7 +760,6 @@ Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Valu idxval = l; } - //reporter->Error("Position: %d, num_fields: %d", position, num_fields); assert ( position == num_fields ); return idxval; @@ -771,8 +792,6 @@ void Manager::SendEntry(const ReaderFrontend* reader, const int id, Value* *vals delete vals[i]; } delete [] vals; - - } int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Value* const *vals) { @@ -846,17 +865,15 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); } + + bool result; + if ( filter->num_val_fields > 0 ) { // we have values + result = CallPred(filter->pred, 3, ev, predidx, valval); + } else { + // no values + result = CallPred(filter->pred, 2, ev, predidx); + } - val_list vl( 2 + (filter->num_val_fields > 0) ); // 2 if we don't have values, 3 otherwise. 
- vl.append(ev); - vl.append(predidx); - if ( filter->num_val_fields > 0 ) - vl.append(valval); - - Val* v = filter->pred->Call(&vl); - bool result = v->AsBool(); - Unref(v); - if ( result == false ) { if ( !updated ) { // throw away. Hence - we quit. And remove the entry from the current dictionary... @@ -968,14 +985,8 @@ void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { Ref(predidx); Ref(val); - val_list vl(3); - vl.append(ev); - vl.append(predidx); - vl.append(val); - Val* v = filter->pred->Call(&vl); - bool result = v->AsBool(); - Unref(v); - + bool result = CallPred(filter->pred, 3, ev, predidx, val); + if ( result == false ) { // Keep it. Hence - we quit and simply go to the next entry of lastDict // ah well - and we have to add the entry to currDict... @@ -1038,7 +1049,6 @@ void Manager::Put(const ReaderFrontend* reader, int id, Value* *vals) { } else { assert(false); } - } int Manager::SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, int id, const Value* const *vals) { @@ -1132,18 +1142,15 @@ int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const * } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); } + + bool result; + if ( filter->num_val_fields > 0 ) { // we have values + result = CallPred(filter->pred, 3, ev, predidx, valval); + } else { + // no values + result = CallPred(filter->pred, 2, ev, predidx); + } - val_list vl( 2 + (filter->num_val_fields > 0) ); // 2 if we don't have values, 3 otherwise. - vl.append(ev); - vl.append(predidx); - if ( filter->num_val_fields > 0 ) - vl.append(valval); - - - Val* v = filter->pred->Call(&vl); - bool result = v->AsBool(); - Unref(v); - if ( result == false ) { // do nothing Unref(idxval); @@ -1154,7 +1161,6 @@ int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const * } - filter->tab->Assign(idxval, valval); if ( filter->event ) { @@ -1176,13 +1182,9 @@ int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const * SendEvent(filter->event, 3, ev, predidx, valval); } } - } - - - } else { // no predicates or other stuff @@ -1192,6 +1194,7 @@ int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const * return filter->num_idx_fields + filter->num_val_fields; } +// Todo:: perhaps throw some kind of clear-event? void Manager::Clear(const ReaderFrontend* reader, int id) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { @@ -1207,6 +1210,7 @@ void Manager::Clear(const ReaderFrontend* reader, int id) { filter->tab->RemoveAll(); } +// put interface: delete old entry from table. bool Manager::Delete(const ReaderFrontend* reader, int id, Value* *vals) { ReaderInfo *i = FindReader(reader); if ( i == 0 ) { @@ -1235,13 +1239,7 @@ bool Manager::Delete(const ReaderFrontend* reader, int id, Value* *vals) { int startpos = 0; Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); - val_list vl(3); - vl.append(ev); - vl.append(predidx); - vl.append(val); - Val* v = filter->pred->Call(&vl); - filterresult = v->AsBool(); - Unref(v); + filterresult = CallPred(filter->pred, 3, ev, predidx, val); if ( filterresult == false ) { // keep it. @@ -1285,6 +1283,26 @@ bool Manager::Delete(const ReaderFrontend* reader, int id, Value* *vals) { return success; } +bool Manager::CallPred(Func* pred_func, const int numvals, ...) 
+{ + bool result; + val_list vl(numvals); + + va_list lP; + va_start(lP, numvals); + for ( int i = 0; i < numvals; i++ ) + { + vl.append( va_arg(lP, Val*) ); + } + va_end(lP); + + Val* v = pred_func->Call(&vl); + result = v->AsBool(); + Unref(v); + + return(result); +} + bool Manager::SendEvent(const string& name, const int num_vals, Value* *vals) { EventHandler* handler = event_registry->Lookup(name.c_str()); @@ -1341,8 +1359,15 @@ void Manager::SendEvent(EventHandlerPtr ev, list events) mgr.QueueEvent(ev, vl, SOURCE_LOCAL); } - +// Convert a bro list value to a bro record value. I / we could think about moving this functionality to val.cc RecordVal* Manager::ListValToRecordVal(ListVal* list, RecordType *request_type, int* position) { + assert(position != 0 ); // we need the pointer to point to data; + + if ( request_type->Tag() != TYPE_RECORD ) { + reporter->InternalError("ListValToRecordVal called on non-record-value."); + return 0; + } + RecordVal* rec = new RecordVal(request_type->AsRecordType()); int maxpos = list->Length(); @@ -1364,20 +1389,14 @@ RecordVal* Manager::ListValToRecordVal(ListVal* list, RecordType *request_type, return rec; } - - +// Convert a threading value to a record value RecordVal* Manager::ValueToRecordVal(const Value* const *vals, RecordType *request_type, int* position) { - if ( position == 0 ) { - reporter->InternalError("Need position"); - return 0; - } + assert(position != 0); // we need the pointer to point to data. - /* if ( request_type->Tag() != TYPE_RECORD ) { - reporter->InternalError("I only work with records"); + reporter->InternalError("ValueToRecordVal called on non-record-value."); return 0; - } */ - + } RecordVal* rec = new RecordVal(request_type->AsRecordType()); for ( int i = 0; i < request_type->NumFields(); i++ ) { @@ -1394,11 +1413,12 @@ RecordVal* Manager::ValueToRecordVal(const Value* const *vals, RecordType *reque } return rec; - } - +// Count the length of the values +// used to create a correct length buffer for hashing later int Manager::GetValueLength(const Value* val) { + assert( val->present ); // presence has to be checked elsewhere int length = 0; switch (val->type) { @@ -1485,19 +1505,20 @@ int Manager::GetValueLength(const Value* val) { } +// Given a threading::value, copy the raw data bytes into *data and return how many bytes were copied. 
+// Used for hashing the values for lookup in the bro table int Manager::CopyValue(char *data, const int startpos, const Value* val) { + assert( val->present ); // presence has to be checked elsewhere + switch ( val->type ) { case TYPE_BOOL: case TYPE_INT: - //reporter->Error("Adding field content to pos %d: %lld", val->val.int_val, startpos); memcpy(data+startpos, (const void*) &(val->val.int_val), sizeof(val->val.int_val)); - //*(data+startpos) = val->val.int_val; return sizeof(val->val.int_val); break; case TYPE_COUNT: case TYPE_COUNTER: - //*(data+startpos) = val->val.uint_val; memcpy(data+startpos, (const void*) &(val->val.uint_val), sizeof(val->val.uint_val)); return sizeof(val->val.uint_val); break; @@ -1516,7 +1537,6 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { case TYPE_DOUBLE: case TYPE_TIME: case TYPE_INTERVAL: - //*(data+startpos) = val->val.double_val; memcpy(data+startpos, (const void*) &(val->val.double_val), sizeof(val->val.double_val)); return sizeof(val->val.double_val); break; @@ -1598,12 +1618,11 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { return 0; } - reporter->InternalError("internal error"); assert(false); return 0; - } +// Hash num_elements threading values and return the HashKey for them. At least one of the vals has to be ->present. HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { int length = 0; @@ -1633,10 +1652,9 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { assert(position == length); return new HashKey(data, length, key, true); - - } +// convert threading value to Bro value Val* Manager::ValueToVal(const Value* val, BroType* request_type) { if ( request_type->Tag() != TYPE_ANY && request_type->Tag() != val->type ) { @@ -1647,7 +1665,6 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) { if ( !val->present ) { return 0; // unset field } - switch ( val->type ) { case TYPE_BOOL: @@ -1760,8 +1777,7 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) { reporter->InternalError("unsupported type for input_read"); } - - reporter->InternalError("Impossible error"); + assert(false); return NULL; } @@ -1778,7 +1794,6 @@ Manager::ReaderInfo* Manager::FindReader(const ReaderFrontend* reader) return 0; } - Manager::ReaderInfo* Manager::FindReader(const EnumVal* id) { for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) diff --git a/src/input/Manager.h b/src/input/Manager.h index b4fc6cff7f..96ea0e43db 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -177,6 +177,9 @@ private: void SendEvent(EventHandlerPtr ev, const int numvals, ...); void SendEvent(EventHandlerPtr ev, list events); + // Call predicate function and return result + bool CallPred(Func* pred_func, const int numvals, ...); + // get a hashkey for a set of threading::Values HashKey* HashValues(const int num_elements, const threading::Value* const *vals); From e74cbbf77484528334e5137d73a1e041d9206590 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 12 Mar 2012 15:26:51 -0500 Subject: [PATCH 132/651] Add unit test for IPv6 fragment reassembly. 
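The test drives two fragmented DNS transactions from a trace through the new reassembly code and checks the headers Bro reports for the reassembled datagrams. As a rough companion sketch (not part of this patch, and assuming only the new_packet event and the pkt_hdr/ip6_hdr_chain records introduced earlier in this series), a script can count how many complete IPv6 UDP datagrams come out of reassembly:

    global ip6_datagrams = 0;

    event new_packet(c: connection, p: pkt_hdr)
        {
        # Fragmented traffic only reaches this event after Bro has
        # reassembled it, so a fragmented DNS reply from the trace
        # shows up here as a single datagram with its full UDP length.
        if ( p?$ip6 && p?$udp )
            ++ip6_datagrams;
        }

    event bro_done()
        {
        print fmt("complete IPv6 UDP datagrams seen: %d", ip6_datagrams);
        }
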
--- testing/btest/Baseline/core.ipv6-frag/dns.log | 9 +++++++++ testing/btest/Baseline/core.ipv6-frag/output | 5 +++++ testing/btest/Traces/ipv6-fragmented-dns.trace | Bin 0 -> 4772 bytes testing/btest/core/ipv6-frag.test | 9 +++++++++ 4 files changed, 23 insertions(+) create mode 100644 testing/btest/Baseline/core.ipv6-frag/dns.log create mode 100644 testing/btest/Baseline/core.ipv6-frag/output create mode 100755 testing/btest/Traces/ipv6-fragmented-dns.trace create mode 100644 testing/btest/core/ipv6-frag.test diff --git a/testing/btest/Baseline/core.ipv6-frag/dns.log b/testing/btest/Baseline/core.ipv6-frag/dns.log new file mode 100644 index 0000000000..50c9684bac --- /dev/null +++ b/testing/btest/Baseline/core.ipv6-frag/dns.log @@ -0,0 +1,9 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path dns +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name QR AA TC RD RA Z answers TTLs +#types time string addr port addr port enum count string count string count string count string bool bool bool bool bool count vector[string] vector[interval] +1331084278.438444 UWkUyAuUGXf 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51850 2607:f740:b::f93 53 udp 3903 txtpadding_323.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR F T F T F 0 This TXT record should be ignored 1.000000 +1331084293.592245 arKYeMETxOg 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51851 2607:f740:b::f93 53 udp 40849 txtpadding_3230.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR F T F T F 0 This TXT record should be ignored 1.000000 diff --git a/testing/btest/Baseline/core.ipv6-frag/output b/testing/btest/Baseline/core.ipv6-frag/output new file mode 100644 index 0000000000..5020d94e8d --- /dev/null +++ b/testing/btest/Baseline/core.ipv6-frag/output @@ -0,0 +1,5 @@ +ip6=[hdr=[class=0, flow=0, len=81, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]], udp = [sport=51850/udp, dport=53/udp, ulen=81] +ip6=[hdr=[class=0, flow=0, len=331, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]], udp = [sport=53/udp, dport=51850/udp, ulen=331] +ip6=[hdr=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] +ip6=[hdr=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] +ip6=[hdr=[class=0, flow=0, len=3238, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]], udp = [sport=53/udp, dport=51851/udp, ulen=3238] diff --git a/testing/btest/Traces/ipv6-fragmented-dns.trace b/testing/btest/Traces/ipv6-fragmented-dns.trace new file mode 100755 index 0000000000000000000000000000000000000000..9dda47a8a9f6a4b89c12c3b26577aad3f6effc86 GIT binary patch literal 4772 zcmca|c+)~A1{MYw`2U}Qff300wmZy!%?37xb|4#sZx|$b9Nl_EhuJ!3RomSJpa?^t zpo0PpHckwKr=zc0!hA-ijsoF zl$6Z8^mt<8#GJ~iB9_eL;!KXD)S~RvoYYF@)Ra;N1_6*hO#z_J1|SAG ztKb<7o)dQ2fuwQ$XOuBM7pyM@IV{~)O}Df 
zBqOs}AtWM1p(r&uzbHkaI3vF_Cq*GCRUtDyFTW@?MbxoaA+tmQs4A9 z1RRiNU;=uCfgy!KkgX^`CowMt7#o>I%=ty>Kv8%E925r!7f2n;foZ$tS@(qbmvVA4 zbO05BFqU)@L_|922Bwpsoz3$n0-XSI1(4)NO(zDVB@~dm4yJH1OaYn(!dMbYoDRW+ zBFMlU0CEdWBk2H5O{at52V{s#J7output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff dns.log + +event new_packet(c: connection, p: pkt_hdr) + { + if ( p?$ip6 && p?$ udp ) + print fmt("ip6=%s, udp = %s", p$ip6, p$udp); + } From 79948c79741e005565ab0aa29006249014428905 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 13 Mar 2012 14:34:53 -0700 Subject: [PATCH 133/651] Merge remote-tracking branch 'origin/topic/jsiwek/ipv6-ext-headers' * origin/topic/jsiwek/ipv6-ext-headers: Update PacketFilter/Discarder code for IP version independence. Add a few comments to IP.h Fix some IPv6 header related bugs. Add IPv6 fragment reassembly. Add handling for IPv6 extension header chains (addresses #531) --- aux/broccoli | 2 +- scripts/base/init-bare.bro | 265 ++++++++++++++++++++----------------- src/Frag.cc | 10 +- src/IP.h | 16 +++ src/Sessions.cc | 9 +- 5 files changed, 175 insertions(+), 127 deletions(-) diff --git a/aux/broccoli b/aux/broccoli index 2602eb53e7..ca13601450 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 2602eb53e70d7f0afae8fac58d7636b9291974a4 +Subproject commit ca13601450803b48d70122609764e51252a0d86e diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index a031080f0e..98da9f331d 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -303,10 +303,10 @@ type gap_info: record { gap_bytes: count; ##< How many bytes were missing in the gaps. }; -## Deprecated. -## +## Deprecated. +## ## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere -## else. +## else. type packet: record { conn: connection; is_orig: bool; @@ -933,7 +933,7 @@ const ICMP_UNREACH_ADMIN_PROHIB = 13; ##< Adminstratively prohibited. # Definitions for access to packet headers. Currently only used for # discarders. # todo::these should go into an enum to make them autodoc'able -const IPPROTO_IP = 0; ##< Dummy for IP. +const IPPROTO_IP = 0; ##< Dummy for IP. [Robin] Rename to IPPROTO_IP4? const IPPROTO_ICMP = 1; ##< Control message protocol. const IPPROTO_IGMP = 2; ##< Group management protocol. const IPPROTO_IPIP = 4; ##< IP encapsulation in IP. @@ -943,6 +943,7 @@ const IPPROTO_IPV6 = 41; ##< IPv6 header. const IPPROTO_RAW = 255; ##< Raw IP packet. # Definitions for IPv6 extension headers. +# [Robin] Do we need a constant for unknown extensions? const IPPROTO_HOPOPTS = 0; ##< IPv6 hop-by-hop-options header. const IPPROTO_ROUTING = 43; ##< IPv6 routing header. const IPPROTO_FRAGMENT = 44; ##< IPv6 fragment header. @@ -959,7 +960,7 @@ type ip6_hdr: record { class: count; ##< Traffic class. flow: count; ##< Flow label. len: count; ##< Payload length. - nxt: count; ##< Next header (RFC 1700 assigned number). + nxt: count; ##< Next header (RFC 1700 assigned number). # [Robin] That's just the IPPROTO_* constant right. Then we should refer to them. hlim: count; ##< Hop limit. src: addr; ##< Source address. dst: addr; ##< Destination address. @@ -1037,7 +1038,7 @@ type ip6_fragment: record { ## ## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain type ip6_ah: record { - ## Next header (RFC 1700 assigned number). + ## Next header (RFC 1700 assigned number). # [Robin] Same as above. nxt: count; ## Length of header in 4-octet units, excluding first two units. 
len: count; @@ -1064,7 +1065,17 @@ type ip6_esp: record { ## An IPv6 header chain. ## ## .. bro:see:: pkt_hdr ip_hdr +# +# [Robin] How about turning ip6_hdr_chain and ip6_hdr around, making the latter +# the top-level record that then contains an ip6_hdr_chain instance. That way, the +# pkt_hdr record would have ip4_hdr and ip6_hdr members, which seems more natural. +# +# [Robin] What happens to unknown extension headers? We should keep them too so that +# one can at least identify what one can't analyze. type ip6_hdr_chain: record { + # [Robin] This looses the order of the headers (partially at least, even with ext_order I believe). + # Not sure how to do it differently, but can order be important for us? + ## The main IPv6 header. hdr: ip6_hdr; ## Hop-by-hop option extension header. @@ -1081,12 +1092,15 @@ type ip6_hdr_chain: record { esp: vector of ip6_esp; ## Order of extension headers identified by RFC 1700 assigned numbers. + # [Robin] I don't understand how this works. ext_order: vector of count; }; ## Values extracted from an IPv4 header. ## ## .. bro:see:: pkt_hdr ip6_hdr discarder_check_ip +## +# [Robin] Rename to ip4_hdr? type ip_hdr: record { hl: count; ##< Header length in bytes. tos: count; ##< Type of service. @@ -1142,6 +1156,9 @@ type icmp_hdr: record { ## A packet header, consisting of an IP header and transport-layer header. ## ## .. bro:see:: new_packet +# +# [Robin] Add flags saying whether it's v4/v6, tcp/udp/icmp? The day will come where +# we can't infer that from the connection anymore (tunnels). type pkt_hdr: record { ip: ip_hdr &optional; ##< The IPv4 header if an IPv4 packet. ip6: ip6_hdr_chain &optional; ##< The IPv6 header chain if an IPv6 packet. @@ -1459,7 +1476,7 @@ export { ## NFS file attributes. Field names are based on RFC 1813. ## - ## .. bro:see:: nfs_proc_getattr + ## .. bro:see:: nfs_proc_getattr type fattr_t: record { ftype: file_type_t; ##< File type. mode: count; ##< Mode @@ -1478,8 +1495,8 @@ export { }; ## NFS *readdir* arguments. - ## - ## .. bro:see:: nfs_proc_readdir + ## + ## .. bro:see:: nfs_proc_readdir type diropargs_t : record { dirfh: string; ##< The file handle of the directory. fname: string; ##< The name of the file we are interested in. @@ -1488,7 +1505,7 @@ export { ## NFS lookup reply. If the lookup failed, *dir_attr* may be set. If the lookup ## succeeded, *fh* is always set and *obj_attr* and *dir_attr* may be set. ## - ## .. bro:see:: nfs_proc_lookup + ## .. bro:see:: nfs_proc_lookup type lookup_reply_t: record { fh: string &optional; ##< File handle of object looked up. obj_attr: fattr_t &optional; ##< Optional attributes associated w/ file @@ -1505,7 +1522,7 @@ export { }; ## NFS *read* reply. If the lookup fails, *attr* may be set. If the lookup succeeds, - ## *attr* may be set and all other fields are set. + ## *attr* may be set and all other fields are set. type read_reply_t: record { attr: fattr_t &optional; ##< Attributes. size: count &optional; ##< Number of bytes read. @@ -1514,7 +1531,7 @@ export { }; ## NFS *readline* reply. If the request fails, *attr* may be set. If the request - ## succeeds, *attr* may be set and all other fields are set. + ## succeeds, *attr* may be set and all other fields are set. ## ## .. bro:see:: nfs_proc_readlink type readlink_reply_t: record { @@ -1524,7 +1541,7 @@ export { ## NFS *write* arguments. ## - ## .. bro:see:: nfs_proc_write + ## .. bro:see:: nfs_proc_write type writeargs_t: record { fh: string; ##< File handle to write to. offset: count; ##< Offset in file. 
@@ -1534,18 +1551,18 @@ export { }; ## NFS *wcc* attributes. - ## + ## ## .. bro:see:: NFS3::write_reply_t type wcc_attr_t: record { - size: count; ##< The dize. + size: count; ##< The dize. atime: time; ##< Access time. mtime: time; ##< Modification time. }; ## NFS *write* reply. If the request fails, *pre|post* attr may be set. If the - ## request succeeds, *pre|post* attr may be set and all other fields are set. + ## request succeeds, *pre|post* attr may be set and all other fields are set. ## - ## .. bro:see:: nfs_proc_write + ## .. bro:see:: nfs_proc_write type write_reply_t: record { preattr: wcc_attr_t &optional; ##< Pre operation attributes. postattr: fattr_t &optional; ##< Post operation attributes. @@ -1556,9 +1573,9 @@ export { ## NFS reply for *create*, *mkdir*, and *symlink*. If the proc ## failed, *dir_\*_attr* may be set. If the proc succeeded, *fh* and the *attr*'s - ## may be set. Note: no guarantee that *fh* is set after success. + ## may be set. Note: no guarantee that *fh* is set after success. ## - ## .. bro:see:: nfs_proc_create nfs_proc_mkdir + ## .. bro:see:: nfs_proc_create nfs_proc_mkdir type newobj_reply_t: record { fh: string &optional; ##< File handle of object created. obj_attr: fattr_t &optional; ##< Optional attributes associated w/ new object. @@ -1566,17 +1583,17 @@ export { dir_post_attr: fattr_t &optional; ##< Optional attributes associated w/ dir. }; - ## NFS reply for *remove*, *rmdir*. Corresponds to *wcc_data* in the spec. + ## NFS reply for *remove*, *rmdir*. Corresponds to *wcc_data* in the spec. ## - ## .. bro:see:: nfs_proc_remove nfs_proc_rmdir + ## .. bro:see:: nfs_proc_remove nfs_proc_rmdir type delobj_reply_t: record { dir_pre_attr: wcc_attr_t &optional; ##< Optional attributes associated w/ dir. dir_post_attr: fattr_t &optional; ##< Optional attributes associated w/ dir. }; ## NFS *readdir* arguments. Used for both *readdir* and *readdirplus*. - ## - ## .. bro:see:: nfs_proc_readdir + ## + ## .. bro:see:: nfs_proc_readdir type readdirargs_t: record { isplus: bool; ##< Is this a readdirplus request? dirfh: string; ##< The directory filehandle. @@ -1589,7 +1606,7 @@ export { ## NFS *direntry*. *fh* and *attr* are used for *readdirplus*. However, even ## for *readdirplus* they may not be filled out. ## - ## .. bro:see:: NFS3::direntry_vec_t NFS3::readdir_reply_t + ## .. bro:see:: NFS3::direntry_vec_t NFS3::readdir_reply_t type direntry_t: record { fileid: count; ##< E.g., inode number. fname: string; ##< Filename. @@ -1600,7 +1617,7 @@ export { ## Vector of NFS *direntry*. ## - ## .. bro:see:: NFS3::readdir_reply_t + ## .. bro:see:: NFS3::readdir_reply_t type direntry_vec_t: vector of direntry_t; ## NFS *readdir* reply. Used for *readdir* and *readdirplus*. If an is @@ -1631,7 +1648,7 @@ module GLOBAL; ## An NTP message. ## -## .. bro:see:: ntp_message +## .. bro:see:: ntp_message type ntp_msg: record { id: count; ##< Message ID. code: count; ##< Message code. @@ -1653,7 +1670,7 @@ global samba_cmds: table[count] of string &redef { return fmt("samba-unknown-%d", c); }; ## An SMB command header. -## +## ## .. bro:see:: smb_com_close smb_com_generic_andx smb_com_logoff_andx ## smb_com_negotiate smb_com_negotiate_response smb_com_nt_create_andx ## smb_com_read_andx smb_com_setup_andx smb_com_trans_mailslot @@ -1672,9 +1689,9 @@ type smb_hdr : record { }; ## An SMB transaction. -## +## ## .. 
bro:see:: smb_com_trans_mailslot smb_com_trans_pipe smb_com_trans_rap -## smb_com_transaction smb_com_transaction2 +## smb_com_transaction smb_com_transaction2 type smb_trans : record { word_count: count; ##< TODO. total_param_count: count; ##< TODO. @@ -1688,7 +1705,7 @@ type smb_trans : record { param_offset: count; ##< TODO. data_count: count; ##< TODO. data_offset: count; ##< TODO. - setup_count: count; ##< TODO. + setup_count: count; ##< TODO. setup0: count; ##< TODO. setup1: count; ##< TODO. setup2: count; ##< TODO. @@ -1699,19 +1716,19 @@ type smb_trans : record { ## SMB transaction data. -## +## ## .. bro:see:: smb_com_trans_mailslot smb_com_trans_pipe smb_com_trans_rap -## smb_com_transaction smb_com_transaction2 -## +## smb_com_transaction smb_com_transaction2 +## ## .. todo:: Should this really be a record type? type smb_trans_data : record { data : string; ##< The transaction's data. }; -## Deprecated. -## +## Deprecated. +## ## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere -## else. +## else. type smb_tree_connect : record { flags: count; password: string; @@ -1719,21 +1736,21 @@ type smb_tree_connect : record { service: string; }; -## Deprecated. -## +## Deprecated. +## ## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere -## else. +## else. type smb_negotiate : table[count] of string; ## A list of router addresses offered by a DHCP server. ## -## .. bro:see:: dhcp_ack dhcp_offer +## .. bro:see:: dhcp_ack dhcp_offer type dhcp_router_list: table[count] of addr; ## A DHCP message. ## ## .. bro:see:: dhcp_ack dhcp_decline dhcp_discover dhcp_inform dhcp_nak -## dhcp_offer dhcp_release dhcp_request +## dhcp_offer dhcp_release dhcp_request type dhcp_msg: record { op: count; ##< Message OP code. 1 = BOOTREQUEST, 2 = BOOTREPLY m_type: count; ##< The type of DHCP message. @@ -1770,7 +1787,7 @@ type dns_msg: record { ## A DNS SOA record. ## -## .. bro:see:: dns_SOA_reply +## .. bro:see:: dns_SOA_reply type dns_soa: record { mname: string; ##< Primary source of data for zone. rname: string; ##< Mailbox for responsible person. @@ -1783,7 +1800,7 @@ type dns_soa: record { ## An additional DNS EDNS record. ## -## .. bro:see:: dns_EDNS_addl +## .. bro:see:: dns_EDNS_addl type dns_edns_additional: record { query: string; ##< Query. qtype: count; ##< Query type. @@ -1798,7 +1815,7 @@ type dns_edns_additional: record { ## An additional DNS TSIG record. ## -## bro:see:: dns_TSIG_addl +## bro:see:: dns_TSIG_addl type dns_tsig_additional: record { query: string; ##< Query. qtype: count; ##< Query type. @@ -1812,9 +1829,9 @@ type dns_tsig_additional: record { }; # DNS answer types. -# +# # .. .. bro:see:: dns_answerr -# +# # todo::use enum to make them autodoc'able const DNS_QUERY = 0; ##< A query. This shouldn't occur, just for completeness. const DNS_ANS = 1; ##< An answer record. @@ -1828,7 +1845,7 @@ const DNS_ADDL = 3; ##< An additional record. ## dns_TXT_reply dns_WKS_reply type dns_answer: record { ## Answer type. One of :bro:see:`DNS_QUERY`, :bro:see:`DNS_ANS`, - ## :bro:see:`DNS_AUTH` and :bro:see:`DNS_ADDL`. + ## :bro:see:`DNS_AUTH` and :bro:see:`DNS_ADDL`. answer_type: count; query: string; ##< Query. qtype: count; ##< Query type. @@ -1848,27 +1865,27 @@ global dns_skip_auth: set[addr] &redef; ## .. bro:see:: dns_skip_all_addl dns_skip_auth global dns_skip_addl: set[addr] &redef; -## If true, all DNS AUTH records are skipped. +## If true, all DNS AUTH records are skipped. ## ## .. 
bro:see:: dns_skip_all_addl dns_skip_auth global dns_skip_all_auth = T &redef; -## If true, all DNS ADDL records are skipped. +## If true, all DNS ADDL records are skipped. ## ## .. bro:see:: dns_skip_all_auth dns_skip_addl global dns_skip_all_addl = T &redef; ## If a DNS request includes more than this many queries, assume it's non-DNS -## traffic and do not process it. Set to 0 to turn off this functionality. +## traffic and do not process it. Set to 0 to turn off this functionality. global dns_max_queries = 5; ## An X509 certificate. ## -## .. bro:see:: x509_certificate +## .. bro:see:: x509_certificate type X509: record { version: count; ##< Version number. serial: string; ##< Serial number. - subject: string; ##< Subject. + subject: string; ##< Subject. issuer: string; ##< Issuer. not_valid_before: time; ##< Timestamp before when certificate is not valid. not_valid_after: time; ##< Timestamp after when certificate is not valid. @@ -1876,7 +1893,7 @@ type X509: record { ## HTTP session statistics. ## -## .. bro:see:: http_stats +## .. bro:see:: http_stats type http_stats_rec: record { num_requests: count; ##< Number of requests. num_replies: count; ##< Number of replies. @@ -1886,7 +1903,7 @@ type http_stats_rec: record { ## HTTP message statistics. ## -## .. bro:see:: http_message_done +## .. bro:see:: http_message_done type http_message_stat: record { ## When the request/reply line was complete. start: time; @@ -1903,26 +1920,26 @@ type http_message_stat: record { }; ## Maximum number of HTTP entity data delivered to events. The amount of data -## can be limited for better performance, zero disables truncation. -## +## can be limited for better performance, zero disables truncation. +## ## .. bro:see:: http_entity_data skip_http_entity_data skip_http_data global http_entity_data_delivery_size = 1500 &redef; ## Skip HTTP data for performance considerations. The skipped -## portion will not go through TCP reassembly. -## +## portion will not go through TCP reassembly. +## ## .. bro:see:: http_entity_data skip_http_entity_data http_entity_data_delivery_size const skip_http_data = F &redef; ## Maximum length of HTTP URIs passed to events. Longer ones will be truncated ## to prevent over-long URIs (usually sent by worms) from slowing down event ## processing. A value of -1 means "do not truncate". -## +## ## .. bro:see:: http_request const truncate_http_URI = -1 &redef; -## IRC join information. -## +## IRC join information. +## ## .. bro:see:: irc_join_list type irc_join_info: record { nick: string; @@ -1933,13 +1950,13 @@ type irc_join_info: record { ## Set of IRC join information. ## -## .. bro:see:: irc_join_message +## .. bro:see:: irc_join_message type irc_join_list: set[irc_join_info]; -## Deprecated. -## +## Deprecated. +## ## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere -## else. +## else. global irc_servers : set[addr] &redef; ## Internal to the stepping stone detector. @@ -2003,7 +2020,7 @@ type backdoor_endp_stats: record { ## Description of a signature match. ## -## .. bro:see:: signature_match +## .. bro:see:: signature_match type signature_state: record { sig_id: string; ##< ID of the matching signature. conn: connection; ##< Matching connection. @@ -2011,10 +2028,10 @@ type signature_state: record { payload_size: count; ##< Payload size of the first matching packet of current endpoint. }; -# Deprecated. -# +# Deprecated. +# # .. todo:: This type is no longer used. Remove any reference of this from the -# core. +# core. 
type software_version: record { major: int; minor: int; @@ -2022,10 +2039,10 @@ type software_version: record { addl: string; }; -# Deprecated. -# +# Deprecated. +# # .. todo:: This type is no longer used. Remove any reference of this from the -# core. +# core. type software: record { name: string; version: software_version; @@ -2042,7 +2059,7 @@ type OS_version_inference: enum { ## Passive fingerprinting match. ## -## .. bro:see:: OS_version_found +## .. bro:see:: OS_version_found type OS_version: record { genre: string; ##< Linux, Windows, AIX, ... detail: string; ##< Lernel version or such. @@ -2052,20 +2069,20 @@ type OS_version: record { ## Defines for which subnets we should do passive fingerprinting. ## -## .. bro:see:: OS_version_found +## .. bro:see:: OS_version_found global generate_OS_version_event: set[subnet] &redef; # Type used to report load samples via :bro:see:`load_sample`. For now, it's a # set of names (event names, source file names, and perhaps ````, which were seen during the sample. +# number>``, which were seen during the sample. type load_sample_info: set[string]; ## ID for NetFlow header. This is primarily a means to sort together NetFlow -## headers and flow records at the script level. +## headers and flow records at the script level. type nfheader_id: record { ## Name of the NetFlow file (e.g., ``netflow.dat``) or the receiving socket address ## (e.g., ``127.0.0.1:5555``), or an explicit name if specified to - ## ``-y`` or ``-Y``. + ## ``-y`` or ``-Y``. rcvr_id: string; ## A serial number, ignoring any overflows. pdu_id: count; @@ -2073,7 +2090,7 @@ type nfheader_id: record { ## A NetFlow v5 header. ## -## .. bro:see:: netflow_v5_header +## .. bro:see:: netflow_v5_header type nf_v5_header: record { h_id: nfheader_id; ##< ID for sorting. cnt: count; ##< TODO. @@ -2089,7 +2106,7 @@ type nf_v5_header: record { ## A NetFlow v5 record. ## ## .. bro:see:: netflow_v5_record -type nf_v5_record: record { +type nf_v5_record: record { h_id: nfheader_id; ##< ID for sorting. id: conn_id; ##< Connection ID. nexthop: addr; ##< Address of next hop. @@ -2123,7 +2140,7 @@ type bittorrent_peer: record { }; ## A set of BitTorrent peers. -## +## ## .. bro:see:: bt_tracker_response type bittorrent_peer_set: set[bittorrent_peer]; @@ -2146,12 +2163,12 @@ type bittorrent_benc_dir: table[string] of bittorrent_benc_value; ## Header table type used by BitTorrent analyzer. ## ## .. bro:see:: bt_tracker_request bt_tracker_response -## bt_tracker_response_not_ok +## bt_tracker_response_not_ok type bt_tracker_headers: table[string] of string; @load base/event.bif -## BPF filter the user has set via the -f command line options. Empty if none. +## BPF filter the user has set via the -f command line options. Empty if none. const cmd_line_bpf_filter = "" &redef; ## Deprecated. @@ -2169,24 +2186,24 @@ const log_encryption_key = "" &redef; ## Write profiling info into this file in regular intervals. The easiest way to ## activate profiling is loading :doc:`/scripts/policy/misc/profiling`. ## -## .. bro:see:: profiling_interval expensive_profiling_multiple segment_profiling +## .. bro:see:: profiling_interval expensive_profiling_multiple segment_profiling global profiling_file: file &redef; ## Update interval for profiling (0 disables). The easiest way to activate ## profiling is loading :doc:`/scripts/policy/misc/profiling`. ## -## .. bro:see:: profiling_file expensive_profiling_multiple segment_profiling +## .. 
bro:see:: profiling_file expensive_profiling_multiple segment_profiling const profiling_interval = 0 secs &redef; ## Multiples of profiling_interval at which (more expensive) memory profiling is ## done (0 disables). ## -## .. bro:see:: profiling_interval profiling_file segment_profiling +## .. bro:see:: profiling_interval profiling_file segment_profiling const expensive_profiling_multiple = 0 &redef; ## If true, then write segment profiling information (very high volume!) ## in addition to profiling statistics. -## +## ## .. bro:see:: profiling_interval expensive_profiling_multiple profiling_file const segment_profiling = F &redef; @@ -2225,42 +2242,42 @@ global load_sample_freq = 20 &redef; ## Rate at which to generate :bro:see:`gap_report` events assessing to what degree ## the measurement process appears to exhibit loss. -## +## ## .. bro:see:: gap_report const gap_report_freq = 1.0 sec &redef; ## Whether we want :bro:see:`content_gap` and :bro:see:`gap_report` for partial ## connections. A connection is partial if it is missing a full handshake. Note ## that gap reports for partial connections might not be reliable. -## +## ## .. bro:see:: content_gap gap_report partial_connection const report_gaps_for_partial = F &redef; ## The CA certificate file to authorize remote Bros/Broccolis. -## +## ## .. bro:see:: ssl_private_key ssl_passphrase const ssl_ca_certificate = "" &redef; ## File containing our private key and our certificate. -## +## ## .. bro:see:: ssl_ca_certificate ssl_passphrase const ssl_private_key = "" &redef; ## The passphrase for our private key. Keeping this undefined ## causes Bro to prompt for the passphrase. -## +## ## .. bro:see:: ssl_private_key ssl_ca_certificate const ssl_passphrase = "" &redef; ## Default mode for Bro's user-space dynamic packet filter. If true, packets that -## aren't explicitly allowed through, are dropped from any further processing. -## +## aren't explicitly allowed through, are dropped from any further processing. +## ## .. note:: This is not the BPF packet filter but an additional dynamic filter -## that Bro optionally applies just before normal processing starts. -## -## .. bro:see:: install_dst_addr_filter install_dst_net_filter +## that Bro optionally applies just before normal processing starts. +## +## .. bro:see:: install_dst_addr_filter install_dst_net_filter ## install_src_addr_filter install_src_net_filter uninstall_dst_addr_filter -## uninstall_dst_net_filter uninstall_src_addr_filter uninstall_src_net_filter +## uninstall_dst_net_filter uninstall_src_addr_filter uninstall_src_net_filter const packet_filter_default = F &redef; ## Maximum size of regular expression groups for signature matching. @@ -2272,17 +2289,17 @@ const enable_syslog = F &redef; ## Description transmitted to remote communication peers for identification. const peer_description = "bro" &redef; -## If true, broadcast events received from one peer to all other peers. -## +## If true, broadcast events received from one peer to all other peers. +## ## .. bro:see:: forward_remote_state_changes ## ## .. note:: This option is only temporary and will disappear once we get a more ## sophisticated script-level communication framework. const forward_remote_events = F &redef; -## If true, broadcast state updates received from one peer to all other peers. -## -## .. bro:see:: forward_remote_events +## If true, broadcast state updates received from one peer to all other peers. +## +## .. bro:see:: forward_remote_events ## ## .. 
note:: This option is only temporary and will disappear once we get a more ## sophisticated script-level communication framework. @@ -2311,23 +2328,23 @@ const REMOTE_SRC_PARENT = 2; ##< Message from the parent process. const REMOTE_SRC_SCRIPT = 3; ##< Message from a policy script. ## Synchronize trace processing at a regular basis in pseudo-realtime mode. -## +## ## .. bro:see:: remote_trace_sync_peers const remote_trace_sync_interval = 0 secs &redef; ## Number of peers across which to synchronize trace processing in -## pseudo-realtime mode. -## +## pseudo-realtime mode. +## ## .. bro:see:: remote_trace_sync_interval const remote_trace_sync_peers = 0 &redef; ## Whether for :bro:attr:`&synchronized` state to send the old value as a -## consistency check. +## consistency check. const remote_check_sync_consistency = F &redef; ## Analyzer tags. The core automatically defines constants ## ``ANALYZER_*``, e.g., ``ANALYZER_HTTP``. -## +## ## .. bro:see:: dpd_config ## ## .. todo::We should autodoc these automaticallty generated constants. @@ -2345,7 +2362,7 @@ type dpd_protocol_config: record { ## This table defines the ports. ## ## .. bro:see:: dpd_reassemble_first_packets dpd_buffer_size -## dpd_match_only_beginning dpd_ignore_ports +## dpd_match_only_beginning dpd_ignore_ports const dpd_config: table[AnalyzerTag] of dpd_protocol_config = {} &redef; ## Reassemble the beginning of all TCP connections before doing @@ -2353,10 +2370,10 @@ const dpd_config: table[AnalyzerTag] of dpd_protocol_config = {} &redef; ## expensive of CPU cycles. ## ## .. bro:see:: dpd_config dpd_buffer_size -## dpd_match_only_beginning dpd_ignore_ports -## +## dpd_match_only_beginning dpd_ignore_ports +## ## .. note:: Despite the name, this option affects *all* signature matching, not -## only signatures used for dynamic protocol detection. +## only signatures used for dynamic protocol detection. const dpd_reassemble_first_packets = T &redef; ## Size of per-connection buffer used for dynamic protocol detection. For each @@ -2365,23 +2382,23 @@ const dpd_reassemble_first_packets = T &redef; ## already passed through (i.e., when a DPD signature matches only later). ## However, once the buffer is full, data is deleted and lost to analyzers that are ## activated afterwards. Then only analyzers that can deal with partial -## connections will be able to analyze the session. +## connections will be able to analyze the session. ## ## .. bro:see:: dpd_reassemble_first_packets dpd_config dpd_match_only_beginning -## dpd_ignore_ports +## dpd_ignore_ports const dpd_buffer_size = 1024 &redef; ## If true, stops signature matching if dpd_buffer_size has been reached. ## ## .. bro:see:: dpd_reassemble_first_packets dpd_buffer_size -## dpd_config dpd_ignore_ports -## +## dpd_config dpd_ignore_ports +## ## .. note:: Despite the name, this option affects *all* signature matching, not -## only signatures used for dynamic protocol detection. +## only signatures used for dynamic protocol detection. const dpd_match_only_beginning = T &redef; ## If true, don't consider any ports for deciding which protocol analyzer to -## use. If so, the value of :bro:see:`dpd_config` is ignored. +## use. If so, the value of :bro:see:`dpd_config` is ignored. ## ## .. bro:see:: dpd_reassemble_first_packets dpd_buffer_size ## dpd_match_only_beginning dpd_config @@ -2389,14 +2406,14 @@ const dpd_ignore_ports = F &redef; ## Ports which the core considers being likely used by servers. 
For ports in ## this set, is may heuristically decide to flip the direction of the -## connection if it misses the initial handshake. +## connection if it misses the initial handshake. const likely_server_ports: set[port] &redef; ## Deprated. Set of all ports for which we know an analyzer, built by -## :doc:`/scripts/base/frameworks/dpd/main`. +## :doc:`/scripts/base/frameworks/dpd/main`. ## ## .. todo::This should be defined by :doc:`/scripts/base/frameworks/dpd/main` -## itself we still need it. +## itself we still need it. global dpd_analyzer_ports: table[port] of set[AnalyzerTag]; ## Per-incident timer managers are drained after this amount of inactivity. @@ -2409,7 +2426,7 @@ const time_machine_profiling = F &redef; const check_for_unused_event_handlers = F &redef; # If true, dumps all invoked event handlers at startup. -# todo::Still used? +# todo::Still used? # const dump_used_event_handlers = F &redef; ## Deprecated. @@ -2425,7 +2442,7 @@ const trace_output_file = ""; ## of setting this to true is that we can write the packets out before we actually ## process them, which can be helpful for debugging in case the analysis triggers a ## crash. -## +## ## .. bro:see:: trace_output_file const record_all_packets = F &redef; @@ -2438,7 +2455,7 @@ const record_all_packets = F &redef; const ignore_keep_alive_rexmit = F &redef; ## Whether the analysis engine parses IP packets encapsulated in -## UDP tunnels. +## UDP tunnels. ## ## .. bro:see:: tunnel_port const parse_udp_tunnels = F &redef; @@ -2446,6 +2463,6 @@ const parse_udp_tunnels = F &redef; ## Number of bytes per packet to capture from live interfaces. const snaplen = 8192 &redef; -# Load the logging framework here because it uses fairly deep integration with +# Load the logging framework here because it uses fairly deep integration with # BiFs and script-land defined types. @load base/frameworks/logging diff --git a/src/Frag.cc b/src/Frag.cc index cbdae92883..68c5c108f1 100644 --- a/src/Frag.cc +++ b/src/Frag.cc @@ -32,10 +32,12 @@ FragReassembler::FragReassembler(NetSessions* arg_s, { s = arg_s; key = k; + + // [Robin] Can't we merge these two cases now? const struct ip* ip4 = ip->IP4_Hdr(); if ( ip4 ) { - proto_hdr_len = ip4->ip_hl * 4; + proto_hdr_len = ip4->ip_hl * 4; // [Robin] HdrLen? proto_hdr = new u_char[64]; // max IP header + slop // Don't do a structure copy - need to pick up options, too. memcpy((void*) proto_hdr, (const void*) ip4, proto_hdr_len); @@ -244,6 +246,12 @@ void FragReassembler::BlockInserted(DataBlock* /* start_block */) reassem4->ip_len = htons(frag_size + proto_hdr_len); reassembled_pkt = new IP_Hdr(reassem4, true); } + + // [Robin] Please always check for IP version explicitly, like here + // do "if ... ip_v == 6", and then catch other values via + // weird/errors. Even of it shouldn't happen (because of earlier + // checks), it's better to be safe. I believe there are more places + // like this elsewhere, please check. else { struct ip6_hdr* reassem6 = (struct ip6_hdr*) pkt_start; diff --git a/src/IP.h b/src/IP.h index 53fe1daf84..b876a2ac3b 100644 --- a/src/IP.h +++ b/src/IP.h @@ -14,6 +14,15 @@ #include #include +// [Robin] I'm concerced about the virtual methods here. These methods will +// be called *a lot* and that may add to some significant overhead I'm afraid +// (at least eventually as IPv6 is picking up). +// +// [Robin] Similar concern for the vector and ip6_hdrs data +// members: we're creating/allocating those for every IPv6 packet, right? +// +// Any idea how to avoid these? 
+ /** * Base class for IPv6 header/extensions. */ @@ -32,6 +41,13 @@ public: */ IPv6_Hdr(const u_char* d, uint16 nxt) : type(IPPROTO_IPV6), data(d) { + // [Robin]. This looks potentially dangerous as it's changing + // the data passed in, which the caller may not realize. From + // quick look, it's only used from Frag.cc, so that may be + // ok. But could we guard against accidental use somehome? + // Like making this protected and then declare a friend; or a + // seperate method ChangeNext(). (I saw it's used by derived + // classes so not sure wehat works best.) if ( ((ip6_hdr*)data)->ip6_nxt == IPPROTO_FRAGMENT ) ((ip6_hdr*)data)->ip6_nxt = nxt; } diff --git a/src/Sessions.cc b/src/Sessions.cc index b4115f5c16..a5b054b933 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -430,6 +430,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( discarder && discarder->NextPacket(ip_hdr, len, caplen) ) return; + // [Robin] dump_this_packet = 1 for non-ICMP/UDP/TCP removed here. Why? + FragReassembler* f = 0; if ( ip_hdr->IsFragment() ) @@ -465,6 +467,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, len -= ip_hdr_len; // remove IP header caplen -= ip_hdr_len; + // [Robin] Does ESP need to be the last header? if ( ip_hdr->LastHeader() == IPPROTO_ESP ) { if ( esp_packet ) @@ -474,7 +477,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, mgr.QueueEvent(esp_packet, vl); } Remove(f); - // Can't do more since upper-layer payloads are going to be encrypted + // Can't do more since upper-layer payloads are going to be encrypted. return; } @@ -486,6 +489,9 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, return; } + // [Robin] The Remove(f) used to be here, while it's now before every + // return statement. I'm not seeing why? + const u_char* data = ip_hdr->Payload(); ConnID id; @@ -594,6 +600,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( ipv6_ext_headers && ip_hdr->NumHeaders() > 1 ) { pkt_hdr_val = ip_hdr->BuildPktHdrVal(); + // [Robin] This should be ipv6_ext_headers, right? conn->Event(new_packet, 0, pkt_hdr_val); } From 7af14ec1fe0ff4ed4088f127eeae2eeb79eac887 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 14 Mar 2012 10:00:48 -0500 Subject: [PATCH 134/651] Remove the default "tcp or udp or icmp" filter. In default mode, Bro would load the packet filter script framework which installs a filter that allows all packets, but in bare mode (the -b option), this old filter would not follow IPv6 protocol chains and thus filter out packets with extension headers. --- src/main.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.cc b/src/main.cc index 6313528980..f5a5b5282f 100644 --- a/src/main.cc +++ b/src/main.cc @@ -837,7 +837,7 @@ int main(int argc, char** argv) if ( dns_type != DNS_PRIME ) net_init(interfaces, read_files, netflows, flow_files, - writefile, "tcp or udp or icmp", + writefile, "", secondary_path->Filter(), do_watchdog); BroFile::SetDefaultRotation(log_rotate_interval, log_max_size); From 5312a904ab2e1a242a03486b8ed2b59e2f274514 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 14 Mar 2012 10:31:08 -0500 Subject: [PATCH 135/651] Fix ipv6_ext_headers event and add routing0_data_to_addrs BIF. Also add unit tests for ipv6_ext_headers and esp_packet events. 
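For reference, a minimal script-level sketch of the new BIF (illustrative only, not part of the patch): it walks the routing extension headers reported via ipv6_ext_headers and expands the type-0 ones into the addresses they carry. It assumes the event is raised with the connection and a pkt_hdr, and that ip6_hdr_chain exposes the routing headers as a vector field named routing whose entries are ip6_routing records with rtype and data fields, as described in the BIF documentation below.

    event ipv6_ext_headers(c: connection, p: pkt_hdr)
        {
        if ( ! p?$ip6 )
            return;

        for ( i in p$ip6$routing )
            {
            local rh = p$ip6$routing[i];

            # Only type-0 routing headers carry a plain list of
            # intermediate addresses that the BIF can decode.
            if ( rh$rtype == 0 )
                print routing0_data_to_addrs(rh$data);
            }
        }
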
--- src/Sessions.cc | 2 +- src/bro.bif | 32 +++++ .../bifs.routing0_data_to_addrs/output | 4 + testing/btest/Baseline/core.ipv6_esp/output | 120 ++++++++++++++++++ .../Baseline/core.ipv6_ext_headers/output | 1 + .../btest/Traces/ext_hdr_hbh_routing.trace | Bin 0 -> 153 bytes testing/btest/Traces/ip6_esp.trace | Bin 0 -> 20210 bytes .../btest/bifs/routing0_data_to_addrs.test | 9 ++ testing/btest/core/ipv6_esp.test | 10 ++ testing/btest/core/ipv6_ext_headers.test | 10 ++ 10 files changed, 187 insertions(+), 1 deletion(-) create mode 100644 testing/btest/Baseline/bifs.routing0_data_to_addrs/output create mode 100644 testing/btest/Baseline/core.ipv6_esp/output create mode 100644 testing/btest/Baseline/core.ipv6_ext_headers/output create mode 100644 testing/btest/Traces/ext_hdr_hbh_routing.trace create mode 100644 testing/btest/Traces/ip6_esp.trace create mode 100644 testing/btest/bifs/routing0_data_to_addrs.test create mode 100644 testing/btest/core/ipv6_esp.test create mode 100644 testing/btest/core/ipv6_ext_headers.test diff --git a/src/Sessions.cc b/src/Sessions.cc index b4115f5c16..e70540b598 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -594,7 +594,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( ipv6_ext_headers && ip_hdr->NumHeaders() > 1 ) { pkt_hdr_val = ip_hdr->BuildPktHdrVal(); - conn->Event(new_packet, 0, pkt_hdr_val); + conn->Event(ipv6_ext_headers, 0, pkt_hdr_val); } if ( new_packet ) diff --git a/src/bro.bif b/src/bro.bif index ff06288940..375a1c64c1 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -2049,6 +2049,38 @@ function is_v6_addr%(a: addr%): bool # # =========================================================================== +## Converts the *data* field of :bro:type:`ip6_routing` records that have +## *rtype* of 0 into a set of addresses. +## +## s: The *data* field of an :bro:type:`ip6_routing` record that has +## an *rtype* of 0. +## +## Returns: The set of addresses contained in the routing header data. +function routing0_data_to_addrs%(s: string%): addr_set + %{ + BroType* index_type = base_type(TYPE_ADDR); + TypeList* set_index = new TypeList(index_type); + set_index->Append(index_type); + TableVal* tv = new TableVal(new SetType(set_index, 0)); + + int len = s->Len(); + const u_char* bytes = s->Bytes(); + bytes += 4; // go past 32-bit reserved field + len -= 4; + if ( ( len % 16 ) != 0 ) + reporter->Warning("Bad ip6_routing data length: %d", s->Len()); + + while ( len > 0 ) + { + IPAddr a(IPAddr::IPv6, (const uint32*) bytes, IPAddr::Network); + tv->Assign(new AddrVal(a), 0); + bytes += 16; + len -= 16; + } + + return tv; + %} + ## Converts a :bro:type:`addr` to a :bro:type:`index_vec`. ## ## a: The address to convert into a vector of counts. 
diff --git a/testing/btest/Baseline/bifs.routing0_data_to_addrs/output b/testing/btest/Baseline/bifs.routing0_data_to_addrs/output new file mode 100644 index 0000000000..7179bf8564 --- /dev/null +++ b/testing/btest/Baseline/bifs.routing0_data_to_addrs/output @@ -0,0 +1,4 @@ +{ +2001:78:1:32::1, +2001:78:1:32::2 +} diff --git a/testing/btest/Baseline/core.ipv6_esp/output b/testing/btest/Baseline/core.ipv6_esp/output new file mode 100644 index 0000000000..645b4c8c56 --- /dev/null +++ b/testing/btest/Baseline/core.ipv6_esp/output @@ -0,0 +1,120 @@ +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=5]], 
ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, 
dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=10]], ext_order=[50]], tcp=, 
udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], 
hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=5]], ext_order=[50]], tcp=, udp=, 
icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], 
dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] diff --git 
a/testing/btest/Baseline/core.ipv6_ext_headers/output b/testing/btest/Baseline/core.ipv6_ext_headers/output new file mode 100644 index 0000000000..4cc9c706ae --- /dev/null +++ b/testing/btest/Baseline/core.ipv6_ext_headers/output @@ -0,0 +1 @@ +[ip=, ip6=[hdr=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b], hopopts=[[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]]], dstopts=[], routing=[[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B]], fragment=[], ah=[], esp=[], ext_order=[0, 43]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] diff --git a/testing/btest/Traces/ext_hdr_hbh_routing.trace b/testing/btest/Traces/ext_hdr_hbh_routing.trace new file mode 100644 index 0000000000000000000000000000000000000000..2a294ed58ea254aa88049e4287324bd05111eaf0 GIT binary patch literal 153 zcmca|c+)~A1{MYwaA0F#U<7jR?~eEPY-MC91hPT+KNvt%wcSksF{~LJ6c|~405!8S zJ!t&@FX;b&RQ{}NZ3afL(Sj@tOdu@^j0_b($jfgh8yN)nEXRP#nmY z40s(L;RP>gHKxO8up=oUL)MP z5A~q~2SO^f;(f8SkG&p&VpYp{gTog}f#sGc1Ii!*tNj~kqaQ_#A`j}G2W1=~t#a=z zr6|?P{~avOcJ16A3wg*t*@4gbcdLzHEFTN&9811FBTn8!BAX#=RO@0L>+|F?3+=0_ zS9Q;vI+vHU?;Xjl0CF%PoaBhNn^1GcY9iie((`Rd;f|@*&;80_odR2$1o}OgGAJ7t zp)p%t5KMV9iksz(M{bx#=b=-t&#L$i+?dF!lb1NODq|Li7OItsC;z}J-N~|MT|ynR zp09jyxxipWOK%mBXArBeWv6#tP?qFo)8)hxQN-Sa-q zZj$0R<>fjcXM&d;a=R&#ylHZmYq3%llk7zmWgZIg4KY^#54YSiwPcT4+@9Z;=V=F5 z$dyv{dPd4WwJe_CkPl&ntwbgTu;(5wIb#Nmyhw;W-!@`8ZjYd8{8jlbPA{iw_R)w_ zV?KTI1|Z+Xhtq69zTJd(xqO&Wq4g{`m8g+u?^u<5PUh>|p|UpFt^#_bU6OY=x)?lIQG ztV@;}Rr8~-EtQSa1m(5f!L~H9E0@uMFTMN4wobqljeGZ<9}AB^mJWS6a0C6or!V;>h*D@~M%chQ@oG%P=!pFfT!6 z0ZB3GP&}F*4N>6E@BJ|9BeL74FD@sU(ByxoLY}CF5A?O{^RFB2Z z4j?b7g_9ipcGLW?{gWihVQZb`FQ3yV@gM0o>NTENnnw@lY12JG{H}@nJTsU(rqb~b zAjfS+17!e9AoTd>hV-vB)apO3p-8|e4B$X`tdU6(-#rj!-c%*h==uJ|Um9D|Q|8&z zF4RfYkCUXfyfff2ZUKAU;z5P-ur1rM9`sU;b)4KE&XubY?<<;{M=mkCtYjay;Yc#k z85|j|bV!@DkG7iF;~r&6oB~pm2{=g+VGS~nviYu*D9^KnX`zlHHOh`sjEVLOoPeN& zP3=7^1l3E7Ey`m-+PWAn4E%DwLiS-(rF}`2`Tdt(hX-Dz794NZGhay^8d!R*WaW`n zGQUt_Az?iOq|)PXk|MzxKG#ak_rI5vj9NO5-49sBeay}D-HCrNerGcR&rNJfXsuB? zXvr?YY4pl0V_N}1m$gsOm2z~oWZ)*!zv=u9O1N(z!;w>rdy~<#`zB*3BggbD_2+VbCU zA>azz#YxTIT7&d7T5(KAUWO=6js5ggMnC(^>n;H)C%mM{u!efa(IByee#$9<(WQ;a zN|*PC9HnH_CiDW%V4^Jo7W^N$A0{87Qc3twImU3*kwv8KK33taB$9olB5gCi&*B_g zD>_V>8#n5*2?%VAU*#Mr*nz|YuKTGS+Tbs9sDT3J+etdMSl#n!uOs3 zw;WJU;!tt}F7J2p8eAi>Bgk1#T;8yNbG+HoP{{HX_v_+phw0Akq-09pMf@8xy|fo! 
zgY2v`(KVaQ-)39^(gJu%QDF@^%#T~o=ye{M^`#fRlSFr0Y_zu~nRPGdKV!kbC1OU+ zz!S0l?Ia*~z5M2MVHE6}wA$tx*ZH7%XXq6Zi=O{vxtPnw>&XqizF)-z>2=TTUV0LU zUjx!XcuCP<4bRnMQm_tZdg(vA;T&JQ9H@7jNcV{KL|f3^sT6QY>o^$HRpT9eZpc=Q}?- ztU=GqH@D8gpv>@_legA#xT@5*w|0)7zwODjhGU&1SD5znHeX+glW=H3^ppQjjwLe0lO|Pc|h1?lL*VQ5rk9G4dQQT?EEyiWoB$$gU zAMO0vuSl{1iy!Z~L`6I54xxMDw7Q=AJQM%3;EfQytcRh&e%l)o!<-;l&-WVeH6&;4X(8g+y%AfKD~_r)x5G z9MUCx?Z8@i<-a&!i3Lcd;3b8?8btiqR34qYNeD!)=b2jR!DuAa8ljAs`mz1D6gxGo zsrHjsV^08Tk}a9*8&c<7RFOiH8iJ_OcgD2!=}L(-wqHhPXS{t(gs~i`a3CBpfzJ|LZxnijTwp%y zl6RxF@*Z6?yWNLzOKR$+Z9rs*TCxZ==Qm2rV&|Wk(q)vp$48AOO$Ar|&)9JQ=@7i6 zD6oc~^Qnjrb($WL(aBK<6;q81I)9QnK8*AUt!oeYW+u$8#08{r@RFj!8gimBORb%Y zXe4$_+fX?6?NwD`0Bss=~#Ff5afagCx)z47GA3dno(qFMnC)B7I^Ac^&ZpX(YU)Xs`yXSiiz^V#cnQX4Z+;-QpGx zgFLVT9@E|zw2oU0zThsf=jlC6Cn~7kF;-nN9mOKOJc#%K8nzr}B*seEYTnu5x%ryL zJsK_UFV>TLNDq1Du{^7f4@j}#B}Iod^omxb=>~zfNm|7;2HZ6xoj-Y0&JpX;q0U#v zdkX6+)K?u_34r3{a>D}r7Z$2z?5c@f@rXJ}eA(jTJf~g_y(*}GYAT(GIT-ZtSUUW} zaq^m<7X=`7Uq%PT0!s|^_~!=s*BWK;AAgU60Hd&g10fLfMej@(gaAzy22xSc&@nKv zuyJtl@CgWsh)GDv$nQ~5Qc=^;($O<8GBLBTvaxe;a&hzU^6?7@-WNhZL=@E&ywYBs zYk9e^%H2Q-*spfaofkPb@a??8d<`Rrhsev39c4h&7Dy`%1n`UFa{1%;VZhWQ>1 zki!5k9V8G$0o6gEsWS_AyRy4nhOGO)^|K2YZe(eJoD(1iE>{*g5TvUE=#ZhQd3V&2 zIesCQ_@F-gA(S_p4#@ckFC9uCC`B94p+Hkj@2G3*G&5aHV;V<#A)r7HoTypB$KJltDR`kk4wpCPf46Ui53Cda>|1$Q zDs#!$)2eZd6M}rhtb?ux9MtIquP@nNp;^_Sh+guM8K`b@)!)w7^VGih@p4c(%c!lI9UyB%u?pK8sL*pAbtu_N1%$MYv>|eh#%x zV=QtQARqLAlN|AOQ$t4hb?|u4yaAUg`NTukzxwRDJHkK7MDuID>tjXfzo--r@x1U` zzP!;pIbatr2QNS^!j@wF1gkMxwrqR!I9+QGm7pdJL1-^)pOU8*7EN^>%rcwnoz^mk}{11 zhO}I z{(qV*S6#P@(^|e^+1Xw$YEfX+tlC^=zBk|I5_WJ~wmXY1da;DELV(7>VW$1suHJf_ znGZ_c`}K_v-PKvBEl~5tNmTC0r`y^oMsE! z?Iy0?-ScRD?B6L(uDbOGLDJ`<$|173&kB^V2@@3dW{Kx6!oS&(G8NpLSoQ@8yP_{7 zS;utCb7T_+MG^Nl?h$OLGOu5v7p~mHOjG~psR+rqfMy`yUoH=3vc_+N&=(n4a zZsMN=kLc^}dyrzC&{a5e>I(>aJUT^<4MF(2^4yViC%*<%!0v58$BmTgcS9P9t|?6p zQvNpk5OXE)$*&D1yr6xFHx{Jfr@}41m70|E1Tlk$_Y$?*M{?|y(g1nBALf7e=p_NL zhW@i}b&^YVdem08@^arLVoaUFK|h^)6;BrjgIGi?rWm*iq|EFD+&=R}pRH=iE1^rQpQxL!C( z5n&BnZeSCe6UNJPUfh6o+s*zqK7rHS=LBD@9B4gvXom;{NpzO7QY;v$6p{%v$Asq; z&&~xJMeZ}6_PA**dHMSeii*)Xho0}a?#wI|rs4fWV8b5H0HihDaFQay8a5~rsfk(R zk%}K4UhmQQ7Ru_5et+U$GdrVt5Z|eI=wlcE(N{M8&k?fk&<~S3&lBz&le!E>qkyUb zny~!f$Ro5bpb4uwo}J6Cd+blG$bFA^ii|S>sXn}*SoXkL4%xM#%^h@=6`nu zV&_;ZB!hiFtms((ofXb3N1NKdDKcVE6mH{x+RZ>vD_{LAk9AXd>HP4CA)&^)caKP) zj~;W2;+_~zWWlBe>ZYh*79hogmlOhP=zBs@t3o4=D28Kb&=T)KP#6?4o9>x2Us{`? 
z5FY%cSJKhVa*<{FQ_HTw>N^J3e)VMjuo~(Lze|#D3WGFQjrvOTV#$>pt4p$z5vbe} z?|4`PpS}g8OI>if`pB?`=`JitXC97j_rN{OVewW5_syyz%-sjtp}!cNeiSF1`);{# zG&IuxPVS`<@QWvZM^_ey$5m~Kr6*QYd`}vKfJE*&vz1F>-|=eTHmlEuNc$KwLpC6d zgqIWr)?mqRn};#>ArgbQE!b85Dkl@IT-=`!gV}x0^yN5DHuZ#Wkht7xhzbX;y`8~S zuu@#>hfj$Tu^=IXxyemmj{xO&3iOWCugk;DmhEK}Um4F3qmASMQWJPdQDF@)rrvMH zCmpjj=^H=9Y`<}6)=kLWHI(|0c=39dq}oFEM9Cqtr?x$uD64X{iQ6;HwbrQhg8JHM1@aa`m zNPKCw#BZnC&r(m$Rm&7H@-PpO9(;k*JVl2!EIo_FeNd7-S6N>kF-%01RL+4NWX__QS3oVat7RhfAPjZyEsEqcXXPAAu+_@x) zQeQY}9qRW%%qat;{X@5;|F`>Aguoj0&%TvB^5OwoK^OLyvDMhTeO>=TfiBQ6La|7E zP;z;L<5iJ@NfY&jwN*LCAAi+o!L(0aI*wrh5B4qUtmedr!PebJ0(I&pU;-KPmO($h z@D44~hcC5nQho`_Eb=`l2c#E+aFQaz8glrj`kTYYvNlOtqlH9+Ia?w5x)O7d*u@pI zmcP5y_y^CCj&>JG0^KEFmBe5KA7ZRxKS=aU@|_63N7Kg;bzZgt=0Id+qOZhBWqp7C z>e&JBdz1=5Y7Q?c60AYTZj+GN4$<1@Kn17JM3z1AAi~8n0QuK?;ESvC|2)x&kvjvGddLI}sv?`VS0gzh2 zOA3ZHNdJ29`4{KTxT<)|{7K_B$6BLcLzfh>Y6yz$GJa6(pj7BPMQc4DP(1Sc#G#Zh zzHeEAm7Tu42^BIUAo&Ng$YuP-1F=s$FiQdrNH?%$8mgm8)GGn$=2tjfeF&_9`%yS9 z)o*(e7R1NsUR9d|AqG8c-_2PU)-~M}hfB7Tby*fAg|cxSz*h>U>G7de#mFx=8{5gP zmo@Sly^Q0{R^}xOvdqs{m;MyrjsmhS*&4bs29Nt($Qi{g;7= z%&N#&6&d7+MYZ%#I8^0L#Iy4%GIy5%{@7w9$MPP?;T5!wRAtVcQ_`J z_V`lr_`Y_ufbDmNOxxz^My6^&>IN?<3akO+z1=V$1+Ii$?#FD|VujuNC_`o6O$j|q zgc+xPXN4nY6j*xvj2?0cD3~!wFngvqswCERN{wYGfy4ht!urKb)&OF6k8-6lo`>pF z`=R-*@+KLZ8bHbiFDWXl0hdBT%V!}=)37;eqoYli@>~4WXEDEF2qDhsWeXNsB_w=-9t}l)84pZJZgG;rO=DGxCPk^8sFLnwgg@hoh*trTovH?5x2p-`%)* zJQv=MBVn)fVRAA{nn4XpzqJyC+QZb*{N)@4(T_nl!Uh7Eld*v8Pbx}Tp3~>e5GfB~hVGt)OFs*g`$L>uPu4=`Ks!Mj`Hr`G2i&>5%9Eq!m zdbw(}|K7J^SO04S zLV%{S0I7G`x5`F@SdUZ1Jy*NLP-q8o8dS01qXYJ><|Y9hA~e+lNWF^=-Dp_$E51%y zxE7DpXCTKOUOHgkiW90sf~L~nQI~&z&dy7aXG?=mUbX|sVTG3t90+or0CZqz>VnGM z&K>R_x&k_afmEg4=hvM;4(B5{U0Gn?>gfoegFsW4)bFOlLEb#!2_aL&FuYd!0^~#j zIseuE0qm;+*tf!f>X4zSvUk*J@u;eO%}pC4XP@uv0&>vcr33b@!iNDJ3N*D_?QZ8T zar0NaQmXB2eHQ2aZXm}<6Hez2*tg;z0CcF(R8k=IE?=|xT>A)x!{7BnWn74QfE*Sr zIO%|WEBih`hXzeuyrYg-;{1CGkDugZ5UEKokh5U{CmpbFl~WJs(4ncNKF|plsmVJ@okJ_V4Fg+5N}Aai9Z6L4T(~7^K!)59qeN5q`l$ z3c)mZ&&-C_F_|T)l#=zaUT&I<{L;xb2<7Be)TZgk0KTlJdC0xUIqN=j&&Q@o9@IsD z=SQ7+jY5)&RoZ9_bgEXg{eGw_9?@ue(o1r-8{WUtsbc}O25;b`MZBG^!`$O08FF|i zimKo5B;!CvW%~&{oFD%ORkwx;@g%Xv#BcUXqujaHWiMlZWgv;`kz~%$-dA>Y7RDS@ zu!cHFAQoLgsP|b5=lGf8hn7)nRzOPwFD=sT^ss@ewHzfwdBSg-5C(6_y$(x>9H)=z zq%~}U9i2j2)Ej(g*aRsNyzPdV_vlO>jq8{78YYdtHk_(G=H`hwO=fhbM6!`;YHu7Q zs<6ijU}Xcef$-9TZ>LYpNKhsU7&1q^s?vPP(Pv(}`(kNACc=d#H@DmY1wE@7x#9=ZN!${3cw zZ*@k$rp=q41b4{tc>1v*k-sh^j@?x6FV%d!O9TgW-oi_Zd^_EGnIBJmQ3Q!urV^5q zveG@HXm5@eRxIJVCWeZtLkMrv#M7vdyp~Yd+qBGQov5A1hm-37Z`2RrC}&)3o1(t1ooc zueboMOd6bK0_yE_!+rs#R(iGTt{%~(r`2d1Z@bTwMLDSOy%qbFCwLg8h8xg2rNT*zc01i^ zc6+)S`BRI}m9@u^bndrzYa)mUQz~EE+5MulEK!OX`*Mid8-s_UviHTJ8pZk8@VGWH zrIfxYEYs2QEm0~|0* zN+0(uyceb_+bZy$yT^#fCe~k?%TMvov5CB0H);CQMu=#*IK?#ROqc3K;g{$sDxRcQ zf$#OCbHnKx1~7H@EcN-s0A0`woOFn=)QgI71*1SbX}U9)NJ`acwsw}7VKvNG(q7Mg zS0J~h|AZVIJO0%B<3G2Y!6aZhptacyvOvL)?_p6Rpuhf$WfPMZ#OmLN94!v$9Dc${ zhXhOI{H}ejj*qv0*sy(>%*HA%^2seW@%nl`(*8%P;9u2>nLNWPRm*X@k~0QV2Ujm{ zlOd8(759h`84SV(DywuWPtx3X5nYK~5`c~gUOF%=_1hOmud%kLtx+n|Ec>}Gtv3_v z*&{jrE|1rxPjO?Sv7eK>R|_rFZHY+ocz@ZJ{d-WJA)x%8z?#~|0q5fLssyT<_xqs+ zMqmf|T<+=zoX#BtmP*I-c;S15r)e!un41mE38hx{}#KSKiyhfRiM_$~Ro~8eBPa(Hf0xxDk+Pwwg)9a#^ow2=_S;R7jzGEl* zzn5s=Tqs3l7%lh_N&`A)ceIP=qPGGUax> zXt^V#ODcN`ZS)>Rvdb469Vm~HNQ7>eZ7uZTRdeI;RLKB3MtJE^VX2cB zh)SUFX|6yV<5Qd7`7rwpA6IquZv&czgb~-Q29-7bCSlGXO2#>~cklb2NG#;xEe zQ0&LNu1dwbAZTS^;<3+sq=SEwP#r=jlw&3RRV#@~x8MHsa`JgnnC^1q;A8pmqI5Yx zR|zj2IxH1$+P$#hCSPyo;O}e17&8MJ%k5to*7^RXXAD}}Z!GlOVm(74h9HF92V+R4 
zr^W^?R_6@Pg!uCL`wX=pIYut32ZzCOyykdnfR1wpQKkrDYnv4Vm(QR(@oUp%*?&^Rpa1l;AL|CfgYK7F!$WRy!{d$p%*)^t- zM(bAu0Zbh`h{wbOthY~I+3GGS^Jq$=T5;gMdl~Fu+EF_%CVL&f8X~&LIod~L^F1#~ zsXI*L5uj6pmktS*T0m#PtDN@6uEH!6(*9 zm9%gQ;ePU@1&_}-ou%usmp*MDnfQ^ADDgY)?R{A|qpr%oIDL(?A8f1R%LLvpM5WW}x#9jxTiCsM82fKCNoIuuyyD(&a+-0QcJ;@-^S<@uzCa)NK)di(2^ zPorwn^ca~IA8(GEOeUd*H&C{}vOU~6;Q6p-nKidt+|;Exl8)N$Lt6R7)1+iEK?l&` zz)Ob;OReE>V#_wnX~dmMteCE2~7pH*T6eAnLefT8iQs{VMfnb%<88- zi)&w&<|TwO&0-2yn95sNS^3D|S+FPZ$68%L*Zd1k^9~J`y1che8GeZOh0s*t`~cs6 z_CA$dAMs9uL02i(k#~M?nKU>@3$-#&d{G?pE4XOVP*;YmI?Peq?akPm&kWTZGx)!% z&~e_8XzBsF=kU^@!%`u#2BMK$*6PlI9W*R4=aRjf9{VoQTPH8b+n3a9nU z33!QseG>+#2nm{cwthE}B`|ge82bu1>Coe!+g-03I1uDS2Z+GX)QUTbtbnoKxp3ek z0`^DzegYy0H1#czdY7wa1B|61{h#ymU!No7K#=@CAVP+wE&_A?E+SiCEMgp-M8H0X z&2vD60!?iPHh%6RvIEB2Xa7I{BkjMMN5J04p)DXng{JZXsdo|C17jIW;3NX}JEV*O j5gIggx%h4(2Vm?DFcvOfYrr0dmk}UBho+X@QS`q61hT$Z literal 0 HcmV?d00001 diff --git a/testing/btest/bifs/routing0_data_to_addrs.test b/testing/btest/bifs/routing0_data_to_addrs.test new file mode 100644 index 0000000000..f150ec2a35 --- /dev/null +++ b/testing/btest/bifs/routing0_data_to_addrs.test @@ -0,0 +1,9 @@ +# @TEST-EXEC: bro -C -b -r $TRACES/ext_hdr_hbh_routing.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +event ipv6_ext_headers(c: connection, p: pkt_hdr) + { + for ( h in p$ip6$routing ) + if ( p$ip6$routing[h]$rtype == 0 ) + print routing0_data_to_addrs(p$ip6$routing[h]$data); + } diff --git a/testing/btest/core/ipv6_esp.test b/testing/btest/core/ipv6_esp.test new file mode 100644 index 0000000000..b606c23400 --- /dev/null +++ b/testing/btest/core/ipv6_esp.test @@ -0,0 +1,10 @@ +# @TEST-EXEC: bro -r $TRACES/ip6_esp.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +# Just check that the event is raised correctly for a packet containing +# ESP extension headers. + +event esp_packet(p: pkt_hdr) + { + print p; + } diff --git a/testing/btest/core/ipv6_ext_headers.test b/testing/btest/core/ipv6_ext_headers.test new file mode 100644 index 0000000000..170a67bc72 --- /dev/null +++ b/testing/btest/core/ipv6_ext_headers.test @@ -0,0 +1,10 @@ +# @TEST-EXEC: bro -C -b -r $TRACES/ext_hdr_hbh_routing.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +# Just check that the event is raised correctly for a packet containing +# extension headers. + +event ipv6_ext_headers(c: connection, p: pkt_hdr) + { + print p; + } From 94864da465a134bea251461adf588442e2d6d2bd Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 14 Mar 2012 15:25:08 -0500 Subject: [PATCH 136/651] Update documentation for new syntax of IPv6 literals. --- doc/scripts/builtins.rst | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/doc/scripts/builtins.rst b/doc/scripts/builtins.rst index 5da551ed1f..30b344ca6b 100644 --- a/doc/scripts/builtins.rst +++ b/doc/scripts/builtins.rst @@ -162,7 +162,11 @@ The Bro scripting language supports the following built-in types. ``A1.A2.A3.A4``, where Ai all lie between 0 and 255. IPv6 address constants are written as colon-separated hexadecimal form - as described by :rfc:`2373`. + as described by :rfc:`2373`, but additionally encased in square brackets. + The mixed notation with embedded IPv4 addresses as dotted-quads in the + lower 32 bits is also allowed. + Some examples: ``[2001:db8::1]``, ``[::ffff:192.168.1.100]``, or + ``[aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]``. 
Hostname constants can also be used, but since a hostname can correspond to multiple IP addresses, the type of such variable is a @@ -196,7 +200,7 @@ The Bro scripting language supports the following built-in types. A type representing a block of IP addresses in CIDR notation. A ``subnet`` constant is written as an :bro:type:`addr` followed by a slash (/) and then the network prefix size specified as a decimal - number. For example, ``192.168.0.0/16``. + number. For example, ``192.168.0.0/16`` or ``[fe80::]/64``. .. bro:type:: any From b4e6971aab46054e68887836e1a1be40cfd4b9c5 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 14 Mar 2012 14:45:53 -0700 Subject: [PATCH 137/651] Add regular debugging output for interesting operations (stream/filter operations) to input framework (this was way overdue) --- scripts/base/frameworks/input/main.bro | 25 +++++++- src/DebugLogger.cc | 3 +- src/DebugLogger.h | 1 + src/input/Manager.cc | 88 +++++++++++++++++++++++++- 4 files changed, 114 insertions(+), 3 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index c6995121bd..1df8563d94 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -5,7 +5,7 @@ module Input; export { - redef enum Input::ID += { TABLE_READ }; + redef enum Input::ID += { TABLE_READ, EVENT_READ }; ## The default input reader used. Defaults to `READER_ASCII`. const default_reader = READER_ASCII &redef; @@ -123,6 +123,8 @@ export { ## filter: the `TableFilter` record describing the filter. global read_table: function(description: Input::StreamDescription, filter: Input::TableFilter) : bool; + global read_event: function(description: Input::StreamDescription, filter: Input::EventFilter) : bool; + global update_finished: event(id: Input::ID); } @@ -182,6 +184,27 @@ function read_table(description: Input::StreamDescription, filter: Input::TableF if ( ok ) { ok = add_tablefilter(id, filter); } + if ( ok ) { + ok = remove_tablefilter(id, filter$name); + } + if ( ok ) { + ok = remove_stream(id); + } else { + remove_stream(id); + } + + return ok; +} + +function read_event(description: Input::StreamDescription, filter: Input::EventFilter) : bool { + local ok: bool = T; + # since we create and delete it ourselves this should be ok... 
at least for singlethreaded operation + local id: Input::ID = Input::EVENT_READ; + + ok = create_stream(id, description); + if ( ok ) { + ok = add_eventfilter(id, filter); + } if ( ok ) { ok = remove_stream(id); } else { diff --git a/src/DebugLogger.cc b/src/DebugLogger.cc index c41a0552c6..3394486ff2 100644 --- a/src/DebugLogger.cc +++ b/src/DebugLogger.cc @@ -15,7 +15,8 @@ DebugLogger::Stream DebugLogger::streams[NUM_DBGS] = { { "compressor", 0, false }, {"string", 0, false }, { "notifiers", 0, false }, { "main-loop", 0, false }, { "dpd", 0, false }, { "tm", 0, false }, - { "logging", 0, false }, { "threading", 0, false } + { "logging", 0, false }, {"input", 0, false }, + { "threading", 0, false } }; DebugLogger::DebugLogger(const char* filename) diff --git a/src/DebugLogger.h b/src/DebugLogger.h index 71e21bfa26..ca422072c5 100644 --- a/src/DebugLogger.h +++ b/src/DebugLogger.h @@ -24,6 +24,7 @@ enum DebugStream { DBG_DPD, // Dynamic application detection framework DBG_TM, // Time-machine packet input via Brocolli DBG_LOGGING, // Logging streams + DBG_INPUT, // Input streams DBG_THREADING, // Threading system NUM_DBGS // Has to be last diff --git a/src/input/Manager.cc b/src/input/Manager.cc index db98cb7a33..aa50453bdf 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -205,12 +205,24 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) // create a new input reader object to be used at whomevers leisure lateron. ReaderFrontend* Manager::CreateStream(EnumVal* id, RecordVal* description) { + { + ReaderInfo *i = FindReader(id); + if ( i != 0 ) { + ODesc desc; + id->Describe(&desc); + reporter->Error("Trying create already existing input stream %s", desc.Description()); + return 0; + } + } + ReaderDefinition* ir = input_readers; RecordType* rtype = description->Type()->AsRecordType(); if ( ! 
same_type(rtype, BifType::Record::Input::StreamDescription, 0) ) { - reporter->Error("Streamdescription argument not of right type"); + ODesc desc; + id->Describe(&desc); + reporter->Error("Streamdescription argument not of right type for new input stream %s", desc.Description()); return 0; } @@ -239,6 +251,13 @@ ReaderFrontend* Manager::CreateStream(EnumVal* id, RecordVal* description) reader_obj->Init(source, mode->InternalInt(), do_autostart); +#ifdef DEBUG + ODesc desc; + id->Describe(&desc); + DBG_LOG(DBG_INPUT, "Successfully created new input stream %s", + desc.Description()); +#endif + return reader_obj; } @@ -503,6 +522,13 @@ bool Manager::AddTableFilter(EnumVal *id, RecordVal* fval) { i->filters[filterid] = filter; i->reader->AddFilter( filterid, fieldsV.size(), fields ); +#ifdef DEBUG + ODesc desc; + id->Describe(&desc); + DBG_LOG(DBG_INPUT, "Successfully created new table filter %s for stream", + filter->name.c_str(), desc.Description()); +#endif + return true; } @@ -574,6 +600,13 @@ bool Manager::RemoveStream(const EnumVal* id) { i->reader->Finish(); +#ifdef DEBUG + ODesc desc; + id->Describe(&desc); + DBG_LOG(DBG_INPUT, "Successfully queued removal of stream %s", + desc.Description()); +#endif + return true; } @@ -586,6 +619,12 @@ bool Manager::RemoveStreamContinuation(const ReaderFrontend* reader) { if ( (*s)->reader && (*s)->reader == reader ) { i = *s; +#ifdef DEBUG + ODesc desc; + i->id->Describe(&desc); + DBG_LOG(DBG_INPUT, "Successfully executed removal of stream %s", + desc.Description()); +#endif delete(i); readers.erase(s); return true; @@ -651,6 +690,13 @@ bool Manager::ForceUpdate(const EnumVal* id) i->reader->Update(); +#ifdef DEBUG + ODesc desc; + id->Describe(&desc); + DBG_LOG(DBG_INPUT, "Forcing update of stream %s", + desc.Description()); +#endif + return true; // update is async :( } @@ -685,6 +731,13 @@ bool Manager::RemoveTableFilter(EnumVal* id, const string &name) { i->reader->RemoveFilter(filterId); +#ifdef DEBUG + ODesc desc; + id->Describe(&desc); + DBG_LOG(DBG_INPUT, "Queued removal of tablefilter %s for stream %s", + name.c_str(), desc.Description()); +#endif + return true; } @@ -701,6 +754,13 @@ bool Manager::RemoveFilterContinuation(const ReaderFrontend* reader, const int f return false; } +#ifdef DEBUG + ODesc desc; + i->id->Describe(&desc); + DBG_LOG(DBG_INPUT, "Executed removal of (table|event)-filter %s for stream %s", + (*it).second->name.c_str(), desc.Description()); +#endif + delete (*it).second; i->filters.erase(it); @@ -736,6 +796,13 @@ bool Manager::RemoveEventFilter(EnumVal* id, const string &name) { } i->reader->RemoveFilter(filterId); + +#ifdef DEBUG + ODesc desc; + id->Describe(&desc); + DBG_LOG(DBG_INPUT, "Queued removal of eventfilter %s for stream %s", + name.c_str(), desc.Description()); +#endif return true; } @@ -948,6 +1015,13 @@ void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { assert(i->HasFilter(id)); +#ifdef DEBUG + ODesc desc; + i->id->Describe(&desc); + DBG_LOG(DBG_INPUT, "Got EndCurrentSend for filter %d and stream %s", + id, desc.Description()); +#endif + if ( i->filters[id]->filter_type == EVENT_FILTER ) { // nothing to do.. 
return; @@ -1018,6 +1092,11 @@ void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { filter->lastDict = filter->currDict; filter->currDict = new PDict(InputHash); +#ifdef DEBUG + DBG_LOG(DBG_INPUT, "EndCurrentSend complete for filter %d and stream %s, queueing update_finished event", + id, desc.Description()); +#endif + // Send event that the current update is indeed finished. EventHandler* handler = event_registry->Lookup("Input::update_finished"); if ( handler == 0 ) { @@ -1202,6 +1281,13 @@ void Manager::Clear(const ReaderFrontend* reader, int id) { return; } +#ifdef DEBUG + ODesc desc; + i->id->Describe(&desc); + DBG_LOG(DBG_INPUT, "Got Clear for filter %d and stream %s", + id, desc.Description()); +#endif + assert(i->HasFilter(id)); assert(i->filters[id]->filter_type == TABLE_FILTER); From 57ffe1be777f395de2cb94c95c02b9f234109744 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 15 Mar 2012 18:41:51 -0700 Subject: [PATCH 138/651] completely change interface again. compiles, not really tested. basic test works 70% of the time, coredumps in the other 30 - but was not easy to debug on a first glance (most interestingly the crash happens in the logging framework - I wonder how that works). Other tests are not adjusted to the new interface yet. --- scripts/base/frameworks/input/main.bro | 190 ++----- src/input.bif | 39 +- src/input/Manager.cc | 511 ++++++------------ src/input/Manager.h | 96 +--- src/input/ReaderBackend.cc | 115 +--- src/input/ReaderBackend.h | 80 +-- src/input/ReaderFrontend.cc | 83 +-- src/input/ReaderFrontend.h | 27 +- src/input/readers/Ascii.cc | 182 +++---- src/input/readers/Ascii.h | 24 +- src/input/readers/Raw.cc | 89 +-- src/input/readers/Raw.h | 23 +- src/types.bif | 4 - .../scripts/base/frameworks/input/basic.bro | 12 +- 14 files changed, 403 insertions(+), 1072 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 1df8563d94..4f7f9983d1 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -5,15 +5,15 @@ module Input; export { - redef enum Input::ID += { TABLE_READ, EVENT_READ }; - ## The default input reader used. Defaults to `READER_ASCII`. const default_reader = READER_ASCII &redef; const default_mode = MANUAL &redef; - ## Stream decription type used for the `create_stream` method - type StreamDescription: record { + ## TableFilter description type used for the `table` method. + type TableDescription: record { + ## Common definitions for tables and events + ## String that allows the reader to find the source. ## For `READER_ASCII`, this is the filename. source: string; @@ -26,13 +26,12 @@ export { ## Automatically start the input stream after the first filter has been added autostart: bool &default=T; - }; - ## TableFilter description type used for the `add_tablefilter` method. - type TableFilter: record { ## Descriptive name. Used to remove a filter at a later time name: string; + ## Special definitions for tables + ## Table which will contain the data read by the input framework destination: any; ## Record that defines the values used as the index of the table @@ -55,11 +54,28 @@ export { pred: function(typ: Input::Event, left: any, right: any): bool &optional; }; - ## EventFilter description type used for the `add_eventfilter` method. - type EventFilter: record { - ## Descriptive name. Used to remove a filter at a later time - name: string; + ## EventFilter description type used for the `event` method. 
+ type EventDescription: record { + ## Common definitions for tables and events + + ## String that allows the reader to find the source. + ## For `READER_ASCII`, this is the filename. + source: string; + + ## Reader to use for this steam + reader: Reader &default=default_reader; + ## Read mode to use for this stream + mode: Mode &default=default_mode; + + ## Automatically start the input stream after the first filter has been added + autostart: bool &default=T; + + ## Descriptive name. Used to remove a filter at a later time + name: string; + + ## Special definitions for events + ## Record describing the fields to be retrieved from the source input. fields: any; ## If want_record if false (default), the event receives each value in fields as a seperate argument. @@ -72,61 +88,29 @@ export { }; - #const no_filter: Filter = [$name="", $idx="", $val="", $destination=""]; # Sentinel. - - ## Create a new input stream from a given source. Returns true on success. + ## Create a new table input from a given source. Returns true on success. ## - ## id: `Input::ID` enum value identifying this stream - ## description: `StreamDescription` record describing the source. - global create_stream: function(id: Input::ID, description: Input::StreamDescription) : bool; - - ## Remove a current input stream. Returns true on success. + ## description: `TableDescription` record describing the source. + global add_table: function(description: Input::TableDescription) : bool; + + ## Create a new event input from a given source. Returns true on success. ## - ## id: `Input::ID` enum value identifying the stream to be removed - global remove_stream: function(id: Input::ID) : bool; + ## description: `TableDescription` record describing the source. + global add_event: function(description: Input::EventDescription) : bool; + + ## Remove a input stream. Returns true on success and false if the named stream was not found. + ## + ## id: string value identifying the stream to be removed + global remove: function(id: string) : bool; ## Forces the current input to be checked for changes. + ## Returns true on success and false if the named stream was not found ## - ## id: `Input::ID` enum value identifying the stream - global force_update: function(id: Input::ID) : bool; - - ## Adds a table filter to a specific input stream. Returns true on success. - ## - ## id: `Input::ID` enum value identifying the stream - ## filter: the `TableFilter` record describing the filter. - global add_tablefilter: function(id: Input::ID, filter: Input::TableFilter) : bool; - - ## Removes a named table filter to a specific input stream. Returns true on success. - ## - ## id: `Input::ID` enum value identifying the stream - ## name: the name of the filter to be removed. - global remove_tablefilter: function(id: Input::ID, name: string) : bool; - - ## Adds an event filter to a specific input stream. Returns true on success. - ## - ## id: `Input::ID` enum value identifying the stream - ## filter: the `EventFilter` record describing the filter. - global add_eventfilter: function(id: Input::ID, filter: Input::EventFilter) : bool; - - ## Removes a named event filter to a specific input stream. Returns true on success. - ## - ## id: `Input::ID` enum value identifying the stream - ## name: the name of the filter to be removed. 
- global remove_eventfilter: function(id: Input::ID, name: string) : bool; - #global get_filter: function(id: ID, name: string) : Filter; - - ## Convenience function for reading a specific input source exactly once using - ## exactly one tablefilter - ## - ## id: `Input::ID` enum value identifying the stream - ## description: `StreamDescription` record describing the source. - ## filter: the `TableFilter` record describing the filter. - global read_table: function(description: Input::StreamDescription, filter: Input::TableFilter) : bool; - - global read_event: function(description: Input::StreamDescription, filter: Input::EventFilter) : bool; - - global update_finished: event(id: Input::ID); + ## id: string value identifying the stream + global force_update: function(id: string) : bool; + ## Event that is called, when the update of a specific source is finished + global update_finished: event(id: string); } @load base/input.bif @@ -134,90 +118,26 @@ export { module Input; -#global filters: table[ID, string] of Filter; +#global streams: table[string] of Filter; +# ^ change to set containing the names -function create_stream(id: Input::ID, description: Input::StreamDescription) : bool +function add_table(description: Input::TableDescription) : bool { - return __create_stream(id, description); + return __create_table_stream(description); } -function remove_stream(id: Input::ID) : bool +function add_event(description: Input::EventDescription) : bool + { + return __create_event_stream(description); + } + +function remove(id: string) : bool { return __remove_stream(id); } -function force_update(id: Input::ID) : bool +function force_update(id: string) : bool { return __force_update(id); } -function add_tablefilter(id: Input::ID, filter: Input::TableFilter) : bool - { -# filters[id, filter$name] = filter; - return __add_tablefilter(id, filter); - } - -function remove_tablefilter(id: Input::ID, name: string) : bool - { -# delete filters[id, name]; - return __remove_tablefilter(id, name); - } - -function add_eventfilter(id: Input::ID, filter: Input::EventFilter) : bool - { -# filters[id, filter$name] = filter; - return __add_eventfilter(id, filter); - } - -function remove_eventfilter(id: Input::ID, name: string) : bool - { -# delete filters[id, name]; - return __remove_eventfilter(id, name); - } - -function read_table(description: Input::StreamDescription, filter: Input::TableFilter) : bool { - local ok: bool = T; - # since we create and delete it ourselves this should be ok... at least for singlethreaded operation - local id: Input::ID = Input::TABLE_READ; - - ok = create_stream(id, description); - if ( ok ) { - ok = add_tablefilter(id, filter); - } - if ( ok ) { - ok = remove_tablefilter(id, filter$name); - } - if ( ok ) { - ok = remove_stream(id); - } else { - remove_stream(id); - } - - return ok; -} - -function read_event(description: Input::StreamDescription, filter: Input::EventFilter) : bool { - local ok: bool = T; - # since we create and delete it ourselves this should be ok... 
at least for singlethreaded operation - local id: Input::ID = Input::EVENT_READ; - - ok = create_stream(id, description); - if ( ok ) { - ok = add_eventfilter(id, filter); - } - if ( ok ) { - ok = remove_stream(id); - } else { - remove_stream(id); - } - - return ok; -} - -#function get_filter(id: ID, name: string) : Filter -# { -# if ( [id, name] in filters ) -# return filters[id, name]; -# -# return no_filter; -# } diff --git a/src/input.bif b/src/input.bif index 5418b7bbd4..1157b7b62b 100644 --- a/src/input.bif +++ b/src/input.bif @@ -7,52 +7,33 @@ module Input; #include "NetVar.h" %%} -type StreamDescription: record; -type TableFilter: record; -type EventFilter: record; +type TableDescription: record; +type EventDescription: record; -function Input::__create_stream%(id: Input::ID, description: Input::StreamDescription%) : bool +function Input::__create_table_stream%(description: Input::TableDescription%) : bool %{ - input::ReaderFrontend *the_reader = input_mgr->CreateStream(id->AsEnumVal(), description->AsRecordVal()); - return new Val( the_reader != 0, TYPE_BOOL ); - %} - -function Input::__remove_stream%(id: Input::ID%) : bool - %{ - bool res = input_mgr->RemoveStream(id->AsEnumVal()); + bool res = input_mgr->CreateTableStream(description->AsRecordVal()); return new Val( res, TYPE_BOOL ); %} -function Input::__force_update%(id: Input::ID%) : bool +function Input::__create_event_stream%(description: Input::EventDescription%) : bool %{ - bool res = input_mgr->ForceUpdate(id->AsEnumVal()); + bool res = input_mgr->CreateEventStream(description->AsRecordVal()); return new Val( res, TYPE_BOOL ); %} -function Input::__add_tablefilter%(id: Input::ID, filter: Input::TableFilter%) : bool +function Input::__remove_stream%(id: string%) : bool %{ - bool res = input_mgr->AddTableFilter(id->AsEnumVal(), filter->AsRecordVal()); + bool res = input_mgr->RemoveStream(id->AsString()->CheckString()); return new Val( res, TYPE_BOOL ); %} -function Input::__remove_tablefilter%(id: Input::ID, name: string%) : bool +function Input::__force_update%(id: string%) : bool %{ - bool res = input_mgr->RemoveTableFilter(id->AsEnumVal(), name->AsString()->CheckString()); - return new Val( res, TYPE_BOOL); - %} - -function Input::__add_eventfilter%(id: Log::ID, filter: Input::EventFilter%) : bool - %{ - bool res = input_mgr->AddEventFilter(id->AsEnumVal(), filter->AsRecordVal()); + bool res = input_mgr->ForceUpdate(id->AsString()->CheckString()); return new Val( res, TYPE_BOOL ); %} -function Input::__remove_eventfilter%(id: Log::ID, name: string%) : bool - %{ - bool res = input_mgr->RemoveEventFilter(id->AsEnumVal(), name->AsString()->CheckString()); - return new Val( res, TYPE_BOOL); - %} - # Options for Ascii Reader module InputAscii; diff --git a/src/input/Manager.cc b/src/input/Manager.cc index aa50453bdf..44b5bf44db 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -31,14 +31,25 @@ declare(PDict, InputHash); class Manager::Filter { public: - EnumVal* id; string name; + string source; + + int mode; FilterType filter_type; // to distinguish between event and table filters + EnumVal* type; + ReaderFrontend* reader; + virtual ~Filter(); }; +Manager::Filter::~Filter() { + Unref(type); + + delete(reader); +} + class Manager::TableFilter: public Manager::Filter { public: @@ -85,10 +96,6 @@ Manager::EventFilter::EventFilter() { filter_type = EVENT_FILTER; } -Manager::Filter::~Filter() { - Unref(id); -} - Manager::TableFilter::~TableFilter() { Unref(tab); Unref(itype); @@ -99,41 +106,6 @@ 
Manager::TableFilter::~TableFilter() { delete lastDict; } -struct Manager::ReaderInfo { - EnumVal* id; - EnumVal* type; - ReaderFrontend* reader; - - map filters; // filters that can prevent our actions - - bool HasFilter(int id); - - ~ReaderInfo(); - }; - -Manager::ReaderInfo::~ReaderInfo() { - map::iterator it = filters.begin(); - - while ( it != filters.end() ) { - delete (*it).second; - ++it; - } - - Unref(type); - Unref(id); - - delete(reader); -} - -bool Manager::ReaderInfo::HasFilter(int id) { - map::iterator it = filters.find(id); - if ( it == filters.end() ) { - return false; - } - return true; -} - - struct ReaderDefinition { bro_int_t type; // the type const char *name; // descriptive name for error messages @@ -203,37 +175,34 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) } // create a new input reader object to be used at whomevers leisure lateron. -ReaderFrontend* Manager::CreateStream(EnumVal* id, RecordVal* description) +bool Manager::CreateStream(Filter* info, RecordVal* description) { - { - ReaderInfo *i = FindReader(id); - if ( i != 0 ) { - ODesc desc; - id->Describe(&desc); - reporter->Error("Trying create already existing input stream %s", desc.Description()); - return 0; - } - } - ReaderDefinition* ir = input_readers; RecordType* rtype = description->Type()->AsRecordType(); - if ( ! same_type(rtype, BifType::Record::Input::StreamDescription, 0) ) + if ( ! ( same_type(rtype, BifType::Record::Input::TableDescription, 0) || same_type(rtype, BifType::Record::Input::EventDescription, 0) ) ) { - ODesc desc; - id->Describe(&desc); - reporter->Error("Streamdescription argument not of right type for new input stream %s", desc.Description()); - return 0; + reporter->Error("Streamdescription argument not of right type for new input stream"); + return false; + } + + Val* name_val = description->LookupWithDefault(rtype->FieldOffset("name")); + string name = name_val->AsString()->CheckString(); + Unref(name_val); + + { + Filter *i = FindFilter(name); + if ( i != 0 ) { + reporter->Error("Trying create already existing input stream %s", name.c_str()); + return false; + } } EnumVal* reader = description->LookupWithDefault(rtype->FieldOffset("reader"))->AsEnumVal(); - EnumVal* mode = description->LookupWithDefault(rtype->FieldOffset("mode"))->AsEnumVal(); Val *autostart = description->LookupWithDefault(rtype->FieldOffset("autostart")); - bool do_autostart = ( autostart->InternalInt() == 1 ); - Unref(autostart); // Ref'd by LookupWithDefault - - ReaderFrontend* reader_obj = new ReaderFrontend(reader->InternalInt()); - assert(reader_obj); + + ReaderFrontend* reader_obj = new ReaderFrontend(reader->InternalInt()); + assert(reader_obj); // get the source... 
Val* sourceval = description->LookupWithDefault(rtype->FieldOffset("source")); @@ -241,42 +210,45 @@ ReaderFrontend* Manager::CreateStream(EnumVal* id, RecordVal* description) const BroString* bsource = sourceval->AsString(); string source((const char*) bsource->Bytes(), bsource->Len()); Unref(sourceval); + + EnumVal* mode = description->LookupWithDefault(rtype->FieldOffset("mode"))->AsEnumVal(); + info->mode = mode->InternalInt(); + Unref(mode); - ReaderInfo* info = new ReaderInfo; info->reader = reader_obj; info->type = reader->AsEnumVal(); // ref'd by lookupwithdefault - info->id = id->Ref()->AsEnumVal(); + info->name = name; + info->source = source; - readers.push_back(info); - - reader_obj->Init(source, mode->InternalInt(), do_autostart); #ifdef DEBUG - ODesc desc; - id->Describe(&desc); DBG_LOG(DBG_INPUT, "Successfully created new input stream %s", - desc.Description()); + name.c_str()); #endif - return reader_obj; + return true; } -bool Manager::AddEventFilter(EnumVal *id, RecordVal* fval) { - ReaderInfo *i = FindReader(id); - if ( i == 0 ) { - reporter->Error("Stream not found"); - return false; - } +bool Manager::CreateEventStream(RecordVal* fval) { RecordType* rtype = fval->Type()->AsRecordType(); - if ( ! same_type(rtype, BifType::Record::Input::EventFilter, 0) ) + if ( ! same_type(rtype, BifType::Record::Input::EventDescription, 0) ) { reporter->Error("filter argument not of right type"); return false; } + + EventFilter* filter = new EventFilter(); + { + bool res = CreateStream(filter, fval); + if ( res == false ) { + delete filter; + return false; + } + } + - Val* name = fval->LookupWithDefault(rtype->FieldOffset("name")); RecordType *fields = fval->LookupWithDefault(rtype->FieldOffset("fields"))->AsType()->AsTypeType()->Type()->AsRecordType(); Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); @@ -352,42 +324,36 @@ bool Manager::AddEventFilter(EnumVal *id, RecordVal* fval) { } Unref(fields); // ref'd by lookupwithdefault - EventFilter* filter = new EventFilter(); - filter->name = name->AsString()->CheckString(); - Unref(name); // ref'd by lookupwithdefault - filter->id = id->Ref()->AsEnumVal(); filter->num_fields = fieldsV.size(); filter->fields = fields->Ref()->AsRecordType(); filter->event = event_registry->Lookup(event->GetID()->Name()); filter->want_record = ( want_record->InternalInt() == 1 ); Unref(want_record); // ref'd by lookupwithdefault - int filterid = 0; - if ( i->filters.size() > 0 ) { - filterid = i->filters.rbegin()->first + 1; // largest element is at beginning of map-> new id = old id + 1-> - } - i->filters[filterid] = filter; - i->reader->AddFilter( filterid, fieldsV.size(), logf ); + assert(filter->reader); + filter->reader->Init(filter->source, filter->mode, filter->num_fields, logf ); + readers[filter->reader] = filter; return true; } -bool Manager::AddTableFilter(EnumVal *id, RecordVal* fval) { - ReaderInfo *i = FindReader(id); - if ( i == 0 ) { - reporter->Error("Stream not found"); - return false; - } - +bool Manager::CreateTableStream(RecordVal* fval) { RecordType* rtype = fval->Type()->AsRecordType(); - if ( ! same_type(rtype, BifType::Record::Input::TableFilter, 0) ) + if ( ! 
same_type(rtype, BifType::Record::Input::TableDescription, 0) ) { reporter->Error("filter argument not of right type"); return false; } + TableFilter* filter = new TableFilter(); + { + bool res = CreateStream(filter, fval); + if ( res == false ) { + delete filter; + return false; + } + } - Val* name = fval->LookupWithDefault(rtype->FieldOffset("name")); Val* pred = fval->LookupWithDefault(rtype->FieldOffset("pred")); RecordType *idx = fval->LookupWithDefault(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); @@ -493,9 +459,6 @@ bool Manager::AddTableFilter(EnumVal *id, RecordVal* fval) { fields[i] = fieldsV[i]; } - TableFilter* filter = new TableFilter(); - filter->name = name->AsString()->CheckString(); - filter->id = id->Ref()->AsEnumVal(); filter->pred = pred ? pred->AsFunc() : 0; filter->num_idx_fields = idxfields; filter->num_val_fields = valfields; @@ -508,25 +471,22 @@ bool Manager::AddTableFilter(EnumVal *id, RecordVal* fval) { filter->want_record = ( want_record->InternalInt() == 1 ); Unref(want_record); // ref'd by lookupwithdefault - Unref(name); Unref(pred); if ( valfields > 1 ) { assert(filter->want_record); } + + + assert(filter->reader); + filter->reader->Init(filter->source, filter->mode, fieldsV.size(), fields ); + + readers[filter->reader] = filter; - int filterid = 0; - if ( i->filters.size() > 0 ) { - filterid = i->filters.rbegin()->first + 1; // largest element is at beginning of map-> new id = old id + 1-> - } - i->filters[filterid] = filter; - i->reader->AddFilter( filterid, fieldsV.size(), fields ); #ifdef DEBUG - ODesc desc; - id->Describe(&desc); - DBG_LOG(DBG_INPUT, "Successfully created new table filter %s for stream", - filter->name.c_str(), desc.Description()); + DBG_LOG(DBG_INPUT, "Successfully created table stream %s", + filter->name.c_str()); #endif return true; @@ -583,16 +543,8 @@ bool Manager::IsCompatibleType(BroType* t, bool atomic_only) } -bool Manager::RemoveStream(const EnumVal* id) { - ReaderInfo *i = 0; - for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) - { - if ( (*s)->id == id ) - { - i = (*s); - break; - } - } +bool Manager::RemoveStream(const string &name) { + Filter *i = FindFilter(name); if ( i == 0 ) { return false; // not found @@ -601,38 +553,29 @@ bool Manager::RemoveStream(const EnumVal* id) { i->reader->Finish(); #ifdef DEBUG - ODesc desc; - id->Describe(&desc); DBG_LOG(DBG_INPUT, "Successfully queued removal of stream %s", - desc.Description()); + name.c_str()); #endif return true; } -bool Manager::RemoveStreamContinuation(const ReaderFrontend* reader) { - ReaderInfo *i = 0; +bool Manager::RemoveStreamContinuation(ReaderFrontend* reader) { + Filter *i = FindFilter(reader); - - for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) - { - if ( (*s)->reader && (*s)->reader == reader ) - { - i = *s; -#ifdef DEBUG - ODesc desc; - i->id->Describe(&desc); - DBG_LOG(DBG_INPUT, "Successfully executed removal of stream %s", - desc.Description()); -#endif - delete(i); - readers.erase(s); - return true; - } + if ( i == 0 ) { + reporter->Error("Stream not found in RemoveStreamContinuation"); + return false; } - - reporter->Error("Stream not found in RemoveStreamContinuation"); - return false; + + +#ifdef DEBUG + DBG_LOG(DBG_INPUT, "Successfully executed removal of stream %s", + i->name.c_str()); +#endif + readers.erase(reader); + delete(i); + return true; } bool Manager::UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend) { @@ -680,132 +623,24 @@ bool 
Manager::UnrollRecordType(vector *fields, const RecordType *rec, co return true; } -bool Manager::ForceUpdate(const EnumVal* id) +bool Manager::ForceUpdate(const string &name) { - ReaderInfo *i = FindReader(id); + Filter *i = FindFilter(name); if ( i == 0 ) { - reporter->Error("Reader not found"); + reporter->Error("Stream %s not found", name.c_str()); return false; } i->reader->Update(); #ifdef DEBUG - ODesc desc; - id->Describe(&desc); DBG_LOG(DBG_INPUT, "Forcing update of stream %s", - desc.Description()); + name.c_str()); #endif return true; // update is async :( } -bool Manager::RemoveTableFilter(EnumVal* id, const string &name) { - ReaderInfo *i = FindReader(id); - if ( i == 0 ) { - reporter->Error("Reader not found"); - return false; - } - - bool found = false; - int filterId; - - for ( map::iterator it = i->filters.begin(); it != i->filters.end(); ++it ) { - if ( (*it).second->name == name ) { - found = true; - filterId = (*it).first; - - if ( (*it).second->filter_type != TABLE_FILTER ) { - reporter->Error("Trying to remove filter %s of wrong type", name.c_str()); - return false; - } - - break; - } - } - - if ( !found ) { - reporter->Error("Trying to remove nonexisting filter %s", name.c_str()); - return false; - } - - i->reader->RemoveFilter(filterId); - -#ifdef DEBUG - ODesc desc; - id->Describe(&desc); - DBG_LOG(DBG_INPUT, "Queued removal of tablefilter %s for stream %s", - name.c_str(), desc.Description()); -#endif - - return true; -} - -bool Manager::RemoveFilterContinuation(const ReaderFrontend* reader, const int filterId) { - ReaderInfo *i = FindReader(reader); - if ( i == 0 ) { - reporter->Error("Reader not found"); - return false; - } - - map::iterator it = i->filters.find(filterId); - if ( it == i->filters.end() ) { - reporter->Error("Got RemoveFilterContinuation where filter nonexistant for %d", filterId); - return false; - } - -#ifdef DEBUG - ODesc desc; - i->id->Describe(&desc); - DBG_LOG(DBG_INPUT, "Executed removal of (table|event)-filter %s for stream %s", - (*it).second->name.c_str(), desc.Description()); -#endif - - delete (*it).second; - i->filters.erase(it); - - return true; -} - -bool Manager::RemoveEventFilter(EnumVal* id, const string &name) { - ReaderInfo *i = FindReader(id); - if ( i == 0 ) { - reporter->Error("Reader not found"); - return false; - } - - bool found = false; - int filterId; - for ( map::iterator it = i->filters.begin(); it != i->filters.end(); ++it ) { - if ( (*it).second->name == name ) { - found = true; - filterId = (*it).first; - - if ( (*it).second->filter_type != EVENT_FILTER ) { - reporter->Error("Trying to remove filter %s of wrong type", name.c_str()); - return false; - } - - break; - } - } - - if ( !found ) { - reporter->Error("Trying to remove nonexisting filter %s", name.c_str()); - return false; - } - - i->reader->RemoveFilter(filterId); - -#ifdef DEBUG - ODesc desc; - id->Describe(&desc); - DBG_LOG(DBG_INPUT, "Queued removal of eventfilter %s for stream %s", - name.c_str(), desc.Description()); -#endif - return true; -} - Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Value* const *vals) { Val* idxval; int position = 0; @@ -833,24 +668,19 @@ Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Valu } -void Manager::SendEntry(const ReaderFrontend* reader, const int id, Value* *vals) { - ReaderInfo *i = FindReader(reader); +void Manager::SendEntry(ReaderFrontend* reader, Value* *vals) { + Filter *i = FindFilter(reader); if ( i == 0 ) { - reporter->InternalError("Unknown reader"); 
- return; - } - - if ( !i->HasFilter(id) ) { - reporter->InternalError("Unknown filter"); + reporter->InternalError("Unknown reader in SendEntry"); return; } int readFields; - if ( i->filters[id]->filter_type == TABLE_FILTER ) { - readFields = SendEntryTable(reader, id, vals); - } else if ( i->filters[id]->filter_type == EVENT_FILTER ) { + if ( i->filter_type == TABLE_FILTER ) { + readFields = SendEntryTable(i, vals); + } else if ( i->filter_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - readFields = SendEventFilterEvent(reader, type, id, vals); + readFields = SendEventFilterEvent(i, type, vals); } else { assert(false); } @@ -861,16 +691,13 @@ void Manager::SendEntry(const ReaderFrontend* reader, const int id, Value* *vals delete [] vals; } -int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Value* const *vals) { - ReaderInfo *i = FindReader(reader); - +int Manager::SendEntryTable(Filter* i, const Value* const *vals) { bool updated = false; assert(i); - assert(i->HasFilter(id)); - assert(i->filters[id]->filter_type == TABLE_FILTER); - TableFilter* filter = (TableFilter*) i->filters[id]; + assert(i->filter_type == TABLE_FILTER); + TableFilter* filter = (TableFilter*) i; HashKey* idxhash = HashValues(filter->num_idx_fields, vals); @@ -1006,29 +833,25 @@ int Manager::SendEntryTable(const ReaderFrontend* reader, const int id, const Va } -void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { - ReaderInfo *i = FindReader(reader); +void Manager::EndCurrentSend(ReaderFrontend* reader) { + Filter *i = FindFilter(reader); if ( i == 0 ) { - reporter->InternalError("Unknown reader"); + reporter->InternalError("Unknown reader in EndCurrentSend"); return; } - assert(i->HasFilter(id)); - #ifdef DEBUG - ODesc desc; - i->id->Describe(&desc); - DBG_LOG(DBG_INPUT, "Got EndCurrentSend for filter %d and stream %s", - id, desc.Description()); + DBG_LOG(DBG_INPUT, "Got EndCurrentSend stream %s", + i->name.c_str()); #endif - if ( i->filters[id]->filter_type == EVENT_FILTER ) { + if ( i->filter_type == EVENT_FILTER ) { // nothing to do.. return; } - assert(i->filters[id]->filter_type == TABLE_FILTER); - TableFilter* filter = (TableFilter*) i->filters[id]; + assert(i->filter_type == TABLE_FILTER); + TableFilter* filter = (TableFilter*) i; // lastdict contains all deleted entries and should be empty apart from that IterCookie *c = filter->lastDict->InitForIteration(); @@ -1093,8 +916,8 @@ void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { filter->currDict = new PDict(InputHash); #ifdef DEBUG - DBG_LOG(DBG_INPUT, "EndCurrentSend complete for filter %d and stream %s, queueing update_finished event", - id, desc.Description()); + DBG_LOG(DBG_INPUT, "EndCurrentSend complete for stream %s, queueing update_finished event", + i->name.c_str()); #endif // Send event that the current update is indeed finished. 
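At the script level, the net effect of this refactoring is that Input::update_finished now identifies the finished stream by its configured name (a string) rather than by an Input::ID enum value. A minimal handler sketch, assuming a table stream created with $name="ssh" and a destination table called servers as in the updated basic.bro test later in this patch (the event's signature changes again later in the series):

event Input::update_finished(id: string)
	{
	if ( id == "ssh" )
		print servers;
	}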
@@ -1104,42 +927,40 @@ void Manager::EndCurrentSend(const ReaderFrontend* reader, int id) { } - Ref(i->id); - SendEvent(handler, 1, i->id); + SendEvent(handler, 1, new BroString(i->name)); } -void Manager::Put(const ReaderFrontend* reader, int id, Value* *vals) { - ReaderInfo *i = FindReader(reader); +void Manager::Put(ReaderFrontend* reader, Value* *vals) { + Filter *i = FindFilter(reader); if ( i == 0 ) { - reporter->InternalError("Unknown reader"); + reporter->InternalError("Unknown reader in Put"); return; } - if ( !i->HasFilter(id) ) { - reporter->InternalError("Unknown filter"); - return; - } - - if ( i->filters[id]->filter_type == TABLE_FILTER ) { - PutTable(reader, id, vals); - } else if ( i->filters[id]->filter_type == EVENT_FILTER ) { + int readFields; + if ( i->filter_type == TABLE_FILTER ) { + readFields = PutTable(i, vals); + } else if ( i->filter_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - SendEventFilterEvent(reader, type, id, vals); + readFields = SendEventFilterEvent(i, type, vals); } else { assert(false); } + + for ( int i = 0; i < readFields; i++ ) { + delete vals[i]; + } + delete [] vals; + } -int Manager::SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, int id, const Value* const *vals) { - ReaderInfo *i = FindReader(reader); - +int Manager::SendEventFilterEvent(Filter* i, EnumVal* type, const Value* const *vals) { bool updated = false; assert(i); - assert(i->HasFilter(id)); - assert(i->filters[id]->filter_type == EVENT_FILTER); - EventFilter* filter = (EventFilter*) i->filters[id]; + assert(i->filter_type == EVENT_FILTER); + EventFilter* filter = (EventFilter*) i; Val *val; list out_vals; @@ -1170,14 +991,11 @@ int Manager::SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, i } -int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const *vals) { - ReaderInfo *i = FindReader(reader); - +int Manager::PutTable(Filter* i, const Value* const *vals) { assert(i); - assert(i->HasFilter(id)); - assert(i->filters[id]->filter_type == TABLE_FILTER); - TableFilter* filter = (TableFilter*) i->filters[id]; + assert(i->filter_type == TABLE_FILTER); + TableFilter* filter = (TableFilter*) i; Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); Val* valval; @@ -1274,43 +1092,37 @@ int Manager::PutTable(const ReaderFrontend* reader, int id, const Value* const * } // Todo:: perhaps throw some kind of clear-event? -void Manager::Clear(const ReaderFrontend* reader, int id) { - ReaderInfo *i = FindReader(reader); +void Manager::Clear(ReaderFrontend* reader) { + Filter *i = FindFilter(reader); if ( i == 0 ) { - reporter->InternalError("Unknown reader"); + reporter->InternalError("Unknown reader in Clear"); return; } #ifdef DEBUG - ODesc desc; - i->id->Describe(&desc); - DBG_LOG(DBG_INPUT, "Got Clear for filter %d and stream %s", - id, desc.Description()); + DBG_LOG(DBG_INPUT, "Got Clear for stream %s", + i->name.c_str()); #endif - assert(i->HasFilter(id)); - - assert(i->filters[id]->filter_type == TABLE_FILTER); - TableFilter* filter = (TableFilter*) i->filters[id]; + assert(i->filter_type == TABLE_FILTER); + TableFilter* filter = (TableFilter*) i; filter->tab->RemoveAll(); } // put interface: delete old entry from table. 
-bool Manager::Delete(const ReaderFrontend* reader, int id, Value* *vals) { - ReaderInfo *i = FindReader(reader); +bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { + Filter *i = FindFilter(reader); if ( i == 0 ) { - reporter->InternalError("Unknown reader"); + reporter->InternalError("Unknown reader in Delete"); return false; } - assert(i->HasFilter(id)); - bool success = false; int readVals = 0; - if ( i->filters[id]->filter_type == TABLE_FILTER ) { - TableFilter* filter = (TableFilter*) i->filters[id]; + if ( i->filter_type == TABLE_FILTER ) { + TableFilter* filter = (TableFilter*) i; Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); assert(idxval != 0); readVals = filter->num_idx_fields + filter->num_val_fields; @@ -1352,9 +1164,9 @@ bool Manager::Delete(const ReaderFrontend* reader, int id, Value* *vals) { reporter->Error("Internal error while deleting values from input table"); } } - } else if ( i->filters[id]->filter_type == EVENT_FILTER ) { + } else if ( i->filter_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - readVals = SendEventFilterEvent(reader, type, id, vals); + readVals = SendEventFilterEvent(i, type, vals); success = true; } else { assert(false); @@ -1867,29 +1679,24 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) { return NULL; } -Manager::ReaderInfo* Manager::FindReader(const ReaderFrontend* reader) +Manager::Filter* Manager::FindFilter(const string &name) { - for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) + for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) { - if ( (*s)->reader && (*s)->reader == reader ) + if ( (*s).second->name == name ) { - return *s; + return (*s).second; } } return 0; } -Manager::ReaderInfo* Manager::FindReader(const EnumVal* id) - { - for ( vector::iterator s = readers.begin(); s != readers.end(); ++s ) - { - if ( (*s)->id && (*s)->id->AsEnum() == id->AsEnum() ) - { - return *s; - } - } - - return 0; +Manager::Filter* Manager::FindFilter(ReaderFrontend* reader) +{ + map::iterator s = readers.find(reader); + if ( s != readers.end() ) { + return s->second; } - + return 0; +} diff --git a/src/input/Manager.h b/src/input/Manager.h index 96ea0e43db..71169c4bc2 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -11,7 +11,7 @@ #include "../EventHandler.h" #include "../RemoteSerializer.h" -#include +#include namespace input { @@ -35,6 +35,9 @@ public: /** * Creates a new input stream. + * Add a filter to an input source, which will write the data from the data source into + * a Bro table. + * Add a filter to an input source, which sends events for read input data. * * @param id The enum value corresponding the input stream. * @@ -43,7 +46,9 @@ public: * This method corresponds directly to the internal BiF defined in * input.bif, which just forwards here. */ - ReaderFrontend* CreateStream(EnumVal* id, RecordVal* description); + bool CreateTableStream(RecordVal* description); + bool CreateEventStream(RecordVal* description); + /** * Force update on a input stream. @@ -57,7 +62,7 @@ public: * This method corresponds directly to the internal BiF defined in * input.bif, which just forwards here. */ - bool ForceUpdate(const EnumVal* id); + bool ForceUpdate(const string &id); /** * Deletes an existing input stream @@ -67,53 +72,8 @@ public: * This method corresponds directly to the internal BiF defined in * input.bif, which just forwards here. 
*/ - bool RemoveStream(const EnumVal* id); + bool RemoveStream(const string &id); - /** - * Add a filter to an input source, which will write the data from the data source into - * a Bro table. - * - * @param id The enum value corresponding the input stream. - * - * @param description A record of script type \c Input:TableFilter. - * - * This method corresponds directly to the internal BiF defined in - * input.bif, which just forwards here. - */ - bool AddTableFilter(EnumVal *id, RecordVal* filter); - - /** - * Removes a tablefilter from the log stream - * - * @param id The enum value corresponding the input stream. - * - * This method corresponds directly to the internal BiF defined in - * input.bif, which just forwards here. - */ - bool RemoveTableFilter(EnumVal* id, const string &name); - - /** - * Add a filter to an input source, which sends events for read input data. - * - * @param id The enum value corresponding the input stream. - * - * @param description A record of script type \c Input:EventFilter. - * - * This method corresponds directly to the internal BiF defined in - * input.bif, which just forwards here. - */ - bool AddEventFilter(EnumVal *id, RecordVal* filter); - - /** - * Removes a eventfilter from the log stream - * - * @param id The enum value corresponding the input stream. - * - * This method corresponds directly to the internal BiF defined in - * input.bif, which just forwards here. - */ - bool RemoveEventFilter(EnumVal* id, const string &name); - protected: friend class ReaderFrontend; friend class PutMessage; @@ -122,19 +82,18 @@ protected: friend class SendEventMessage; friend class SendEntryMessage; friend class EndCurrentSendMessage; - friend class FilterRemovedMessage; friend class ReaderFinishedMessage; // For readers to write to input stream in direct mode (reporting new/deleted values directly) // Functions take ownership of threading::Value fields - void Put(const ReaderFrontend* reader, int id, threading::Value* *vals); - void Clear(const ReaderFrontend* reader, int id); - bool Delete(const ReaderFrontend* reader, int id, threading::Value* *vals); + void Put(ReaderFrontend* reader, threading::Value* *vals); + void Clear(ReaderFrontend* reader); + bool Delete(ReaderFrontend* reader, threading::Value* *vals); // for readers to write to input stream in indirect mode (manager is monitoring new/deleted values) // Functions take ownership of threading::Value fields - void SendEntry(const ReaderFrontend* reader, const int id, threading::Value* *vals); - void EndCurrentSend(const ReaderFrontend* reader, const int id); + void SendEntry(ReaderFrontend* reader, threading::Value* *vals); + void EndCurrentSend(ReaderFrontend* reader); // Allows readers to directly send Bro events. // The num_vals and vals must be the same the named event expects. @@ -150,20 +109,23 @@ protected: // Used to prevent race conditions where data for a specific filter is still in the queue when the // RemoveFilter directive is executed by the main thread. // This makes sure all data that has ben queued for a filter is still received. 
- bool RemoveFilterContinuation(const ReaderFrontend* reader, const int filterId); - bool RemoveStreamContinuation(const ReaderFrontend* reader); + bool RemoveStreamContinuation(ReaderFrontend* reader); private: - struct ReaderInfo; + class Filter; + class TableFilter; + class EventFilter; + + bool CreateStream(Filter*, RecordVal* description); // SendEntry implementation for Tablefilter - int SendEntryTable(const ReaderFrontend* reader, int id, const threading::Value* const *vals); + int SendEntryTable(Filter* i, const threading::Value* const *vals); // Put implementation for Tablefilter - int PutTable(const ReaderFrontend* reader, int id, const threading::Value* const *vals); + int PutTable(Filter* i, const threading::Value* const *vals); // SendEntry and Put implementation for Eventfilter - int SendEventFilterEvent(const ReaderFrontend* reader, EnumVal* type, int id, const threading::Value* const *vals); + int SendEventFilterEvent(Filter* i, EnumVal* type, const threading::Value* const *vals); // Checks is a bro type can be used for data reading. The equivalend in threading cannot be used, because we have support different types // from the log framework @@ -200,16 +162,12 @@ private: // Converts a Bro ListVal to a RecordVal given the record type RecordVal* ListValToRecordVal(ListVal* list, RecordType *request_type, int* position); - ReaderInfo* FindReader(const ReaderFrontend* reader); - ReaderInfo* FindReader(const EnumVal* id); - - vector readers; - - class Filter; - class TableFilter; - class EventFilter; + Filter* FindFilter(const string &name); + Filter* FindFilter(ReaderFrontend* reader); enum FilterType { TABLE_FILTER, EVENT_FILTER }; + + map readers; }; diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 5af02b1acc..b33e19d297 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -11,48 +11,44 @@ namespace input { class PutMessage : public threading::OutputMessage { public: - PutMessage(ReaderFrontend* reader, int id, Value* *val) + PutMessage(ReaderFrontend* reader, Value* *val) : threading::OutputMessage("Put", reader), - id(id), val(val) {} + val(val) {} virtual bool Process() { - input_mgr->Put(Object(), id, val); + input_mgr->Put(Object(), val); return true; } private: - int id; Value* *val; }; class DeleteMessage : public threading::OutputMessage { public: - DeleteMessage(ReaderFrontend* reader, int id, Value* *val) + DeleteMessage(ReaderFrontend* reader, Value* *val) : threading::OutputMessage("Delete", reader), - id(id), val(val) {} + val(val) {} virtual bool Process() { - return input_mgr->Delete(Object(), id, val); + return input_mgr->Delete(Object(), val); } private: - int id; Value* *val; }; class ClearMessage : public threading::OutputMessage { public: - ClearMessage(ReaderFrontend* reader, int id) - : threading::OutputMessage("Clear", reader), - id(id) {} + ClearMessage(ReaderFrontend* reader) + : threading::OutputMessage("Clear", reader) {} virtual bool Process() { - input_mgr->Clear(Object(), id); + input_mgr->Clear(Object()); return true; } private: - int id; }; class SendEventMessage : public threading::OutputMessage { @@ -73,47 +69,30 @@ private: class SendEntryMessage : public threading::OutputMessage { public: - SendEntryMessage(ReaderFrontend* reader, const int id, Value* *val) + SendEntryMessage(ReaderFrontend* reader, Value* *val) : threading::OutputMessage("SendEntry", reader), - id(id), val(val) { } + val(val) { } virtual bool Process() { - input_mgr->SendEntry(Object(), id, val); + 
input_mgr->SendEntry(Object(), val); return true; } private: - const int id; Value* *val; }; class EndCurrentSendMessage : public threading::OutputMessage { public: - EndCurrentSendMessage(ReaderFrontend* reader, const int id) - : threading::OutputMessage("EndCurrentSend", reader), - id(id) {} + EndCurrentSendMessage(ReaderFrontend* reader) + : threading::OutputMessage("EndCurrentSend", reader) {} virtual bool Process() { - input_mgr->EndCurrentSend(Object(), id); + input_mgr->EndCurrentSend(Object()); return true; } private: - const int id; -}; - -class FilterRemovedMessage : public threading::OutputMessage { -public: - FilterRemovedMessage(ReaderFrontend* reader, const int id) - : threading::OutputMessage("FilterRemoved", reader), - id(id) {} - - virtual bool Process() { - return input_mgr->RemoveFilterContinuation(Object(), id); - } - -private: - const int id; }; class ReaderFinishedMessage : public threading::OutputMessage { @@ -155,19 +134,19 @@ ReaderBackend::~ReaderBackend() } -void ReaderBackend::Put(int id, Value* *val) +void ReaderBackend::Put(Value* *val) { - SendOut(new PutMessage(frontend, id, val)); + SendOut(new PutMessage(frontend, val)); } -void ReaderBackend::Delete(int id, Value* *val) +void ReaderBackend::Delete(Value* *val) { - SendOut(new DeleteMessage(frontend, id, val)); + SendOut(new DeleteMessage(frontend, val)); } -void ReaderBackend::Clear(int id) +void ReaderBackend::Clear() { - SendOut(new ClearMessage(frontend, id)); + SendOut(new ClearMessage(frontend)); } void ReaderBackend::SendEvent(const string& name, const int num_vals, Value* *vals) @@ -175,70 +154,32 @@ void ReaderBackend::SendEvent(const string& name, const int num_vals, Value* *va SendOut(new SendEventMessage(frontend, name, num_vals, vals)); } -void ReaderBackend::EndCurrentSend(int id) +void ReaderBackend::EndCurrentSend() { - SendOut(new EndCurrentSendMessage(frontend, id)); + SendOut(new EndCurrentSendMessage(frontend)); } -void ReaderBackend::SendEntry(int id, Value* *vals) +void ReaderBackend::SendEntry(Value* *vals) { - SendOut(new SendEntryMessage(frontend, id, vals)); + SendOut(new SendEntryMessage(frontend, vals)); } -bool ReaderBackend::Init(string arg_source, int mode, bool arg_autostart) +bool ReaderBackend::Init(string arg_source, int mode, const int arg_num_fields, const threading::Field* const* arg_fields) { source = arg_source; - autostart = arg_autostart; SetName("InputReader/"+source); // disable if DoInit returns error. - disabled = !DoInit(arg_source, mode); + int success = DoInit(arg_source, mode, arg_num_fields, arg_fields); - if ( disabled ) { + if ( !success ) { Error("Init failed"); DisableFrontend(); } - return !disabled; -} - -bool ReaderBackend::StartReading() { - if ( disabled ) - return false; - - int success = DoStartReading(); - - if ( success == false ) { - DisableFrontend(); - } - return success; } -bool ReaderBackend::AddFilter(int id, int arg_num_fields, - const Field* const * arg_fields) -{ - if ( disabled ) - return false; - - bool success = DoAddFilter(id, arg_num_fields, arg_fields); - if ( success && autostart) { - autostart = false; - return StartReading(); - } - return success; -} - -bool ReaderBackend::RemoveFilter(int id) -{ - if ( disabled ) - return false; - - bool success = DoRemoveFilter(id); - SendOut(new FilterRemovedMessage(frontend, id)); - return success; // yes, I know, noone reads this. 
-} - void ReaderBackend::Finish() { DoFinish(); diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index e167f8ff47..28fd99f2b9 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -53,46 +53,14 @@ public: * * @param mode the opening mode for the input source * - * @param autostart automatically start the input source after the first filter has been added - * - * @return False if an error occured. - */ - bool Init(string arg_source, int mode, bool autostart); - - /** - * One-time start method of the reader. - * - * This method is called from the scripting layer, after all filters have been added. - * No data should be read before this method is called. - * - * If autostart in Init is set to true, this method is called automatically by the backend after - * the first filter has been added. - */ - bool StartReading(); - - /** - * Add an input filter to the input stream - * - * @param id identifier of the input stream - * * @param arg_num_fields number of fields contained in \a fields * * @param fields the types and names of the fields to be retrieved from the input source * * @return False if an error occured. */ - bool AddFilter( int id, int arg_num_fields, const threading::Field* const* fields ); + bool Init(string arg_source, int mode, int arg_num_fields, const threading::Field* const* fields); - - /** - * Remove an input filter to the input stream - * - * @param id identifier of the input stream - * - * @return False if an error occured. - */ - bool RemoveFilter ( int id ); - /** * Finishes reading from this input stream in a regular fashion. Must not be * called if an error has been indicated earlier. After calling this, @@ -131,33 +99,7 @@ protected: * disabled and eventually deleted. When returning false, an * implementation should also call Error() to indicate what happened. */ - virtual bool DoInit(string arg_sources, int mode) = 0; - - /** - * Reader-specific start method. After this function has been called, data may be read from - * the input source and be sent to the specified filters - * - * A reader implementation must override this method. - * If it returns false, it will be assumed that a fatal error has occured - * that prevents the reader from further operation; it will then be - * disabled and eventually deleted. When returning false, an implementation - * should also call Error to indicate what happened. - */ - virtual bool DoStartReading() = 0; - - /** - * Reader-specific method to add a filter. - * - * A reader implementation must override this method. - */ - virtual bool DoAddFilter( int id, int arg_num_fields, const threading::Field* const* fields ) = 0; - - /** - * Reader-specific method to remove a filter. - * - * A reader implementation must override this method. - */ - virtual bool DoRemoveFilter( int id ) = 0; + virtual bool DoInit(string arg_sources, int mode, int arg_num_fields, const threading::Field* const* fields) = 0; /** * Reader-specific method implementing input finalization at @@ -209,31 +151,26 @@ protected: * * If the filter points to a table, the values are inserted into the table; if it points to an event, the event is raised * - * @param id the input filter id for which the values are sent - * * @param val list of threading::Values expected by the filter */ - void Put(int id, threading::Value* *val); + void Put(threading::Value* *val); /** * Method allowing a reader to delete a specific value from a bro table. 
* * If the receiving filter is an event, only a removed event is raised * - * @param id the input filter id for which the values are sent - * * @param val list of threading::Values expected by the filter */ - void Delete(int id, threading::Value* *val); + void Delete(threading::Value* *val); /** * Method allowing a reader to clear a value from a bro table. * * If the receiving filter is an event, this is ignored. * - * @param id the input filter id for which the values are sent */ - void Clear(int id); + void Clear(); // Content-sending-functions (tracking mode): Only changed lines are propagated. @@ -243,11 +180,9 @@ protected: * * If the filter points to a table, the values are inserted into the table; if it points to an event, the event is raised. * - * @param id the input filter id for which the values are sent - * * @param val list of threading::Values expected by the filter */ - void SendEntry(int id, threading::Value* *vals); + void SendEntry(threading::Value* *vals); /** * Method telling the manager, that the current list of entries sent by SendEntry is finished. @@ -255,9 +190,8 @@ protected: * For table filters, all entries that were not updated since the last EndCurrentSend will be deleted, because they are no longer * present in the input source * - * @param id the input filter id for which the values are sent */ - void EndCurrentSend(int id); + void EndCurrentSend(); /** * Triggered by regular heartbeat messages from the main thread. diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index f7fc23bf72..9711997821 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -12,16 +12,17 @@ namespace input { class InitMessage : public threading::InputMessage { public: - InitMessage(ReaderBackend* backend, const string source, const int mode, const bool autostart) + InitMessage(ReaderBackend* backend, const string source, const int mode, const int num_fields, const threading::Field* const* fields) : threading::InputMessage("Init", backend), - source(source), mode(mode), autostart(autostart) { } + source(source), mode(mode), num_fields(num_fields), fields(fields) { } - virtual bool Process() { return Object()->Init(source, mode, autostart); } + virtual bool Process() { return Object()->Init(source, mode, num_fields, fields); } private: const string source; const int mode; - const bool autostart; + const int num_fields; + const threading::Field* const* fields; }; class UpdateMessage : public threading::InputMessage @@ -44,44 +45,6 @@ public: virtual bool Process() { Object()->Finish(); return true; } }; -class StartReadingMessage : public threading::InputMessage -{ -public: - StartReadingMessage(ReaderBackend* backend) - : threading::InputMessage("StartReading", backend) - { } - - virtual bool Process() { Object()->StartReading(); return true; } -}; - -class AddFilterMessage : public threading::InputMessage -{ -public: - AddFilterMessage(ReaderBackend* backend, const int id, const int num_fields, const threading::Field* const* fields) - : threading::InputMessage("AddFilter", backend), - id(id), num_fields(num_fields), fields(fields) { } - - virtual bool Process() { return Object()->AddFilter(id, num_fields, fields); } - -private: - const int id; - const int num_fields; - const threading::Field* const* fields; -}; - -class RemoveFilterMessage : public threading::InputMessage -{ -public: - RemoveFilterMessage(ReaderBackend* backend, const int id) - : threading::InputMessage("RemoveFilter", backend), - id(id) { } - - virtual bool Process() { return 
Object()->RemoveFilter(id); } - -private: - const int id; -}; - ReaderFrontend::ReaderFrontend(bro_int_t type) { disabled = initialized = false; @@ -95,7 +58,7 @@ ReaderFrontend::ReaderFrontend(bro_int_t type) { ReaderFrontend::~ReaderFrontend() { } -void ReaderFrontend::Init(string arg_source, int mode, bool autostart) { +void ReaderFrontend::Init(string arg_source, int mode, const int num_fields, const threading::Field* const* fields) { if ( disabled ) return; @@ -105,37 +68,33 @@ void ReaderFrontend::Init(string arg_source, int mode, bool autostart) { source = arg_source; initialized = true; - backend->SendIn(new InitMessage(backend, arg_source, mode, autostart)); + backend->SendIn(new InitMessage(backend, arg_source, mode, num_fields, fields)); } void ReaderFrontend::Update() { if ( disabled ) return; + if ( !initialized ) { + reporter->Error("Tried to call update on uninitialized reader"); + return; + } + backend->SendIn(new UpdateMessage(backend)); } void ReaderFrontend::Finish() { if ( disabled ) return; + + if ( !initialized ) { + reporter->Error("Tried to call finish on uninitialized reader"); + return; + } backend->SendIn(new FinishMessage(backend)); } -void ReaderFrontend::AddFilter(const int id, const int arg_num_fields, const threading::Field* const* fields) { - if ( disabled ) - return; - - backend->SendIn(new AddFilterMessage(backend, id, arg_num_fields, fields)); -} - -void ReaderFrontend::RemoveFilter(const int id) { - if ( disabled ) - return; - - backend->SendIn(new RemoveFilterMessage(backend, id)); -} - string ReaderFrontend::Name() const { if ( source.size() ) @@ -144,13 +103,5 @@ string ReaderFrontend::Name() const return ty_name + "/" + source; } -void ReaderFrontend::StartReading() { - if ( disabled ) - return; - - backend->SendIn(new StartReadingMessage(backend)); } -} - - diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index 0df4c00e8c..1c3306e0c1 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -49,18 +49,7 @@ public: * See ReaderBackend::Init() for arguments. * This method must only be called from the main thread. */ - void Init(string arg_source, int mode, bool autostart); - - /** - * Start the reader. - * - * This methods starts the reader, after all necessary filters have been added. - * It is not necessary to call this function, if autostart has been set. - * If autostart has been set, the reader will be initialized automatically after the first filter has been added - * - * This method must only be called from the main thread. - */ - void StartReading(); + void Init(string arg_source, int mode, const int arg_num_fields, const threading::Field* const* fields); /** * Force an update of the current input source. Actual action depends on @@ -72,20 +61,6 @@ public: */ void Update(); - /** - * Add a filter to the current input source. - * - * See ReaderBackend::AddFilter for arguments. - * - * The method takes ownership of \a fields - */ - void AddFilter( const int id, const int arg_num_fields, const threading::Field* const* fields ); - - /** - * Removes a filter to the current input source. - */ - void RemoveFilter ( const int id ); - /** * Finalizes writing to this tream. 
* diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index ab26442524..bb59b3fc1d 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -76,7 +76,6 @@ Ascii::~Ascii() void Ascii::DoFinish() { - filters.empty(); if ( file != 0 ) { file->close(); delete(file); @@ -84,9 +83,8 @@ void Ascii::DoFinish() } } -bool Ascii::DoInit(string path, int arg_mode) +bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) { - started = false; fname = path; mode = arg_mode; mtime = 0; @@ -107,17 +105,10 @@ bool Ascii::DoInit(string path, int arg_mode) file->close(); return false; } + + num_fields = arg_num_fields; + fields = arg_fields; - return true; -} - -bool Ascii::DoStartReading() { - if ( started == true ) { - Error("Started twice"); - return false; - } - - started = true; switch ( mode ) { case MANUAL: case REREAD: @@ -131,46 +122,11 @@ bool Ascii::DoStartReading() { return true; } -bool Ascii::DoAddFilter( int id, int arg_num_fields, const Field* const* fields ) { - if ( HasFilter(id) ) { - Error("Filter was added twice, ignoring."); - return false; // no, we don't want to add this a second time - } - - Filter f; - f.num_fields = arg_num_fields; - f.fields = fields; - - filters[id] = f; - - return true; -} - -bool Ascii::DoRemoveFilter ( int id ) { - if (!HasFilter(id) ) { - Error("Filter removal of nonexisting filter requested."); - return false; - } - - assert ( filters.erase(id) == 1 ); - - return true; -} - - -bool Ascii::HasFilter(int id) { - map::iterator it = filters.find(id); - if ( it == filters.end() ) { - return false; - } - return true; -} - bool Ascii::ReadHeader(bool useCached) { // try to read the header line... string line; - map fields; + map ifields; if ( !useCached ) { if ( !GetLine(line) ) { @@ -194,37 +150,35 @@ bool Ascii::ReadHeader(bool useCached) { if ( !getline(splitstream, s, separator[0])) break; - fields[s] = pos; + ifields[s] = pos; pos++; } //printf("Updating fields from description %s\n", line.c_str()); - for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { - (*it).second.columnMap.clear(); - - for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { - const Field* field = (*it).second.fields[i]; - - map::iterator fit = fields.find(field->name); - if ( fit == fields.end() ) { - Error(Fmt("Did not find requested field %s in input data file.", field->name.c_str())); - return false; - } - - - FieldMapping f(field->name, field->type, field->subtype, fields[field->name]); - if ( field->secondary_name != "" ) { - map::iterator fit2 = fields.find(field->secondary_name); - if ( fit2 == fields.end() ) { - Error(Fmt("Could not find requested port type field %s in input data file.", field->secondary_name.c_str())); - return false; - } - f.secondary_position = fields[field->secondary_name]; - } - (*it).second.columnMap.push_back(f); + columnMap.clear(); + + for ( unsigned int i = 0; i < num_fields; i++ ) { + const Field* field = fields[i]; + + map::iterator fit = ifields.find(field->name); + if ( fit == ifields.end() ) { + Error(Fmt("Did not find requested field %s in input data file.", field->name.c_str())); + return false; } + + FieldMapping f(field->name, field->type, field->subtype, ifields[field->name]); + if ( field->secondary_name != "" ) { + map::iterator fit2 = ifields.find(field->secondary_name); + if ( fit2 == ifields.end() ) { + Error(Fmt("Could not find requested port type field %s in input data file.", field->secondary_name.c_str())); + return false; + } + 
f.secondary_position = ifields[field->secondary_name]; + } + columnMap.push_back(f); } + // well, that seems to have worked... return true; @@ -461,57 +415,55 @@ bool Ascii::DoUpdate() { pos--; // for easy comparisons of max element. - for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { - Value** fields = new Value*[(*it).second.num_fields]; + Value** fields = new Value*[num_fields]; - int fpos = 0; - for ( vector::iterator fit = (*it).second.columnMap.begin(); - fit != (*it).second.columnMap.end(); - fit++ ){ + int fpos = 0; + for ( vector::iterator fit = columnMap.begin(); + fit != columnMap.end(); + fit++ ){ - if ( (*fit).position > pos || (*fit).secondary_position > pos ) { - Error(Fmt("Not enough fields in line %s. Found %d fields, want positions %d and %d", line.c_str(), pos, (*fit).position, (*fit).secondary_position)); - return false; - } - - Value* val = EntryToVal(stringfields[(*fit).position], *fit); - if ( val == 0 ) { - Error("Could not convert String value to Val"); - return false; - } - - if ( (*fit).secondary_position != -1 ) { - // we have a port definition :) - assert(val->type == TYPE_PORT ); - // Error(Fmt("Got type %d != PORT with secondary position!", val->type)); - - val->val.port_val.proto = StringToProto(stringfields[(*fit).secondary_position]); - } - - fields[fpos] = val; - - fpos++; + if ( (*fit).position > pos || (*fit).secondary_position > pos ) { + Error(Fmt("Not enough fields in line %s. Found %d fields, want positions %d and %d", line.c_str(), pos, (*fit).position, (*fit).secondary_position)); + return false; } - //printf("fpos: %d, second.num_fields: %d\n", fpos, (*it).second.num_fields); - assert ( (unsigned int) fpos == (*it).second.num_fields ); + Value* val = EntryToVal(stringfields[(*fit).position], *fit); + if ( val == 0 ) { + Error("Could not convert String value to Val"); + return false; + } + + if ( (*fit).secondary_position != -1 ) { + // we have a port definition :) + assert(val->type == TYPE_PORT ); + // Error(Fmt("Got type %d != PORT with secondary position!", val->type)); - if ( mode == STREAM ) { - Put((*it).first, fields); - } else { - SendEntry((*it).first, fields); + val->val.port_val.proto = StringToProto(stringfields[(*fit).secondary_position]); } - /* Do not do this, ownership changes to other thread - * for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { - delete fields[i]; - } - delete [] fields; - */ + fields[fpos] = val; + + fpos++; } + //printf("fpos: %d, second.num_fields: %d\n", fpos, (*it).second.num_fields); + assert ( (unsigned int) fpos == num_fields ); + + if ( mode == STREAM ) { + Put(fields); + } else { + SendEntry(fields); + } + + /* Do not do this, ownership changes to other thread + * for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { + delete fields[i]; + } + delete [] fields; + */ + } @@ -519,9 +471,7 @@ bool Ascii::DoUpdate() { //file->seekg(0, ios::beg); // and seek to start. 
if ( mode != STREAM ) { - for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { - EndCurrentSend((*it).first); - } + EndCurrentSend(); } return true; diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index 3bb0e91853..40f92be717 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -39,33 +39,22 @@ public: protected: - virtual bool DoInit(string path, int mode); - - virtual bool DoAddFilter( int id, int arg_num_fields, const threading::Field* const* fields ); - - virtual bool DoRemoveFilter ( int id ); + virtual bool DoInit(string path, int mode, int arg_num_fields, const threading::Field* const* fields); virtual void DoFinish(); virtual bool DoUpdate(); - virtual bool DoStartReading(); - private: virtual bool DoHeartbeat(double network_time, double current_time); - struct Filter { - unsigned int num_fields; + unsigned int num_fields; - const threading::Field* const * fields; // raw mapping + const threading::Field* const * fields; // raw mapping - // map columns in the file to columns to send back to the manager - vector columnMap; - - }; - - bool HasFilter(int id); + // map columns in the file to columns to send back to the manager + vector columnMap; bool ReadHeader(bool useCached); threading::Value* EntryToVal(string s, FieldMapping type); @@ -75,8 +64,6 @@ private: ifstream* file; string fname; - map filters; - // Options set from the script-level. string separator; @@ -91,7 +78,6 @@ private: int mode; - bool started; time_t mtime; }; diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index f2892e7af5..27415b525f 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -40,7 +40,6 @@ Raw::~Raw() void Raw::DoFinish() { - filters.empty(); if ( file != 0 ) { file->close(); delete(file); @@ -48,9 +47,8 @@ void Raw::DoFinish() } } -bool Raw::DoInit(string path, int arg_mode) +bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) { - started = false; fname = path; mode = arg_mode; mtime = 0; @@ -66,16 +64,19 @@ bool Raw::DoInit(string path, int arg_mode) return false; } - return true; -} - -bool Raw::DoStartReading() { - if ( started == true ) { - Error("Started twice"); + if ( arg_num_fields != 1 ) { + Error("Filter for raw reader contains more than one field. Filters for the raw reader may only contain exactly one string field. Filter ignored."); return false; - } + } + + if ( fields[0]->type != TYPE_STRING ) { + Error("Filter for raw reader contains a field that is not of type string."); + return false; + } + + num_fields = arg_num_fields; + fields = arg_fields; - started = true; switch ( mode ) { case MANUAL: case REREAD: @@ -89,51 +90,6 @@ bool Raw::DoStartReading() { return true; } -bool Raw::DoAddFilter( int id, int arg_num_fields, const Field* const* fields ) { - - if ( arg_num_fields != 1 ) { - Error("Filter for raw reader contains more than one field. Filters for the raw reader may only contain exactly one string field. 
Filter ignored."); - return false; - } - - if ( fields[0]->type != TYPE_STRING ) { - Error("Filter for raw reader contains a field that is not of type string."); - return false; - } - - if ( HasFilter(id) ) { - Error("Filter was added twice, ignoring"); - return false; // no, we don't want to add this a second time - } - - Filter f; - f.num_fields = arg_num_fields; - f.fields = fields; - - filters[id] = f; - - return true; -} - -bool Raw::DoRemoveFilter ( int id ) { - if (!HasFilter(id) ) { - Error("Filter removal of nonexisting filter requested."); - return false; - } - - assert ( filters.erase(id) == 1 ); - - return true; -} - - -bool Raw::HasFilter(int id) { - map::iterator it = filters.find(id); - if ( it == filters.end() ) { - return false; - } - return true; -} bool Raw::GetLine(string& str) { while ( getline(*file, str, separator[0]) ) { @@ -188,21 +144,16 @@ bool Raw::DoUpdate() { string line; while ( GetLine(line) ) { - for ( map::iterator it = filters.begin(); it != filters.end(); it++ ) { + assert (num_fields == 1); + + Value** fields = new Value*[1]; - assert ((*it).second.num_fields == 1); + // filter has exactly one text field. convert to it. + Value* val = new Value(TYPE_STRING, true); + val->val.string_val = new string(line); + fields[0] = val; - Value** fields = new Value*[1]; - - // filter has exactly one text field. convert to it. - Value* val = new Value(TYPE_STRING, true); - val->val.string_val = new string(line); - fields[0] = val; - - Put((*it).first, fields); - - } - + Put(fields); } return true; diff --git a/src/input/readers/Raw.h b/src/input/readers/Raw.h index e046cb2ff7..ace4e0ee88 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/Raw.h @@ -19,37 +19,21 @@ public: protected: - virtual bool DoInit(string path, int mode); - - virtual bool DoAddFilter( int id, int arg_num_fields, const threading::Field* const* fields ); - - virtual bool DoRemoveFilter ( int id ); + virtual bool DoInit(string path, int mode, int arg_num_fields, const threading::Field* const* fields); virtual void DoFinish(); virtual bool DoUpdate(); - virtual bool DoStartReading(); - private: virtual bool DoHeartbeat(double network_time, double current_time); - struct Filter { - unsigned int num_fields; - - const threading::Field* const * fields; // raw mapping - }; - - bool HasFilter(int id); - bool GetLine(string& str); ifstream* file; string fname; - map filters; - // Options set from the script-level. string separator; @@ -58,8 +42,11 @@ private: int mode; - bool started; time_t mtime; + + unsigned int num_fields; + + const threading::Field* const * fields; // raw mapping }; diff --git a/src/types.bif b/src/types.bif index a9c6ecb3a8..26850bfa93 100644 --- a/src/types.bif +++ b/src/types.bif @@ -182,10 +182,6 @@ enum Event %{ EVENT_REMOVED, %} -enum ID %{ - Unknown, -%} - enum Mode %{ MANUAL = 0, REREAD = 1, diff --git a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro index 156898edca..827b1ce283 100644 --- a/testing/btest/scripts/base/frameworks/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/input/basic.bro @@ -14,10 +14,6 @@ redef InputAscii::empty_field = "EMPTY"; module A; -export { - redef enum Input::ID += { INPUT }; -} - type Idx: record { i: int; }; @@ -45,12 +41,10 @@ global servers: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... 
- Input::create_stream(A::INPUT, [$source="input.log"]); - Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers]); - Input::remove_tablefilter(A::INPUT, "ssh"); - Input::remove_stream(A::INPUT); + Input::add_table([$source="input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::remove("ssh"); } -event Input::update_finished(id: Input::ID) { +event Input::update_finished(id: string) { print servers; } From 367c4b4a7e7d8522d16a64f1d113a56104cab316 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 16 Mar 2012 07:53:29 -0700 Subject: [PATCH 139/651] Make raw reading work. Apparently there was a crash in the reader plugin, but main Bro did not notice and waited for eternity for it to do something. --- src/input/Manager.cc | 18 ++++++++---------- src/input/readers/Raw.cc | 9 +++++++-- .../scripts/base/frameworks/input/raw.bro | 9 ++------- 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 44b5bf44db..af82b676c6 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -220,11 +220,8 @@ bool Manager::CreateStream(Filter* info, RecordVal* description) info->name = name; info->source = source; - -#ifdef DEBUG - DBG_LOG(DBG_INPUT, "Successfully created new input stream %s", - name.c_str()); -#endif + DBG_LOG(DBG_INPUT, "Successfully created new input stream %s", + name.c_str()); return true; @@ -334,6 +331,10 @@ bool Manager::CreateEventStream(RecordVal* fval) { filter->reader->Init(filter->source, filter->mode, filter->num_fields, logf ); readers[filter->reader] = filter; + + DBG_LOG(DBG_INPUT, "Successfully created event stream %s", + filter->name.c_str()); + return true; } @@ -482,12 +483,9 @@ bool Manager::CreateTableStream(RecordVal* fval) { filter->reader->Init(filter->source, filter->mode, fieldsV.size(), fields ); readers[filter->reader] = filter; - -#ifdef DEBUG - DBG_LOG(DBG_INPUT, "Successfully created table stream %s", - filter->name.c_str()); -#endif + DBG_LOG(DBG_INPUT, "Successfully created table stream %s", + filter->name.c_str()); return true; } diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index 27415b525f..a83314c491 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -63,6 +63,9 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con Error(Fmt("Init: cannot open %s", fname.c_str())); return false; } + + num_fields = arg_num_fields; + fields = arg_fields; if ( arg_num_fields != 1 ) { Error("Filter for raw reader contains more than one field. Filters for the raw reader may only contain exactly one string field. 
Filter ignored."); @@ -74,8 +77,9 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con return false; } - num_fields = arg_num_fields; - fields = arg_fields; +#ifdef DEBUG + Debug(DBG_INPUT, "Raw reader created, will perform first update"); +#endif switch ( mode ) { case MANUAL: @@ -87,6 +91,7 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con assert(false); } + return true; } diff --git a/testing/btest/scripts/base/frameworks/input/raw.bro b/testing/btest/scripts/base/frameworks/input/raw.bro index 5f196648b6..4de5c3450e 100644 --- a/testing/btest/scripts/base/frameworks/input/raw.bro +++ b/testing/btest/scripts/base/frameworks/input/raw.bro @@ -1,5 +1,5 @@ # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: bro -b %INPUT >out # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -16,10 +16,6 @@ sdf module A; -export { - redef enum Input::ID += { INPUT }; -} - type Val: record { s: string; }; @@ -30,6 +26,5 @@ event line(tpe: Input::Event, s: string) { event bro_init() { - Input::create_stream(A::INPUT, [$source="input.log", $reader=Input::READER_RAW, $mode=Input::STREAM]); - Input::add_eventfilter(A::INPUT, [$name="input", $fields=Val, $ev=line]); + Input::add_event([$source="input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line]); } From 842f635695d67f027ff2911aa41272ec9ceabf71 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 16 Mar 2012 08:10:28 -0700 Subject: [PATCH 140/651] give EventDescripion field back to events --- src/input/Manager.cc | 25 ++++++++++++++----- .../scripts/base/frameworks/input/raw.bro | 3 ++- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index af82b676c6..7ac1de92c2 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -41,11 +41,14 @@ public: EnumVal* type; ReaderFrontend* reader; + RecordVal* description; + virtual ~Filter(); }; Manager::Filter::~Filter() { Unref(type); + Unref(description); delete(reader); } @@ -219,6 +222,8 @@ bool Manager::CreateStream(Filter* info, RecordVal* description) info->type = reader->AsEnumVal(); // ref'd by lookupwithdefault info->name = name; info->source = source; + Ref(description); + info->description = description; DBG_LOG(DBG_INPUT, "Successfully created new input stream %s", name.c_str()); @@ -274,27 +279,33 @@ bool Manager::CreateEventStream(RecordVal* fval) { reporter->Error("events first attribute must be of type Input::Event"); return false; } + + if ( ! 
same_type((*args)[1], BifType::Record::Input::EventDescription, 0) ) + { + reporter->Error("events second attribute must be of type Input::EventDescription"); + return false; + } if ( want_record->InternalInt() == 0 ) { - if ( args->length() != fields->NumFields() + 1 ) { - reporter->Error("events has wrong number of arguments"); + if ( args->length() != fields->NumFields() + 2 ) { + reporter->Error("event has wrong number of arguments"); return false; } for ( int i = 0; i < fields->NumFields(); i++ ) { - if ( !same_type((*args)[i+1], fields->FieldType(i) ) ) { + if ( !same_type((*args)[i+2], fields->FieldType(i) ) ) { reporter->Error("Incompatible type for event"); return false; } } } else if ( want_record->InternalInt() == 1 ) { - if ( args->length() != 2 ) { - reporter->Error("events has wrong number of arguments"); + if ( args->length() != 3 ) { + reporter->Error("event has wrong number of arguments"); return false; } - if ( !same_type((*args)[1], fields ) ) { + if ( !same_type((*args)[2], fields ) ) { reporter->Error("Incompatible type for event"); return false; } @@ -965,6 +976,8 @@ int Manager::SendEventFilterEvent(Filter* i, EnumVal* type, const Value* const * // no tracking, send everything with a new event... //out_vals.push_back(new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event)); out_vals.push_back(type); + Ref(filter->description); + out_vals.push_back(filter->description); int position = 0; if ( filter->want_record ) { diff --git a/testing/btest/scripts/base/frameworks/input/raw.bro b/testing/btest/scripts/base/frameworks/input/raw.bro index 4de5c3450e..0399eb301d 100644 --- a/testing/btest/scripts/base/frameworks/input/raw.bro +++ b/testing/btest/scripts/base/frameworks/input/raw.bro @@ -20,7 +20,8 @@ type Val: record { s: string; }; -event line(tpe: Input::Event, s: string) { +event line(tpe: Input::Event, description: Input::EventDescription, s: string) { + print description; print s; } From e59aed6ce35afc89bdf0d8114a0a2a330dee83ec Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 16 Mar 2012 08:31:19 -0700 Subject: [PATCH 141/651] for seth - reverse order of event arguments --- src/input/Manager.cc | 12 ++++++------ testing/btest/scripts/base/frameworks/input/raw.bro | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 7ac1de92c2..d0db846769 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -274,15 +274,15 @@ bool Manager::CreateEventStream(RecordVal* fval) { return false; } - if ( ! same_type((*args)[0], BifType::Enum::Input::Event, 0) ) + if ( ! same_type((*args)[1], BifType::Enum::Input::Event, 0) ) { - reporter->Error("events first attribute must be of type Input::Event"); + reporter->Error("events second attribute must be of type Input::Event"); return false; } - if ( ! same_type((*args)[1], BifType::Record::Input::EventDescription, 0) ) + if ( ! same_type((*args)[0], BifType::Record::Input::EventDescription, 0) ) { - reporter->Error("events second attribute must be of type Input::EventDescription"); + reporter->Error("events first attribute must be of type Input::EventDescription"); return false; } @@ -973,11 +973,11 @@ int Manager::SendEventFilterEvent(Filter* i, EnumVal* type, const Value* const * Val *val; list out_vals; + Ref(filter->description); + out_vals.push_back(filter->description); // no tracking, send everything with a new event... 
//out_vals.push_back(new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event)); out_vals.push_back(type); - Ref(filter->description); - out_vals.push_back(filter->description); int position = 0; if ( filter->want_record ) { diff --git a/testing/btest/scripts/base/frameworks/input/raw.bro b/testing/btest/scripts/base/frameworks/input/raw.bro index 0399eb301d..6b9fb8ef96 100644 --- a/testing/btest/scripts/base/frameworks/input/raw.bro +++ b/testing/btest/scripts/base/frameworks/input/raw.bro @@ -20,7 +20,7 @@ type Val: record { s: string; }; -event line(tpe: Input::Event, description: Input::EventDescription, s: string) { +event line(description: Input::EventDescription, tpe: Input::Event, s: string) { print description; print s; } From 89a3bb33c8003bd0be1369e756eef31af23cb18e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 8 Mar 2012 20:22:39 -0800 Subject: [PATCH 142/651] Don't assert during shutdown. --- src/RemoteSerializer.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 56e27c2104..f29e907790 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -2561,7 +2561,9 @@ bool RemoteSerializer::SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, st if ( ! peer->logs_requested ) return false; - assert(peer->log_buffer); + if ( ! peer->log_buffer ) + // Peer shutting down. + return false; // Serialize the log record entry. From e3f5cbb670a0a59ab80ed58224f23cb053e4cc9f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 16 Mar 2012 09:11:31 -0700 Subject: [PATCH 143/651] Small fixes and tweaks. - Fixing tiny leak. - Fixing threads stat output. --- src/logging/Manager.cc | 5 ++--- src/threading/Manager.cc | 30 +++++++++++++++--------------- src/threading/MsgThread.cc | 4 ++-- 3 files changed, 19 insertions(+), 20 deletions(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 4e97351e57..74220ecde4 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -677,11 +677,11 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) Val* path_arg; if ( filter->path_val ) - path_arg = filter->path_val; + path_arg = filter->path_val->Ref(); else path_arg = new StringVal(""); - vl.append(path_arg->Ref()); + vl.append(path_arg); Val* rec_arg; BroType* rt = filter->path_func->FType()->Args()->FieldType("rec"); @@ -715,7 +715,6 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) if ( ! filter->path_val ) { - Unref(path_arg); filter->path = v->AsString()->CheckString(); filter->path_val = v->Ref(); } diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index f5770e2dd8..43eb0313f4 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -102,25 +102,25 @@ void Manager::Process() next_beat = 0; } - if ( ! 
t->HasOut() ) - continue; - - Message* msg = t->RetrieveOut(); - - if ( msg->Process() ) + while ( t->HasOut() ) { - if ( network_time ) - did_process = true; + Message* msg = t->RetrieveOut(); + + if ( msg->Process() ) + { + if ( network_time ) + did_process = true; + } + + else + { + string s = msg->Name() + " failed, terminating thread"; + reporter->Error("%s", s.c_str()); + t->Stop(); } - else - { - string s = msg->Name() + " failed, terminating thread"; - reporter->Error("%s", s.c_str()); - t->Stop(); + delete msg; } - - delete msg; } // fprintf(stderr, "P %.6f %.6f do_beat=%d did_process=%d next_next=%.6f\n", network_time, timer_mgr->Time(), do_beat, (int)did_process, next_beat); diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index b7782b9a05..145e16c57b 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -281,7 +281,7 @@ void MsgThread::GetStats(Stats* stats) { stats->sent_in = cnt_sent_in; stats->sent_out = cnt_sent_out; - stats->pending_in = cnt_sent_in - queue_in.Size(); - stats->pending_out = cnt_sent_out - queue_out.Size(); + stats->pending_in = queue_in.Size(); + stats->pending_out = queue_out.Size(); } From 29f56b4986bfe3b4fff59458fe0aacfef572c1e6 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 16 Mar 2012 23:43:13 -0700 Subject: [PATCH 144/651] continue finetuning of interface + adjust tests. streaming + re-reading do not seem to work completely correctly + there are still some strange random crashes. --- .../scripts.base.frameworks.input.event/out | 49 ++++++++ .../scripts.base.frameworks.input.raw/out | 56 ++++++++++ .../out | 105 ++++++++++++++++++ .../scripts/base/frameworks/input/basic.bro | 2 +- .../scripts/base/frameworks/input/event.bro | 13 +-- .../frameworks/input/onecolumn-norecord.bro | 13 +-- .../frameworks/input/onecolumn-record.bro | 11 +- .../scripts/base/frameworks/input/port.bro | 15 +-- .../frameworks/input/predicate-stream.bro | 11 +- .../base/frameworks/input/predicate.bro | 12 +- .../scripts/base/frameworks/input/raw.bro | 2 + .../scripts/base/frameworks/input/reread.bro | 15 +-- .../scripts/base/frameworks/input/stream.bro | 12 +- .../base/frameworks/input/tableevent.bro | 15 +-- .../base/frameworks/input/twofilters.bro | 6 +- 15 files changed, 251 insertions(+), 86 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.event/out b/testing/btest/Baseline/scripts.base.frameworks.input.event/out index e32a2aea00..59070cd88e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.event/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.event/out @@ -1,21 +1,70 @@ +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::i; +print A::b; +}] Input::EVENT_NEW 1 T +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::i; +print A::b; +}] Input::EVENT_NEW 2 T +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::i; +print A::b; +}] Input::EVENT_NEW 3 F +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::i; +print A::b; +}] Input::EVENT_NEW 4 F +[source=input.log, 
reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::i; +print A::b; +}] Input::EVENT_NEW 5 F +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::i; +print A::b; +}] Input::EVENT_NEW 6 F +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::i; +print A::b; +}] Input::EVENT_NEW 7 T diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.raw/out b/testing/btest/Baseline/scripts.base.frameworks.input.raw/out index 2059013c5d..34a5599dc9 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.raw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.raw/out @@ -1,8 +1,64 @@ +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW q3r3057fdf +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW sdfs\d +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW dfsdf +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW sdf +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. 
diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out index 54048a86b8..56b36a1a0e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out @@ -1,21 +1,126 @@ +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[2] = T, +[4] = F, +[6] = F, +[7] = T, +[1] = T, +[5] = F, +[3] = F +}, idx=, val=, want_record=F, ev=line +{ +print description; +print tpe; +print left; +print right; +}, pred=] Input::EVENT_NEW [i=1] T +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[2] = T, +[4] = F, +[6] = F, +[7] = T, +[1] = T, +[5] = F, +[3] = F +}, idx=, val=, want_record=F, ev=line +{ +print description; +print tpe; +print left; +print right; +}, pred=] Input::EVENT_NEW [i=2] T +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[2] = T, +[4] = F, +[6] = F, +[7] = T, +[1] = T, +[5] = F, +[3] = F +}, idx=, val=, want_record=F, ev=line +{ +print description; +print tpe; +print left; +print right; +}, pred=] Input::EVENT_NEW [i=3] F +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[2] = T, +[4] = F, +[6] = F, +[7] = T, +[1] = T, +[5] = F, +[3] = F +}, idx=, val=, want_record=F, ev=line +{ +print description; +print tpe; +print left; +print right; +}, pred=] Input::EVENT_NEW [i=4] F +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[2] = T, +[4] = F, +[6] = F, +[7] = T, +[1] = T, +[5] = F, +[3] = F +}, idx=, val=, want_record=F, ev=line +{ +print description; +print tpe; +print left; +print right; +}, pred=] Input::EVENT_NEW [i=5] F +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[2] = T, +[4] = F, +[6] = F, +[7] = T, +[1] = T, +[5] = F, +[3] = F +}, idx=, val=, want_record=F, ev=line +{ +print description; +print tpe; +print left; +print right; +}, pred=] Input::EVENT_NEW [i=6] F +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[2] = T, +[4] = F, +[6] = F, +[7] = T, +[1] = T, +[5] = F, +[3] = F +}, idx=, val=, want_record=F, ev=line +{ +print description; +print tpe; +print left; +print right; +}, pred=] Input::EVENT_NEW [i=7] T diff --git a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro index 827b1ce283..8d4028a12e 100644 --- a/testing/btest/scripts/base/frameworks/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/input/basic.bro @@ -45,6 +45,6 @@ event bro_init() Input::remove("ssh"); } -event Input::update_finished(id: string) { +event Input::update_finished(name: string, source:string) { print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/event.bro b/testing/btest/scripts/base/frameworks/input/event.bro index 41eba1613c..dca75334d0 100644 --- a/testing/btest/scripts/base/frameworks/input/event.bro +++ b/testing/btest/scripts/base/frameworks/input/event.bro @@ -1,5 +1,5 @@ # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: bro -b %INPUT >out # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -19,16 +19,13 @@ module A; -export { - redef enum Input::ID += { INPUT }; -} - type Val: record { i: int; b: bool; }; -event 
line(tpe: Input::Event, i: int, b: bool) { +event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool) { + print description; print tpe; print i; print b; @@ -36,6 +33,6 @@ event line(tpe: Input::Event, i: int, b: bool) { event bro_init() { - Input::create_stream(A::INPUT, [$source="input.log"]); - Input::add_eventfilter(A::INPUT, [$name="input", $fields=Val, $ev=line]); + Input::add_event([$source="input.log", $name="input", $fields=Val, $ev=line]); + Input::remove("input"); } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro index bcbba05a3e..d6c81cb2db 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro @@ -1,5 +1,5 @@ # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: bro -b %INPUT >out # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -14,10 +14,6 @@ redef InputAscii::empty_field = "EMPTY"; module A; -export { - redef enum Input::ID += { INPUT }; -} - type Idx: record { i: int; }; @@ -30,12 +26,11 @@ global servers: table[int] of Val = table(); event bro_init() { - # first read in the old stuff into the table... - Input::create_stream(A::INPUT, [$source="input.log"]); - Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); + Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); + Input::remove("input"); } -event Input::update_finished(id: Input::ID) { +event Input::update_finished(name: string, source: string) { print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro index 1c532ba6a9..ca1e956f35 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro @@ -14,10 +14,6 @@ redef InputAscii::empty_field = "EMPTY"; module A; -export { - redef enum Input::ID += { INPUT }; -} - type Idx: record { i: int; }; @@ -30,12 +26,11 @@ global servers: table[int] of Val = table(); event bro_init() { - # first read in the old stuff into the table... - Input::create_stream(A::INPUT, [$source="input.log"]); - Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers]); + Input::add_table([$name="input", $source="input.log", $idx=Idx, $val=Val, $destination=servers]); + Input::remove("input"); } -event Input::update_finished(id: Input::ID) { +event Input::update_finished(name: string, source: string) { print servers; } diff --git a/testing/btest/scripts/base/frameworks/input/port.bro b/testing/btest/scripts/base/frameworks/input/port.bro index 801d6bac3f..88e86eb5dc 100644 --- a/testing/btest/scripts/base/frameworks/input/port.bro +++ b/testing/btest/scripts/base/frameworks/input/port.bro @@ -1,5 +1,5 @@ # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: bro -b %INPUT >out # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -13,10 +13,6 @@ redef InputAscii::empty_field = "EMPTY"; module A; -export { - redef enum Input::ID += { INPUT }; -} - type Idx: record { i: addr; }; @@ -29,17 +25,14 @@ global servers: table[addr] of Val = table(); event bro_init() { - # first read in the old stuff into the table... 
- Input::create_stream(A::INPUT, [$source="input.log"]); - Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers]); + Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers]); print servers[1.2.3.4]; print servers[1.2.3.5]; print servers[1.2.3.6]; - Input::remove_tablefilter(A::INPUT, "input"); - Input::remove_stream(A::INPUT); + Input::remove("input"); } -event Input::update_finished(id: Input::ID) { +event Input::update_finished(name: string, source: string) { print servers[1.2.3.4]; print servers[1.2.3.5]; print servers[1.2.3.6]; diff --git a/testing/btest/scripts/base/frameworks/input/predicate-stream.bro b/testing/btest/scripts/base/frameworks/input/predicate-stream.bro index f08aaef998..20c69131cb 100644 --- a/testing/btest/scripts/base/frameworks/input/predicate-stream.bro +++ b/testing/btest/scripts/base/frameworks/input/predicate-stream.bro @@ -23,10 +23,6 @@ redef InputAscii::empty_field = "EMPTY"; module A; -export { - redef enum Input::ID += { INPUT }; -} - type Idx: record { i: int; }; @@ -38,7 +34,7 @@ type Val: record { global servers: table[int] of Val = table(); global ct: int; -event line(tpe: Input::Event, left: Idx, right: bool) { +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: bool) { ct = ct + 1; if ( ct < 3 ) { return; @@ -75,9 +71,10 @@ event bro_init() { ct = 0; # first read in the old stuff into the table... - Input::create_stream(A::INPUT, [$source="input.log", $mode=Input::STREAM]); - Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $ev=line, + Input::add_table([$source="input.log", $mode=Input::STREAM, $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $ev=line, $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } ]); + Input::remove("input"); + } diff --git a/testing/btest/scripts/base/frameworks/input/predicate.bro b/testing/btest/scripts/base/frameworks/input/predicate.bro index 009911e6a8..278ac7418e 100644 --- a/testing/btest/scripts/base/frameworks/input/predicate.bro +++ b/testing/btest/scripts/base/frameworks/input/predicate.bro @@ -1,5 +1,5 @@ # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: bro -b %INPUT >out # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -20,10 +20,6 @@ redef InputAscii::empty_field = "EMPTY"; module A; -export { - redef enum Input::ID += { INPUT }; -} - type Idx: record { i: int; }; @@ -37,13 +33,13 @@ global servers: table[int] of Val = table(); event bro_init() { # first read in the old stuff into the table... 
- Input::create_stream(A::INPUT, [$source="input.log"]); - Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, + Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } ]); + Input::remove("input"); } -event Input::update_finished(id: Input::ID) { +event Input::update_finished(name: string, source: string) { if ( 1 in servers ) { print "VALID"; } diff --git a/testing/btest/scripts/base/frameworks/input/raw.bro b/testing/btest/scripts/base/frameworks/input/raw.bro index 6b9fb8ef96..8ec6c12a78 100644 --- a/testing/btest/scripts/base/frameworks/input/raw.bro +++ b/testing/btest/scripts/base/frameworks/input/raw.bro @@ -22,10 +22,12 @@ type Val: record { event line(description: Input::EventDescription, tpe: Input::Event, s: string) { print description; + print tpe; print s; } event bro_init() { Input::add_event([$source="input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line]); + Input::remove("input"); } diff --git a/testing/btest/scripts/base/frameworks/input/reread.bro b/testing/btest/scripts/base/frameworks/input/reread.bro index 742d68605b..0930cdcb34 100644 --- a/testing/btest/scripts/base/frameworks/input/reread.bro +++ b/testing/btest/scripts/base/frameworks/input/reread.bro @@ -62,10 +62,6 @@ redef InputAscii::empty_field = "EMPTY"; module A; -export { - redef enum Input::ID += { INPUT }; -} - type Idx: record { i: int; }; @@ -94,8 +90,9 @@ global outfile: file; global try: count; -event line(tpe: Input::Event, left: Idx, right: Val) { +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) { print outfile, "============EVENT============"; + #print outfile, description; print outfile, tpe; print outfile, left; print outfile, right; @@ -106,8 +103,7 @@ event bro_init() outfile = open ("../out"); try = 0; # first read in the old stuff into the table... 
- Input::create_stream(A::INPUT, [$source="../input.log", $mode=Input::REREAD]); - Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, + Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, $pred(typ: Input::Event, left: Idx, right: Val) = { print outfile, "============PREDICATE============"; print outfile, typ; @@ -119,7 +115,7 @@ event bro_init() } -event Input::update_finished(id: Input::ID) { +event Input::update_finished(name: string, source: string) { print outfile, "==========SERVERS============"; print outfile, servers; @@ -127,7 +123,6 @@ event Input::update_finished(id: Input::ID) { if ( try == 5 ) { print outfile, "done"; close(outfile); - Input::remove_tablefilter(A::INPUT, "ssh"); - Input::remove_stream(A::INPUT); + Input::remove("input"); } } diff --git a/testing/btest/scripts/base/frameworks/input/stream.bro b/testing/btest/scripts/base/frameworks/input/stream.bro index db368074aa..571a2273c1 100644 --- a/testing/btest/scripts/base/frameworks/input/stream.bro +++ b/testing/btest/scripts/base/frameworks/input/stream.bro @@ -28,10 +28,6 @@ redef InputAscii::empty_field = "EMPTY"; module A; -export { - redef enum Input::ID += { INPUT }; -} - type Idx: record { i: int; }; @@ -60,7 +56,7 @@ global outfile: file; global try: count; -event line(tpe: Input::Event, left: Idx, right: Val) { +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) { print outfile, "============EVENT============"; print outfile, tpe; print outfile, left; @@ -73,8 +69,7 @@ event line(tpe: Input::Event, left: Idx, right: Val) { if ( try == 3 ) { print outfile, "done"; close(outfile); - Input::remove_tablefilter(A::INPUT, "ssh"); - Input::remove_stream(A::INPUT); + Input::remove("input"); } } @@ -83,7 +78,6 @@ event bro_init() outfile = open ("../out"); try = 0; # first read in the old stuff into the table... 
- Input::create_stream(A::INPUT, [$source="../input.log", $mode=Input::STREAM]); - Input::add_tablefilter(A::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line]); + Input::add_table([$source="../input.log", $mode=Input::STREAM, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line]); } diff --git a/testing/btest/scripts/base/frameworks/input/tableevent.bro b/testing/btest/scripts/base/frameworks/input/tableevent.bro index 0c86ac94b8..e40485dd12 100644 --- a/testing/btest/scripts/base/frameworks/input/tableevent.bro +++ b/testing/btest/scripts/base/frameworks/input/tableevent.bro @@ -1,5 +1,5 @@ # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: bro -b %INPUT >out # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -18,12 +18,6 @@ redef InputAscii::empty_field = "EMPTY"; -module A; - -export { - redef enum Log::ID += { LOG }; -} - type Idx: record { i: int; }; @@ -34,7 +28,8 @@ type Val: record { global destination: table[int] of Val = table(); -event line(tpe: Input::Event, left: Idx, right: bool) { +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: bool) { + print description; print tpe; print left; print right; @@ -42,6 +37,6 @@ event line(tpe: Input::Event, left: Idx, right: bool) { event bro_init() { - Input::create_stream(A::LOG, [$source="input.log"]); - Input::add_tablefilter(A::LOG, [$name="input", $idx=Idx, $val=Val, $destination=destination, $want_record=F,$ev=line]); + Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=destination, $want_record=F,$ev=line]); + Input::remove("input"); } diff --git a/testing/btest/scripts/base/frameworks/input/twofilters.bro b/testing/btest/scripts/base/frameworks/input/twofilters.bro index 260f73e58f..5e94aafba9 100644 --- a/testing/btest/scripts/base/frameworks/input/twofilters.bro +++ b/testing/btest/scripts/base/frameworks/input/twofilters.bro @@ -20,10 +20,6 @@ redef InputAscii::empty_field = "EMPTY"; module A; -export { - redef enum Input::ID += { INPUT }; -} - type Idx: record { i: int; }; @@ -49,7 +45,7 @@ event bro_init() Input::force_update(A::INPUT); } -event Input::update_finished(id: Input::ID) { +event Input::update_finished(name: string, source: string) { if ( done == T ) { return; } From 3286d013c9d37b2fbeb9bcdbf171d83fae12f168 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 16 Mar 2012 23:45:10 -0700 Subject: [PATCH 145/651] forgot two files. 
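The two files are the input framework's script-level API and the input manager: Input::update_finished now receives the stream name and its source instead of just an id, and table events now get the stream's TableDescription record as their first argument. A minimal handler for the new update_finished signature could look like this (illustrative sketch only, not part of the patch):

    # Example handler for the new two-argument signature; the body is illustrative.
    event Input::update_finished(name: string, source: string) {
        # "name" is the name given to Input::add_table/add_event,
        # "source" is the input source that has finished reading.
        print fmt("finished reading stream %s from %s", name, source);
    }
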
--- scripts/base/frameworks/input/main.bro | 2 +- src/input/Manager.cc | 38 +++++++++++++++----------- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 4f7f9983d1..e06dfae005 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -110,7 +110,7 @@ export { global force_update: function(id: string) : bool; ## Event that is called, when the update of a specific source is finished - global update_finished: event(id: string); + global update_finished: event(name: string, source:string); } @load base/input.bif diff --git a/src/input/Manager.cc b/src/input/Manager.cc index d0db846769..28b6afe63f 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -376,7 +376,7 @@ bool Manager::CreateTableStream(RecordVal* fval) { } TableVal *dst = fval->LookupWithDefault(rtype->FieldOffset("destination"))->AsTableVal(); - // check if index fields match tabla description + // check if index fields match table description { int num = idx->NumFields(); const type_list* tl = dst->Type()->AsTableType()->IndexTypes(); @@ -416,29 +416,35 @@ bool Manager::CreateTableStream(RecordVal* fval) { const type_list* args = etype->ArgTypes()->Types(); - if ( args->length() != 3 ) + if ( args->length() != 4 ) { - reporter->Error("Table event must take 3 arguments"); + reporter->Error("Table event must take 4 arguments"); return false; } - if ( ! same_type((*args)[0], BifType::Enum::Input::Event, 0) ) + if ( ! same_type((*args)[0], BifType::Record::Input::TableDescription, 0) ) { - reporter->Error("table events first attribute must be of type Input::Event"); + reporter->Error("table events first attribute must be of type Input::TableDescription"); return false; } - if ( ! same_type((*args)[1], idx) ) + if ( ! same_type((*args)[1], BifType::Enum::Input::Event, 0) ) + { + reporter->Error("table events second attribute must be of type Input::Event"); + return false; + } + + if ( ! same_type((*args)[2], idx) ) { reporter->Error("table events index attributes do not match"); return false; } - if ( want_record->InternalInt() == 1 && ! same_type((*args)[2], val) ) + if ( want_record->InternalInt() == 1 && ! 
same_type((*args)[3], val) ) { reporter->Error("table events value attributes do not match"); return false; - } else if ( want_record->InternalInt() == 0 && !same_type((*args)[2], val->FieldType(0) ) ) { + } else if ( want_record->InternalInt() == 0 && !same_type((*args)[3], val->FieldType(0) ) ) { reporter->Error("table events value attribute does not match"); return false; } @@ -825,14 +831,15 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { assert ( filter->num_val_fields > 0 ); ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); - SendEvent(filter->event, 3, ev, predidx, oldval); + SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, oldval); } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); Ref(valval); if ( filter->num_val_fields == 0 ) { - SendEvent(filter->event, 3, ev, predidx); + Ref(filter->description); + SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx); } else { - SendEvent(filter->event, 3, ev, predidx, valval); + SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, valval); } } } @@ -936,7 +943,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { } - SendEvent(handler, 1, new BroString(i->name)); + SendEvent(handler, 2, new BroString(i->name), new BroString(i->source)); } void Manager::Put(ReaderFrontend* reader, Value* *vals) { @@ -1080,14 +1087,13 @@ int Manager::PutTable(Filter* i, const Value* const *vals) { assert ( filter->num_val_fields > 0 ); ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); - SendEvent(filter->event, 3, ev, predidx, oldval); + SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, oldval); } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - Ref(valval); if ( filter->num_val_fields == 0 ) { - SendEvent(filter->event, 3, ev, predidx); + SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx); } else { - SendEvent(filter->event, 3, ev, predidx, valval); + SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, valval->Ref()); } } } From bf597012f89bbcfd374e84e98a85b48d637254f6 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 18 Mar 2012 10:50:10 -0700 Subject: [PATCH 146/651] fix some stupid, not that easy to find bugs. Functionality seems to work completely again - including all tests passing. 
--- src/input/Manager.cc | 10 +- src/input/ReaderBackend.cc | 2 + src/input/readers/Ascii.cc | 8 +- .../scripts.base.frameworks.input.repeat/out | 160 +++++ .../scripts.base.frameworks.input.reread/out | 662 +++++++++++++----- .../out | 15 - .../scripts/base/frameworks/input/repeat.bro | 41 ++ .../scripts/base/frameworks/input/reread.bro | 6 +- .../base/frameworks/input/twofilters.bro | 100 --- 9 files changed, 690 insertions(+), 314 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.repeat/out delete mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.twofilters/out create mode 100644 testing/btest/scripts/base/frameworks/input/repeat.bro delete mode 100644 testing/btest/scripts/base/frameworks/input/twofilters.bro diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 28b6afe63f..fb7ea6edca 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -837,7 +837,7 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { Ref(valval); if ( filter->num_val_fields == 0 ) { Ref(filter->description); - SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx); + SendEvent(filter->event, 3, filter->description->Ref(), ev, predidx); } else { SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, valval); } @@ -898,7 +898,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { Ref(predidx); Ref(val); - bool result = CallPred(filter->pred, 3, ev, predidx, val); + bool result = CallPred(filter->pred, 4, filter->description->Ref(), ev, predidx, val); if ( result == false ) { // Keep it. Hence - we quit and simply go to the next entry of lastDict @@ -943,7 +943,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { } - SendEvent(handler, 2, new BroString(i->name), new BroString(i->source)); + SendEvent(handler, 2, new StringVal(i->name.c_str()), new StringVal(i->source.c_str())); } void Manager::Put(ReaderFrontend* reader, Value* *vals) { @@ -1154,7 +1154,7 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { int startpos = 0; Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); - filterresult = CallPred(filter->pred, 3, ev, predidx, val); + filterresult = CallPred(filter->pred, 4, filter->description->Ref(), ev, predidx, val); if ( filterresult == false ) { // keep it. 
@@ -1170,7 +1170,7 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { assert(val != 0); Ref(val); EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - SendEvent(filter->event, 3, ev, idxval, val); + SendEvent(filter->event, 4, filter->description->Ref(), ev, idxval, val); } } diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index b33e19d297..0a6ff37dc2 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -177,6 +177,8 @@ bool ReaderBackend::Init(string arg_source, int mode, const int arg_num_fields, DisableFrontend(); } + disabled = !success; + return success; } diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index bb59b3fc1d..a04a40e780 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -89,6 +89,9 @@ bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* c mode = arg_mode; mtime = 0; + num_fields = arg_num_fields; + fields = arg_fields; + if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); return false; @@ -106,9 +109,6 @@ bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* c return false; } - num_fields = arg_num_fields; - fields = arg_fields; - switch ( mode ) { case MANUAL: case REREAD: @@ -480,7 +480,7 @@ bool Ascii::DoUpdate() { bool Ascii::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); - + switch ( mode ) { case MANUAL: // yay, we do nothing :) diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.repeat/out b/testing/btest/Baseline/scripts.base.frameworks.input.repeat/out new file mode 100644 index 0000000000..71de0d2570 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.repeat/out @@ -0,0 +1,160 @@ +input0 +input.log +{ +[1] = T +} +input1 +input.log +{ +[1] = T +} +input2 +input.log +{ +[1] = T +} +input3 +input.log +{ +[1] = T +} +input4 +input.log +{ +[1] = T +} +input5 +input.log +{ +[1] = T +} +input6 +input.log +{ +[1] = T +} +input7 +input.log +{ +[1] = T +} +input8 +input.log +{ +[1] = T +} +input9 +input.log +{ +[1] = T +} +input10 +input.log +{ +[1] = T +} +input11 +input.log +{ +[1] = T +} +input12 +input.log +{ +[1] = T +} +input13 +input.log +{ +[1] = T +} +input14 +input.log +{ +[1] = T +} +input15 +input.log +{ +[1] = T +} +input16 +input.log +{ +[1] = T +} +input17 +input.log +{ +[1] = T +} +input18 +input.log +{ +[1] = T +} +input19 +input.log +{ +[1] = T +} +input20 +input.log +{ +[1] = T +} +input21 +input.log +{ +[1] = T +} +input22 +input.log +{ +[1] = T +} +input23 +input.log +{ +[1] = T +} +input24 +input.log +{ +[1] = T +} +input25 +input.log +{ +[1] = T +} +input26 +input.log +{ +[1] = T +} +input27 +input.log +{ +[1] = T +} +input28 +input.log +{ +[1] = T +} +input29 +input.log +{ +[1] = T +} +input30 +input.log +{ +[1] = T +} +input31 +input.log +{ +[1] = T +} diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out index f244f11a73..545a1cb781 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out @@ -14,8 +14,44 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-42] = 
[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type Input::EVENT_NEW +Left [i=-42] +Right [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -59,8 +95,56 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type Input::EVENT_NEW +Left [i=-43] +Right [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -116,8 +200,56 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type Input::EVENT_CHANGED +Left [i=-43] +Right [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -173,8 +305,68 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ +Description +[source=../input.log, 
reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type Input::EVENT_NEW +Left [i=-44] +Right [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -203,8 +395,80 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-45] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type Input::EVENT_NEW +Left [i=-45] +Right [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -233,8 +497,92 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-46] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, 
d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-45] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type Input::EVENT_NEW +Left [i=-46] +Right [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -263,8 +611,104 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-46] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-47] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-45] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type Input::EVENT_NEW +Left [i=-47] +Right [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -293,22 +737,8 @@ BB }, 
vc=[10, 20, 30], ve=[]] ============EVENT============ -Input::EVENT_NEW -[i=-48] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -==========SERVERS============ -{ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ [-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -393,10 +823,30 @@ BB }, se={ }, vc=[10, 20, 30], ve=[]] -} -============PREDICATE============ -Input::EVENT_REMOVED -[i=-43] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type +Input::EVENT_NEW +Left +[i=-48] +Right [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -408,175 +858,10 @@ AA, BB }, se={ -}, vc=[10, 20, 30], ve=[]] -============PREDICATE============ -Input::EVENT_REMOVED -[i=-46] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============PREDICATE============ -Input::EVENT_REMOVED -[i=-44] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============PREDICATE============ -Input::EVENT_REMOVED -[i=-47] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============PREDICATE============ -Input::EVENT_REMOVED -[i=-45] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============PREDICATE============ -Input::EVENT_REMOVED -[i=-42] -[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -Input::EVENT_REMOVED -[i=-43] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -Input::EVENT_REMOVED -[i=-46] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -Input::EVENT_REMOVED -[i=-44] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============EVENT============ 
-Input::EVENT_REMOVED -[i=-47] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -Input::EVENT_REMOVED -[i=-45] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -Input::EVENT_REMOVED -[i=-42] -[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - }, vc=[10, 20, 30], ve=[]] ==========SERVERS============ { -[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, 1, @@ -587,6 +872,5 @@ AA, BB }, se={ -}, vc=[10, 20, 30], ve=[]] -} -done +}, vc=[10, 20, 30], ve=[]], +[-46] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, \ No newline at end of file diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.twofilters/out b/testing/btest/Baseline/scripts.base.frameworks.input.twofilters/out deleted file mode 100644 index 5b1ee5e983..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.input.twofilters/out +++ /dev/null @@ -1,15 +0,0 @@ -VALID -VALID -VALID -VALID -VALID -VALID -VALID -MARK -VALID -VALID -VALID -VALID -VALID -VALID -VALID diff --git a/testing/btest/scripts/base/frameworks/input/repeat.bro b/testing/btest/scripts/base/frameworks/input/repeat.bro new file mode 100644 index 0000000000..58ce9a1675 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/repeat.bro @@ -0,0 +1,41 @@ +# +# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global destination: table[int] of Val = table(); + +const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; + +event bro_init() +{ + for ( i in one_to_32 ) { + Input::add_table([$source="input.log", $name=fmt("input%d", i), $idx=Idx, $val=Val, $destination=destination, $want_record=F]); + Input::remove(fmt("input%d", i)); + } +} + +event Input::update_finished(name: string, source: string) { + print name; + print source; + print destination; +} diff --git a/testing/btest/scripts/base/frameworks/input/reread.bro b/testing/btest/scripts/base/frameworks/input/reread.bro index 0930cdcb34..f33b060fe0 100644 --- a/testing/btest/scripts/base/frameworks/input/reread.bro +++ b/testing/btest/scripts/base/frameworks/input/reread.bro @@ -92,9 +92,13 @@ global try: count; event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) { print outfile, "============EVENT============"; - #print outfile, description; + print outfile, "Description"; + print outfile, description; + print outfile, "Type"; print outfile, tpe; + print outfile, "Left"; print outfile, left; + print outfile, "Right"; print outfile, right; } diff --git 
a/testing/btest/scripts/base/frameworks/input/twofilters.bro b/testing/btest/scripts/base/frameworks/input/twofilters.bro deleted file mode 100644 index 5e94aafba9..0000000000 --- a/testing/btest/scripts/base/frameworks/input/twofilters.bro +++ /dev/null @@ -1,100 +0,0 @@ -# -# @TEST-EXEC: bro %INPUT >out -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields i b -#types int bool -1 T -2 T -3 F -4 F -5 F -6 F -7 T -@TEST-END-FILE - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; -}; - -global destination1: table[int] of Val = table(); -global destination2: table[int] of Val = table(); - -global done: bool = F; - -event bro_init() -{ - # first read in the old stuff into the table... - Input::create_stream(A::INPUT, [$source="input.log", $autostart=F]); - Input::add_tablefilter(A::INPUT, [$name="input", $idx=Idx, $val=Val, $destination=destination1, $want_record=F, - $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } - ]); - Input::add_tablefilter(A::INPUT, [$name="input2",$idx=Idx, $val=Val, $destination=destination2]); - - Input::force_update(A::INPUT); -} - -event Input::update_finished(name: string, source: string) { - if ( done == T ) { - return; - } - - done = T; - - if ( 1 in destination1 ) { - print "VALID"; - } - if ( 2 in destination1 ) { - print "VALID"; - } - if ( !(3 in destination1) ) { - print "VALID"; - } - if ( !(4 in destination1) ) { - print "VALID"; - } - if ( !(5 in destination1) ) { - print "VALID"; - } - if ( !(6 in destination1) ) { - print "VALID"; - } - if ( 7 in destination1 ) { - print "VALID"; - } - - print "MARK"; - - if ( 2 in destination2 ) { - print "VALID"; - } - if ( 2 in destination2 ) { - print "VALID"; - } - if ( 3 in destination2 ) { - print "VALID"; - } - if ( 4 in destination2 ) { - print "VALID"; - } - if ( 5 in destination2 ) { - print "VALID"; - } - if ( 6 in destination2 ) { - print "VALID"; - } - if ( 7 in destination2 ) { - print "VALID"; - } -} From aa6026c1a7d40b9fa8bc553ad4f42a7150a1cbeb Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 18 Mar 2012 10:52:23 -0700 Subject: [PATCH 147/651] forgot to undo this - this idea did not work, because records cannot reference themselves. --- src/input/Manager.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index fb7ea6edca..d0386fbb3f 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -898,7 +898,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { Ref(predidx); Ref(val); - bool result = CallPred(filter->pred, 4, filter->description->Ref(), ev, predidx, val); + bool result = CallPred(filter->pred, 3, ev, predidx, val); if ( result == false ) { // Keep it. Hence - we quit and simply go to the next entry of lastDict @@ -1154,7 +1154,7 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { int startpos = 0; Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); - filterresult = CallPred(filter->pred, 4, filter->description->Ref(), ev, predidx, val); + filterresult = CallPred(filter->pred, 3, ev, predidx, val); if ( filterresult == false ) { // keep it. From 88e0cea598e5e57d87150c05e3d59989b6102fee Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 18 Mar 2012 15:31:47 -0700 Subject: [PATCH 148/651] add execute-mode support to the raw reader - allows to directly call commands and read their output. 
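In EXECUTE mode the source string is run as a command and each line of its output is delivered to the event handler. Roughly, usage looks like this (a sketch mirroring the new executeraw.bro test below; the command and names are only examples):

    type Val: record {
        s: string;
    };

    # One event is raised per line of the command's output.
    event line(description: Input::EventDescription, tpe: Input::Event, s: string) {
        print s;
    }

    event bro_init() {
        # In EXECUTE mode the source is treated as a command, not a file name.
        Input::add_event([$source="wc input.log", $reader=Input::READER_RAW,
                          $mode=Input::EXECUTE, $name="input", $fields=Val, $ev=line]);
    }
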
Note that fdstream.h is from boost and has a separate license: * (C) Copyright Nicolai M. Josuttis 2001. * Permission to copy, use, modify, sell and distribute this software * is granted provided this copyright notice appears in all copies. * This software is provided "as is" without express or implied * warranty, and with no claim as to its suitability for any purpose. --- src/input/fdstream.h | 184 ++++++++++++++++++ src/input/readers/Raw.cc | 63 ++++-- src/input/readers/Raw.h | 4 + src/types.bif | 1 + .../base/frameworks/input/executeraw.bro | 33 ++++ 5 files changed, 267 insertions(+), 18 deletions(-) create mode 100644 src/input/fdstream.h create mode 100644 testing/btest/scripts/base/frameworks/input/executeraw.bro diff --git a/src/input/fdstream.h b/src/input/fdstream.h new file mode 100644 index 0000000000..585e03d10b --- /dev/null +++ b/src/input/fdstream.h @@ -0,0 +1,184 @@ +/* The following code declares classes to read from and write to + * file descriptore or file handles. + * + * See + * http://www.josuttis.com/cppcode + * for details and the latest version. + * + * - open: + * - integrating BUFSIZ on some systems? + * - optimized reading of multiple characters + * - stream for reading AND writing + * - i18n + * + * (C) Copyright Nicolai M. Josuttis 2001. + * Permission to copy, use, modify, sell and distribute this software + * is granted provided this copyright notice appears in all copies. + * This software is provided "as is" without express or implied + * warranty, and with no claim as to its suitability for any purpose. + * + * Version: Jul 28, 2002 + * History: + * Jul 28, 2002: bugfix memcpy() => memmove() + * fdinbuf::underflow(): cast for return statements + * Aug 05, 2001: first public version + */ +#ifndef BOOST_FDSTREAM_HPP +#define BOOST_FDSTREAM_HPP + +#include +#include +#include +// for EOF: +#include +// for memmove(): +#include + + +// low-level read and write functions +#ifdef _MSC_VER +# include +#else +# include +//extern "C" { +// int write (int fd, const char* buf, int num); +// int read (int fd, char* buf, int num); +//} +#endif + + +// BEGIN namespace BOOST +namespace boost { + + +/************************************************************ + * fdostream + * - a stream that writes on a file descriptor + ************************************************************/ + + +class fdoutbuf : public std::streambuf { + protected: + int fd; // file descriptor + public: + // constructor + fdoutbuf (int _fd) : fd(_fd) { + } + protected: + // write one character + virtual int_type overflow (int_type c) { + if (c != EOF) { + char z = c; + if (write (fd, &z, 1) != 1) { + return EOF; + } + } + return c; + } + // write multiple characters + virtual + std::streamsize xsputn (const char* s, + std::streamsize num) { + return write(fd,s,num); + } +}; + +class fdostream : public std::ostream { + protected: + fdoutbuf buf; + public: + fdostream (int fd) : std::ostream(0), buf(fd) { + rdbuf(&buf); + } +}; + + +/************************************************************ + * fdistream + * - a stream that reads on a file descriptor + ************************************************************/ + +class fdinbuf : public std::streambuf { + protected: + int fd; // file descriptor + protected: + /* data buffer: + * - at most, pbSize characters in putback area plus + * - at most, bufSize characters in ordinary read buffer + */ + static const int pbSize = 4; // size of putback area + static const int bufSize = 1024; // size of the data buffer + char buffer[bufSize+pbSize]; // 
data buffer + + public: + /* constructor + * - initialize file descriptor + * - initialize empty data buffer + * - no putback area + * => force underflow() + */ + fdinbuf (int _fd) : fd(_fd) { + setg (buffer+pbSize, // beginning of putback area + buffer+pbSize, // read position + buffer+pbSize); // end position + } + + protected: + // insert new characters into the buffer + virtual int_type underflow () { +#ifndef _MSC_VER + using std::memmove; +#endif + + // is read position before end of buffer? + if (gptr() < egptr()) { + return traits_type::to_int_type(*gptr()); + } + + /* process size of putback area + * - use number of characters read + * - but at most size of putback area + */ + int numPutback; + numPutback = gptr() - eback(); + if (numPutback > pbSize) { + numPutback = pbSize; + } + + /* copy up to pbSize characters previously read into + * the putback area + */ + memmove (buffer+(pbSize-numPutback), gptr()-numPutback, + numPutback); + + // read at most bufSize new characters + int num; + num = read (fd, buffer+pbSize, bufSize); + if (num <= 0) { + // ERROR or EOF + return EOF; + } + + // reset buffer pointers + setg (buffer+(pbSize-numPutback), // beginning of putback area + buffer+pbSize, // read position + buffer+pbSize+num); // end of buffer + + // return next character + return traits_type::to_int_type(*gptr()); + } +}; + +class fdistream : public std::istream { + protected: + fdinbuf buf; + public: + fdistream (int fd) : std::istream(0), buf(fd) { + rdbuf(&buf); + } +}; + + +} // END namespace boost + +#endif /*BOOST_FDSTREAM_HPP*/ diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index a83314c491..777acb5951 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -7,10 +7,12 @@ #include #include "../../threading/SerialTypes.h" +#include "../fdstream.h" #define MANUAL 0 #define REREAD 1 #define STREAM 2 +#define EXECUTE 3 #include #include @@ -23,6 +25,7 @@ using threading::Field; Raw::Raw(ReaderFrontend *frontend) : ReaderBackend(frontend) { file = 0; + in = 0; //keyMap = new map(); @@ -41,9 +44,15 @@ Raw::~Raw() void Raw::DoFinish() { if ( file != 0 ) { - file->close(); - delete(file); + if ( mode != EXECUTE ) { + file->close(); + delete(file); + } else { // mode == EXECUTE + delete(in); + pclose(pfile); + } file = 0; + in = 0; } } @@ -53,15 +62,29 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con mode = arg_mode; mtime = 0; - if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { + if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) && ( mode != EXECUTE ) ) { Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); return false; } - file = new ifstream(path.c_str()); - if ( !file->is_open() ) { - Error(Fmt("Init: cannot open %s", fname.c_str())); - return false; + if ( mode != EXECUTE ) { + + file = new ifstream(path.c_str()); + if ( !file->is_open() ) { + Error(Fmt("Init: cannot open %s", fname.c_str())); + return false; + } + in = file; + + } else { // mode == EXECUTE + + pfile = popen(path.c_str(), "r"); + if ( pfile == NULL ) { + Error(Fmt("Could not execute command %s", path.c_str())); + return false; + } + + in = new boost::fdistream(fileno(pfile)); } num_fields = arg_num_fields; @@ -81,23 +104,14 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con Debug(DBG_INPUT, "Raw reader created, will perform first update"); #endif - switch ( mode ) { - case MANUAL: - case REREAD: - case STREAM: - DoUpdate(); - break; - default: - 
assert(false); - } - + DoUpdate(); return true; } bool Raw::GetLine(string& str) { - while ( getline(*file, str, separator[0]) ) { + while ( getline(*in, str, separator[0]) ) { return true; } @@ -141,6 +155,18 @@ bool Raw::DoUpdate() { return false; } + break; + case EXECUTE: + // re-execute it... + pclose(pfile); + + pfile = popen(fname.c_str(), "r"); + if ( pfile == NULL ) { + Error(Fmt("Could not execute command %s", fname.c_str())); + return false; + } + + in = new boost::fdistream(fileno(pfile)); break; default: assert(false); @@ -171,6 +197,7 @@ bool Raw::DoHeartbeat(double network_time, double current_time) switch ( mode ) { case MANUAL: + case EXECUTE: // yay, we do nothing :) break; case REREAD: diff --git a/src/input/readers/Raw.h b/src/input/readers/Raw.h index ace4e0ee88..55d14d956d 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/Raw.h @@ -31,7 +31,11 @@ private: bool GetLine(string& str); + istream* in; ifstream* file; + + FILE* pfile; + string fname; // Options set from the script-level. diff --git a/src/types.bif b/src/types.bif index 26850bfa93..ebd206c6fa 100644 --- a/src/types.bif +++ b/src/types.bif @@ -186,6 +186,7 @@ enum Mode %{ MANUAL = 0, REREAD = 1, STREAM = 2, + EXECUTE = 3, %} module GLOBAL; diff --git a/testing/btest/scripts/base/frameworks/input/executeraw.bro b/testing/btest/scripts/base/frameworks/input/executeraw.bro new file mode 100644 index 0000000000..85c1415bf3 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/executeraw.bro @@ -0,0 +1,33 @@ +# +# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. +@TEST-END-FILE + + +module A; + +type Val: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) { + print description; + print tpe; + print s; +} + +event bro_init() +{ + Input::add_event([$source="wc input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $mode=Input::EXECUTE]); + Input::remove("input"); +} From 667487cec927bcdba148f5a58d5dbc86f723e48f Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 19 Mar 2012 11:26:31 -0500 Subject: [PATCH 149/651] Adapt FreeBSD's inet_ntop implementation for internal use. So we get consistent text representations of IPv6 addresses across platforms. --- src/CMakeLists.txt | 1 + src/IPAddr.cc | 5 +- src/RemoteSerializer.cc | 3 +- src/bro_inet_ntop.c | 189 ++++++++++++++++++ src/bro_inet_ntop.h | 18 ++ .../Baseline/language.ipv6-literals/output | 2 + testing/btest/language/ipv6-literals.bro | 2 + 7 files changed, 217 insertions(+), 3 deletions(-) create mode 100644 src/bro_inet_ntop.c create mode 100644 src/bro_inet_ntop.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d51211f0d1..785001b920 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -402,6 +402,7 @@ set(bro_SRCS XDR.cc ZIP.cc bsd-getopt-long.c + bro_inet_ntop.c cq.c md5.c patricia.c diff --git a/src/IPAddr.cc b/src/IPAddr.cc index ff124025f9..52c3f9b35c 100644 --- a/src/IPAddr.cc +++ b/src/IPAddr.cc @@ -6,6 +6,7 @@ #include "Reporter.h" #include "Conn.h" #include "DPM.h" +#include "bro_inet_ntop.h" const uint8_t IPAddr::v4_mapped_prefix[12] = { 0, 0, 0, 0, 0, 0, 0, 0, @@ -159,7 +160,7 @@ string IPAddr::AsString() const { char s[INET_ADDRSTRLEN]; - if ( inet_ntop(AF_INET, &in6.s6_addr[12], s, INET_ADDRSTRLEN) == NULL ) + if ( ! 
bro_inet_ntop(AF_INET, &in6.s6_addr[12], s, INET_ADDRSTRLEN) ) return " +#include +#include + +#include +#include +#include + +#include +#include +#include + +/*% + * WARNING: Don't even consider trying to compile this on a system where + * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. + */ + +static const char *bro_inet_ntop4(const u_char *src, char *dst, socklen_t size); +static const char *bro_inet_ntop6(const u_char *src, char *dst, socklen_t size); + +/* char * + * bro_inet_ntop(af, src, dst, size) + * convert a network format address to presentation format. + * return: + * pointer to presentation format address (`dst'), or NULL (see errno). + * author: + * Paul Vixie, 1996. + */ +const char * +bro_inet_ntop(int af, const void * __restrict src, char * __restrict dst, + socklen_t size) +{ + switch (af) { + case AF_INET: + return (bro_inet_ntop4(src, dst, size)); + case AF_INET6: + return (bro_inet_ntop6(src, dst, size)); + default: + errno = EAFNOSUPPORT; + return (NULL); + } + /* NOTREACHED */ +} + +/* const char * + * bro_inet_ntop4(src, dst, size) + * format an IPv4 address + * return: + * `dst' (as a const) + * notes: + * (1) uses no statics + * (2) takes a u_char* not an in_addr as input + * author: + * Paul Vixie, 1996. Modified by Jon Siwek, 2012, to replace strlcpy + */ +static const char * +bro_inet_ntop4(const u_char *src, char *dst, socklen_t size) +{ + static const char fmt[] = "%u.%u.%u.%u"; + char tmp[sizeof "255.255.255.255"]; + int l; + + l = snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]); + if (l <= 0 || (socklen_t) l >= size) { + errno = ENOSPC; + return (NULL); + } + strncpy(dst, tmp, size - 1); + dst[size - 1] = 0; + return (dst); +} + +/* const char * + * bro_inet_ntop6(src, dst, size) + * convert IPv6 binary address into presentation (printable) format + * author: + * Paul Vixie, 1996. Modified by Jon Siwek, 2012, for IPv4-translated format + */ +static const char * +bro_inet_ntop6(const u_char *src, char *dst, socklen_t size) +{ + /* + * Note that int32_t and int16_t need only be "at least" large enough + * to contain a value of the specified size. On some systems, like + * Crays, there is no such thing as an integer variable with 16 bits. + * Keep this in mind if you think this function should have been coded + * to use pointer overlays. All the world's not a VAX. + */ + char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp; + struct { int base, len; } best, cur; + u_int words[NS_IN6ADDRSZ / NS_INT16SZ]; + int i; + + /* + * Preprocess: + * Copy the input (bytewise) array into a wordwise array. + * Find the longest run of 0x00's in src[] for :: shorthanding. + */ + memset(words, '\0', sizeof words); + for (i = 0; i < NS_IN6ADDRSZ; i++) + words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3)); + best.base = -1; + best.len = 0; + cur.base = -1; + cur.len = 0; + for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { + if (words[i] == 0) { + if (cur.base == -1) + cur.base = i, cur.len = 1; + else + cur.len++; + } else { + if (cur.base != -1) { + if (best.base == -1 || cur.len > best.len) + best = cur; + cur.base = -1; + } + } + } + if (cur.base != -1) { + if (best.base == -1 || cur.len > best.len) + best = cur; + } + if (best.base != -1 && best.len < 2) + best.base = -1; + + /* + * Format the result. + */ + tp = tmp; + for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { + /* Are we inside the best run of 0x00's? 
*/ + if (best.base != -1 && i >= best.base && + i < (best.base + best.len)) { + if (i == best.base) + *tp++ = ':'; + continue; + } + /* Are we following an initial run of 0x00s or any real hex? */ + if (i != 0) + *tp++ = ':'; + /* Is this address an encapsulated IPv4? */ + if (i == 6 && best.base == 0 && (best.len == 6 || + (best.len == 7 && words[7] != 0x0001) || + (best.len == 5 && words[5] == 0xffff) || + (best.len == 4 && words[4] == 0xffff && words[5] == 0))) { + if (!bro_inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp))) + return (NULL); + tp += strlen(tp); + break; + } + tp += sprintf(tp, "%x", words[i]); + } + /* Was it a trailing run of 0x00's? */ + if (best.base != -1 && (best.base + best.len) == + (NS_IN6ADDRSZ / NS_INT16SZ)) + *tp++ = ':'; + *tp++ = '\0'; + + /* + * Check for overflow, copy, and we're done. + */ + if ((socklen_t)(tp - tmp) > size) { + errno = ENOSPC; + return (NULL); + } + strcpy(dst, tmp); + return (dst); +} diff --git a/src/bro_inet_ntop.h b/src/bro_inet_ntop.h new file mode 100644 index 0000000000..00326b092e --- /dev/null +++ b/src/bro_inet_ntop.h @@ -0,0 +1,18 @@ +#ifndef BRO_INET_NTOP_H +#define BRO_INET_NTOP_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +const char * +bro_inet_ntop(int af, const void * __restrict src, char * __restrict dst, + socklen_t size); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/testing/btest/Baseline/language.ipv6-literals/output b/testing/btest/Baseline/language.ipv6-literals/output index f2b9a985f0..8542af7f91 100644 --- a/testing/btest/Baseline/language.ipv6-literals/output +++ b/testing/btest/Baseline/language.ipv6-literals/output @@ -15,8 +15,10 @@ aaaa::ffff 192.168.1.100 ffff::c0a8:164 ::192.168.1.100 +::ffff:0:192.168.1.100 805b:2d9d:dc28::fc57:d4c8:1fff aaaa::bbbb aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222 aaaa:bbbb:cccc:dddd:eeee:ffff:1:2222 aaaa:bbbb:cccc:dddd:eeee:ffff:0:2222 +aaaa:bbbb:cccc:dddd:eeee::2222 diff --git a/testing/btest/language/ipv6-literals.bro b/testing/btest/language/ipv6-literals.bro index 6f1f9d59fb..004d104c6e 100644 --- a/testing/btest/language/ipv6-literals.bro +++ b/testing/btest/language/ipv6-literals.bro @@ -20,11 +20,13 @@ v[|v|] = [aaaa:0::ffff]; v[|v|] = [::ffff:192.168.1.100]; v[|v|] = [ffff::192.168.1.100]; v[|v|] = [::192.168.1.100]; +v[|v|] = [::ffff:0:192.168.1.100]; v[|v|] = [805B:2D9D:DC28::FC57:212.200.31.255]; v[|v|] = [0xaaaa::bbbb]; v[|v|] = [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]; v[|v|] = [aaaa:bbbb:cccc:dddd:eeee:ffff:1:2222]; v[|v|] = [aaaa:bbbb:cccc:dddd:eeee:ffff:0:2222]; +v[|v|] = [aaaa:bbbb:cccc:dddd:eeee:0:0:2222]; for (i in v) print v[i]; From 08e1771682da9d90a1deeae3918f3f2960b38772 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 20 Mar 2012 12:07:37 -0700 Subject: [PATCH 150/651] update to execute raw. support reading from commands by adppending | to the filename. support streaming reads from command. Fix something to make rearead work better. 
(magically happened) --- src/input/fdstream.h | 5 + src/input/readers/Raw.cc | 202 ++-- src/input/readers/Raw.h | 7 +- src/types.bif | 1 - .../out | 145 +++ .../scripts.base.frameworks.input.reread/out | 966 +++++++++++++----- .../out | 128 +++ .../out | 120 +++ .../base/frameworks/input/executeraw.bro | 2 +- .../frameworks/input/executestreamraw.bro | 58 ++ .../base/frameworks/input/rereadraw.bro | 34 + .../base/frameworks/input/streamraw.bro | 56 + 12 files changed, 1364 insertions(+), 360 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out create mode 100644 testing/btest/scripts/base/frameworks/input/executestreamraw.bro create mode 100644 testing/btest/scripts/base/frameworks/input/rereadraw.bro create mode 100644 testing/btest/scripts/base/frameworks/input/streamraw.bro diff --git a/src/input/fdstream.h b/src/input/fdstream.h index 585e03d10b..cda767dd52 100644 --- a/src/input/fdstream.h +++ b/src/input/fdstream.h @@ -35,10 +35,12 @@ #include + // low-level read and write functions #ifdef _MSC_VER # include #else +# include # include //extern "C" { // int write (int fd, const char* buf, int num); @@ -154,6 +156,9 @@ class fdinbuf : public std::streambuf { // read at most bufSize new characters int num; num = read (fd, buffer+pbSize, bufSize); + if ( num == EAGAIN ) { + return 0; + } if (num <= 0) { // ERROR or EOF return EOF; diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index 777acb5951..fb9243e713 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -12,11 +12,11 @@ #define MANUAL 0 #define REREAD 1 #define STREAM 2 -#define EXECUTE 3 #include #include #include +#include using namespace input::reader; using threading::Value; @@ -44,52 +44,73 @@ Raw::~Raw() void Raw::DoFinish() { if ( file != 0 ) { - if ( mode != EXECUTE ) { - file->close(); - delete(file); - } else { // mode == EXECUTE - delete(in); - pclose(pfile); - } - file = 0; - in = 0; + Close(); } } +bool Raw::Open() +{ + if ( execute ) { + file = popen(fname.c_str(), "r"); + if ( file == NULL ) { + Error(Fmt("Could not execute command %s", fname.c_str())); + return false; + } + } else { + file = fopen(fname.c_str(), "r"); + if ( file == NULL ) { + Error(Fmt("Init: cannot open %s", fname.c_str())); + return false; + } + } + + in = new boost::fdistream(fileno(file)); + + if ( execute && mode == STREAM ) { + fcntl(fileno(file), F_SETFL, O_NONBLOCK); + } + + return true; +} + +bool Raw::Close() +{ + if ( file == NULL ) { + InternalError(Fmt("Trying to close closed file for stream %s", fname.c_str())); + return false; + } + + if ( execute ) { + delete(in); + pclose(file); + } else { + delete(in); + fclose(file); + } + + in = NULL; + file = NULL; + + return true; +} + bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) { fname = path; mode = arg_mode; mtime = 0; - - if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) && ( mode != EXECUTE ) ) { - Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); - return false; - } + execute = false; + firstrun = true; + bool result; - if ( mode != EXECUTE ) { - - file = new ifstream(path.c_str()); - if ( !file->is_open() ) { - Error(Fmt("Init: cannot open %s", fname.c_str())); - return false; - } - in = file; - - } else { // mode == EXECUTE - - pfile = 
popen(path.c_str(), "r"); - if ( pfile == NULL ) { - Error(Fmt("Could not execute command %s", path.c_str())); - return false; - } - - in = new boost::fdistream(fileno(pfile)); - } - num_fields = arg_num_fields; fields = arg_fields; + if ( path.length() == 0 ) { + Error("No source path provided"); + return false; + } + if ( arg_num_fields != 1 ) { Error("Filter for raw reader contains more than one field. Filters for the raw reader may only contain exactly one string field. Filter ignored."); return false; @@ -100,12 +121,45 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con return false; } + // do Initialization + char last = path[path.length()-1]; + if ( last == '|' ) { + execute = true; + fname = path.substr(0, fname.length() - 1); + + if ( ( mode != MANUAL ) && ( mode != STREAM ) ) { + Error(Fmt("Unsupported read mode %d for source %s in execution mode", mode, fname.c_str())); + return false; + } + + result = Open(); + + } else { + execute = false; + if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { + Error(Fmt("Unsupported read mode %d for source %s", mode, fname.c_str())); + return false; + } + + result = Open(); + + } + + if ( result == false ) { + return result; + } + + #ifdef DEBUG Debug(DBG_INPUT, "Raw reader created, will perform first update"); #endif + // after initialization - do update DoUpdate(); +#ifdef DEBUG + Debug(DBG_INPUT, "First update went through"); +#endif return true; } @@ -121,56 +175,45 @@ bool Raw::GetLine(string& str) { // read the entire file and send appropriate thingies back to InputMgr bool Raw::DoUpdate() { - switch ( mode ) { - case REREAD: - // check if the file has changed - struct stat sb; - if ( stat(fname.c_str(), &sb) == -1 ) { - Error(Fmt("Could not get stat for %s", fname.c_str())); - return false; - } + if ( firstrun ) { + firstrun = false; + } else { + switch ( mode ) { + case REREAD: + // check if the file has changed + struct stat sb; + if ( stat(fname.c_str(), &sb) == -1 ) { + Error(Fmt("Could not get stat for %s", fname.c_str())); + return false; + } - if ( sb.st_mtime <= mtime ) { - // no change - return true; - } + if ( sb.st_mtime <= mtime ) { + // no change + return true; + } - mtime = sb.st_mtime; - // file changed. reread. + mtime = sb.st_mtime; + // file changed. reread. - // fallthrough - case MANUAL: - case STREAM: - - if ( file && file->is_open() ) { - if ( mode == STREAM ) { - file->clear(); // remove end of file evil bits + // fallthrough + case MANUAL: + case STREAM: + Debug(DBG_INPUT, "Updating"); + if ( mode == STREAM && file != NULL && in != NULL ) { + fpurge(file); + in->clear(); // remove end of file evil bits break; } - file->close(); - } - file = new ifstream(fname.c_str()); - if ( !file->is_open() ) { - Error(Fmt("cannot open %s", fname.c_str())); - return false; - } - break; - case EXECUTE: - // re-execute it... 
- pclose(pfile); - - pfile = popen(fname.c_str(), "r"); - if ( pfile == NULL ) { - Error(Fmt("Could not execute command %s", fname.c_str())); - return false; - } - - in = new boost::fdistream(fileno(pfile)); - break; - default: - assert(false); + Close(); + if ( !Open() ) { + return false; + } + break; + default: + assert(false); + } } string line; @@ -195,9 +238,10 @@ bool Raw::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); + Debug(DBG_INPUT, "Heartbeat"); + switch ( mode ) { case MANUAL: - case EXECUTE: // yay, we do nothing :) break; case REREAD: diff --git a/src/input/readers/Raw.h b/src/input/readers/Raw.h index 55d14d956d..1cbeff4f83 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/Raw.h @@ -28,13 +28,14 @@ protected: private: virtual bool DoHeartbeat(double network_time, double current_time); + bool Open(); + bool Close(); bool GetLine(string& str); istream* in; - ifstream* file; - FILE* pfile; + FILE* file; string fname; @@ -45,6 +46,8 @@ private: string headerline; int mode; + bool execute; + bool firstrun; time_t mtime; diff --git a/src/types.bif b/src/types.bif index ebd206c6fa..26850bfa93 100644 --- a/src/types.bif +++ b/src/types.bif @@ -186,7 +186,6 @@ enum Mode %{ MANUAL = 0, REREAD = 1, STREAM = 2, - EXECUTE = 3, %} module GLOBAL; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out new file mode 100644 index 0000000000..06e28de441 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out @@ -0,0 +1,145 @@ +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +A::try = A::try + 1; +if (9 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +A::try = A::try + 1; +if (9 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +A::try = A::try + 1; +if (9 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +q3r3057fdf +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +A::try = A::try + 1; +if (9 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +sdfs\d +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +A::try = A::try + 1; +if (9 == A::try) +{ +print A::outfile, done; +close(A::outfile); 
+Input::remove(input); +} + +}] +Input::EVENT_NEW + +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +A::try = A::try + 1; +if (9 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +dfsdf +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +A::try = A::try + 1; +if (9 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +sdf +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +A::try = A::try + 1; +if (9 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +3rw43wRRERLlL#RWERERERE. +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +A::try = A::try + 1; +if (9 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW + +done diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out index 545a1cb781..46a30f387f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out @@ -303,81 +303,6 @@ AA, BB }, se={ -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ -[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -}, idx=, val=, want_record=T, ev=line -{ -print A::outfile, ============EVENT============; -print A::outfile, Description; -print A::outfile, A::description; -print A::outfile, Type; -print A::outfile, A::tpe; -print A::outfile, Left; -print A::outfile, A::left; -print A::outfile, Right; -print A::outfile, A::right; -}, pred=anonymous-function -{ -print A::outfile, ============PREDICATE============; -print A::outfile, A::typ; -print A::outfile, A::left; -print A::outfile, A::right; -return (T); -}] -Type -Input::EVENT_NEW -Left -[i=-44] -Right -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - }, vc=[10, 20, 30], ve=[]] ============PREDICATE============ Input::EVENT_NEW @@ -393,93 +318,6 @@ AA, BB }, se={ -}, vc=[10, 20, 
30], ve=[]] -============EVENT============ -Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ -[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-45] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -}, idx=, val=, want_record=T, ev=line -{ -print A::outfile, ============EVENT============; -print A::outfile, Description; -print A::outfile, A::description; -print A::outfile, Type; -print A::outfile, A::tpe; -print A::outfile, Left; -print A::outfile, A::left; -print A::outfile, Right; -print A::outfile, A::right; -}, pred=anonymous-function -{ -print A::outfile, ============PREDICATE============; -print A::outfile, A::typ; -print A::outfile, A::left; -print A::outfile, A::right; -return (T); -}] -Type -Input::EVENT_NEW -Left -[i=-45] -Right -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - }, vc=[10, 20, 30], ve=[]] ============PREDICATE============ Input::EVENT_NEW @@ -495,105 +333,6 @@ AA, BB }, se={ -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ -[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-46] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-45] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -}, idx=, val=, want_record=T, ev=line -{ -print A::outfile, ============EVENT============; -print A::outfile, Description; -print A::outfile, A::description; -print A::outfile, Type; -print A::outfile, A::tpe; -print A::outfile, Left; -print A::outfile, A::left; -print A::outfile, Right; -print A::outfile, A::right; -}, pred=anonymous-function -{ -print A::outfile, ============PREDICATE============; -print A::outfile, A::typ; -print A::outfile, A::left; -print A::outfile, A::right; -return (T); -}] -Type -Input::EVENT_NEW 
-Left -[i=-46] -Right -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - }, vc=[10, 20, 30], ve=[]] ============PREDICATE============ Input::EVENT_NEW @@ -609,6 +348,21 @@ AA, BB }, se={ +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_NEW +[i=-48] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description @@ -636,6 +390,387 @@ AA, BB }, se={ +}, vc=[10, 20, 30], ve=[]], +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-47] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-45] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type +Input::EVENT_NEW +Left +[i=-44] +Right +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-46] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-47] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, 
s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-45] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type +Input::EVENT_NEW +Left +[i=-45] +Right +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-46] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-47] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-45] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type +Input::EVENT_NEW +Left +[i=-46] +Right +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + 
+}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-46] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + }, vc=[10, 20, 30], ve=[]], [-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, @@ -720,21 +855,6 @@ AA, BB }, se={ -}, vc=[10, 20, 30], ve=[]] -============PREDICATE============ -Input::EVENT_NEW -[i=-48] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description @@ -873,4 +993,296 @@ BB }, se={ }, vc=[10, 20, 30], ve=[]], -[-46] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, \ No newline at end of file +[-46] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-47] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-45] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +============PREDICATE============ +Input::EVENT_REMOVED +[i=-43] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-46] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-44] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ 
+Input::EVENT_REMOVED +[i=-47] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-45] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Description +Input::EVENT_REMOVED +Type +[i=-43] +Left +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +Right +============EVENT============ +Description +Input::EVENT_REMOVED +Type +[i=-46] +Left +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +Right +============EVENT============ +Description +Input::EVENT_REMOVED +Type +[i=-44] +Left +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +Right +============EVENT============ +Description +Input::EVENT_REMOVED +Type +[i=-47] +Left +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +Right +============EVENT============ +Description +Input::EVENT_REMOVED +Type +[i=-45] +Left +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +Right +============EVENT============ +Description +Input::EVENT_REMOVED +Type +[i=-42] +Left +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +Right +==========SERVERS============ +{ +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +done diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out new file mode 100644 index 0000000000..d85c8f2e83 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out @@ -0,0 +1,128 @@ +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +[source=input.log, 
reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +q3r3057fdf +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +sdfs\d +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW + +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +dfsdf +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +sdf +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +3rw43wRRERLlL#RWERERERE. +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +q3r3057fdf +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +sdfs\d +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW + +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +dfsdf +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +sdf +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::description; +print A::tpe; +print A::s; +}] +Input::EVENT_NEW +3rw43wRRERLlL#RWERERERE. 
diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out new file mode 100644 index 0000000000..937acf428e --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out @@ -0,0 +1,120 @@ +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +if (3 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +if (3 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +if (3 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +q3r3057fdf +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +if (3 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +sdfs\d +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +if (3 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW + +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +if (3 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +dfsdf +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +if (3 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +sdf +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +{ +print A::outfile, A::description; +print A::outfile, A::tpe; +print A::outfile, A::s; +if (3 == A::try) +{ +print A::outfile, done; +close(A::outfile); +Input::remove(input); +} + +}] +Input::EVENT_NEW +3rw43wRRERLlL#RWERERERE. 
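In short: a source string ending in '|' now makes the raw reader execute the command via popen() instead of opening a file; execute mode accepts only MANUAL and STREAM reads, and in STREAM mode the command's descriptor is set non-blocking so new output is picked up on subsequent updates. A minimal sketch of both forms, condensed from the tests below (the record/event layout follows executeraw.bro and executestreamraw.bro; the stream names used here are only illustrative):

	module A;

	type Val: record {
		s: string;
	};

	event line(description: Input::EventDescription, tpe: Input::Event, s: string) {
		print s;
	}

	event bro_init()
	{
		# One-shot: run the command once and deliver its complete output line by line.
		Input::add_event([$source="wc input.log |", $reader=Input::READER_RAW,
		                  $name="wc", $fields=Val, $ev=line]);

		# Streaming: keep delivering new lines as the command produces them.
		Input::add_event([$source="tail -f input.log |", $reader=Input::READER_RAW,
		                  $mode=Input::STREAM, $name="tail", $fields=Val, $ev=line]);
	}
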
diff --git a/testing/btest/scripts/base/frameworks/input/executeraw.bro b/testing/btest/scripts/base/frameworks/input/executeraw.bro index 85c1415bf3..6fceebf885 100644 --- a/testing/btest/scripts/base/frameworks/input/executeraw.bro +++ b/testing/btest/scripts/base/frameworks/input/executeraw.bro @@ -28,6 +28,6 @@ event line(description: Input::EventDescription, tpe: Input::Event, s: string) { event bro_init() { - Input::add_event([$source="wc input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $mode=Input::EXECUTE]); + Input::add_event([$source="wc input.log |", $reader=Input::READER_RAW, $name="input", $fields=Val, $ev=line]); Input::remove("input"); } diff --git a/testing/btest/scripts/base/frameworks/input/executestreamraw.bro b/testing/btest/scripts/base/frameworks/input/executestreamraw.bro new file mode 100644 index 0000000000..d97a7b26a0 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/executestreamraw.bro @@ -0,0 +1,58 @@ +# +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: sleep 3 +# @TEST-EXEC: cat input2.log >> input.log +# @TEST-EXEC: sleep 3 +# @TEST-EXEC: cat input3.log >> input.log +# @TEST-EXEC: btest-bg-wait -k 3 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input1.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +@TEST-END-FILE + +@TEST-START-FILE input2.log +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +@TEST-END-FILE + +@TEST-START-FILE input3.log +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. + +@TEST-END-FILE + +@load frameworks/communication/listen + +module A; + +type Val: record { + s: string; +}; + +global try: count; +global outfile: file; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) { + print outfile, description; + print outfile, tpe; + print outfile, s; + try = try + 1; + + if ( try == 9 ) { + print outfile, "done"; + close(outfile); + Input::remove("input"); + } +} + +event bro_init() +{ + outfile = open ("../out"); + try = 0; + Input::add_event([$source="tail -f ../input.log |", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line]); +} diff --git a/testing/btest/scripts/base/frameworks/input/rereadraw.bro b/testing/btest/scripts/base/frameworks/input/rereadraw.bro new file mode 100644 index 0000000000..33361ad27e --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/rereadraw.bro @@ -0,0 +1,34 @@ +# +# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. 
+@TEST-END-FILE + + +module A; + +type Val: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) { + print description; + print tpe; + print s; +} + +event bro_init() +{ + Input::add_event([$source="input.log", $reader=Input::READER_RAW, $mode=Input::REREAD, $name="input", $fields=Val, $ev=line]); + Input::force_update("input"); + Input::remove("input"); +} diff --git a/testing/btest/scripts/base/frameworks/input/streamraw.bro b/testing/btest/scripts/base/frameworks/input/streamraw.bro new file mode 100644 index 0000000000..cc0afd5ae8 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/streamraw.bro @@ -0,0 +1,56 @@ +# +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: sleep 3 +# @TEST-EXEC: cat input2.log >> input.log +# @TEST-EXEC: sleep 3 +# @TEST-EXEC: cat input3.log >> input.log +# @TEST-EXEC: btest-bg-wait -k 3 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input1.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +@TEST-END-FILE + +@TEST-START-FILE input2.log +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +@TEST-END-FILE + +@TEST-START-FILE input3.log +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. +@TEST-END-FILE + +@load frameworks/communication/listen + +module A; + +type Val: record { + s: string; +}; + +global try: count; +global outfile: file; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) { + print outfile, description; + print outfile, tpe; + print outfile, s; + + if ( try == 3 ) { + print outfile, "done"; + close(outfile); + Input::remove("input"); + } +} + +event bro_init() +{ + outfile = open ("../out"); + try = 0; + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line]); +} From 1c1d6570395432b9bfef8fb9ab1c27f02491f754 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 20 Mar 2012 15:38:37 -0500 Subject: [PATCH 151/651] Changes to IPv6 ext. header parsing (addresses #795). In response to feedback from Robin: - rename "ip_hdr" to "ip4_hdr" - pkt_hdr$ip6 is now of type "ip6_hdr" instead of "ip6_hdr_chain" - "ip6_hdr_chain" no longer contains an "ip6_hdr" field, instead it's the other way around, "ip6_hdr" contains an "ip6_hdr_chain" - other internal refactoring --- scripts/base/init-bare.bro | 108 ++++-- src/Frag.cc | 18 +- src/IP.cc | 345 +++++++++--------- src/IP.h | 190 +++++----- src/PacketSort.cc | 5 +- src/Sessions.cc | 16 + testing/btest/Baseline/core.ipv6-frag/output | 10 +- testing/btest/Baseline/core.ipv6_esp/output | 240 ++++++------ .../Baseline/core.ipv6_ext_headers/output | 2 +- .../btest/bifs/routing0_data_to_addrs.test | 6 +- 10 files changed, 491 insertions(+), 449 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 98da9f331d..42215839c0 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -934,6 +934,10 @@ const ICMP_UNREACH_ADMIN_PROHIB = 13; ##< Adminstratively prohibited. # discarders. # todo::these should go into an enum to make them autodoc'able const IPPROTO_IP = 0; ##< Dummy for IP. [Robin] Rename to IPPROTO_IP4? +# [Jon] I'd say leave it be or remove it because from +# IPPROTO_IPV4 can actually be the same as IPPROTO_IPIP (4)... +# IPPROTO_IP seems to be just for use with the socket API and not +# actually identifying protocol numbers in packet headers const IPPROTO_ICMP = 1; ##< Control message protocol. const IPPROTO_IGMP = 2; ##< Group management protocol. 
const IPPROTO_IPIP = 4; ##< IP encapsulation in IP. @@ -944,6 +948,13 @@ const IPPROTO_RAW = 255; ##< Raw IP packet. # Definitions for IPv6 extension headers. # [Robin] Do we need a constant for unknown extensions? +# [Jon] I don't think so, these constants are just conveniences to improve +# script readability, but they also identify the actual assigned protocol +# number of the header type. If the core were to actually pass to the +# script-layer a next-header value of something we don't know about yet, +# that value would be the actual value seen in the packet, not something +# we should make up. We could provide a "KNOWN_PROTOCOLS" set for +# convenience that one could check membership against. const IPPROTO_HOPOPTS = 0; ##< IPv6 hop-by-hop-options header. const IPPROTO_ROUTING = 43; ##< IPv6 routing header. const IPPROTO_FRAGMENT = 44; ##< IPv6 fragment header. @@ -952,20 +963,6 @@ const IPPROTO_AH = 51; ##< IPv6 authentication header. const IPPROTO_NONE = 59; ##< IPv6 no next header. const IPPROTO_DSTOPTS = 60; ##< IPv6 destination options header. -## Values extracted from an IPv6 header. -## -## .. bro:see:: pkt_hdr ip_hdr ip6_hdr_chain ip6_hopopts ip6_dstopts ip6_routing -## ip6_fragment ip6_ah ip6_esp -type ip6_hdr: record { - class: count; ##< Traffic class. - flow: count; ##< Flow label. - len: count; ##< Payload length. - nxt: count; ##< Next header (RFC 1700 assigned number). # [Robin] That's just the IPPROTO_* constant right. Then we should refer to them. - hlim: count; ##< Hop limit. - src: addr; ##< Source address. - dst: addr; ##< Destination address. -}; - ## Values extracted from an IPv6 extension header's (e.g. hop-by-hop or ## destination option headers) option field. ## @@ -978,9 +975,10 @@ type ip6_option: record { ## Values extracted from an IPv6 Hop-by-Hop options extension header. ## -## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain ip6_option +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain ip6_option type ip6_hopopts: record { - ## Next header (RFC 1700 assigned number). + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :bro:id:`IPPROTO_ICMP`. nxt: count; ## Length of header in 8-octet units, excluding first unit. len: count; @@ -990,9 +988,10 @@ type ip6_hopopts: record { ## Values extracted from an IPv6 Destination options extension header. ## -## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain ip6_option +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain ip6_option type ip6_dstopts: record { - ## Next header (RFC 1700 assigned number). + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :bro:id:`IPPROTO_ICMP`. nxt: count; ## Length of header in 8-octet units, excluding first unit. len: count; @@ -1002,9 +1001,10 @@ type ip6_dstopts: record { ## Values extracted from an IPv6 Routing extension header. ## -## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain type ip6_routing: record { - ## Next header (RFC 1700 assigned number). + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :bro:id:`IPPROTO_ICMP`. nxt: count; ## Length of header in 8-octet units, excluding first unit. len: count; @@ -1018,9 +1018,10 @@ type ip6_routing: record { ## Values extracted from an IPv6 Fragment extension header. ## -## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain +## .. 
bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain type ip6_fragment: record { - ## Next header (RFC 1700 assigned number). + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :bro:id:`IPPROTO_ICMP`. nxt: count; ## 8-bit reserved field. rsv1: count; @@ -1036,9 +1037,10 @@ type ip6_fragment: record { ## Values extracted from an IPv6 Authentication extension header. ## -## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain type ip6_ah: record { - ## Next header (RFC 1700 assigned number). # [Robin] Same as above. + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :bro:id:`IPPROTO_ICMP`. nxt: count; ## Length of header in 4-octet units, excluding first two units. len: count; @@ -1054,7 +1056,7 @@ type ip6_ah: record { ## Values extracted from an IPv6 ESP extension header. ## -## .. bro:see:: pkt_hdr ip_hdr ip6_hdr ip6_hdr_chain +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain type ip6_esp: record { ## Security Parameters Index. spi: count; @@ -1064,20 +1066,24 @@ type ip6_esp: record { ## An IPv6 header chain. ## -## .. bro:see:: pkt_hdr ip_hdr -# -# [Robin] How about turning ip6_hdr_chain and ip6_hdr around, making the latter -# the top-level record that then contains an ip6_hdr_chain instance. That way, the -# pkt_hdr record would have ip4_hdr and ip6_hdr members, which seems more natural. +## .. bro:see:: pkt_hdr ip4_hdr # # [Robin] What happens to unknown extension headers? We should keep them too so that # one can at least identify what one can't analyze. +# [Jon] Currently, they show up as "unknown_protocol" weirds and those packets +# are skipped before any "new_packet" or "ipv6_ext_headers" events are +# raised as those depend on a connection parameter which can't be +# created since we can't parse past unknown extension headers to get +# at the upper layer protocol. Does that seem reasonable for at +# being able to identify things that couldn't be analyzed? type ip6_hdr_chain: record { # [Robin] This looses the order of the headers (partially at least, even with ext_order I believe). # Not sure how to do it differently, but can order be important for us? + # [Jon] I do think order can be interesting as RFC 2460 specifies some + # ordering constraints, and I think I provide enough info in this + # record for one to reconstruct the order. Reread my new comments + # for the "ext_order" field below and see if you change your mind. - ## The main IPv6 header. - hdr: ip6_hdr; ## Hop-by-hop option extension header. hopopts: vector of ip6_hopopts; ## Destination option extension headers. @@ -1091,17 +1097,39 @@ type ip6_hdr_chain: record { ## Encapsulating security payload headers. esp: vector of ip6_esp; - ## Order of extension headers identified by RFC 1700 assigned numbers. - # [Robin] I don't understand how this works. + ## Order of extension headers as seen in the packet header. + ## The value at an index indicates the protocol number (RFC 1700 et seq., + ## IANA assigned number) of the header at that same position in the chain. + ## e.g. if :bro:id:`IPPROTO_DSTOPTS` is at index 0 and index 2 and + ## :bro:id:`IPPROTO_ROUTING` is at index 1, then the order of the headers + ## in the chain is the header at index 0 of *dstopts* followed by + ## the header at index 0 of *routing* and then the header at index 1 of + ## *dstopts* (tracking of duplicate header types to know where to + ## index into each vector would be up to the script following the chain). 
ext_order: vector of count; }; +## Values extracted from an IPv6 header. +## +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr_chain ip6_hopopts ip6_dstopts +## ip6_routing ip6_fragment ip6_ah ip6_esp +type ip6_hdr: record { + class: count; ##< Traffic class. + flow: count; ##< Flow label. + len: count; ##< Payload length. + nxt: count; ##< Protocol number of the next header + ##< (RFC 1700 et seq., IANA assigned number), e.g. + ##< :bro:id:`IPPROTO_ICMP`. + hlim: count; ##< Hop limit. + src: addr; ##< Source address. + dst: addr; ##< Destination address. + exts: ip6_hdr_chain;##< Extension header chain. +}; + ## Values extracted from an IPv4 header. ## ## .. bro:see:: pkt_hdr ip6_hdr discarder_check_ip -## -# [Robin] Rename to ip4_hdr? -type ip_hdr: record { +type ip4_hdr: record { hl: count; ##< Header length in bytes. tos: count; ##< Type of service. len: count; ##< Total length. @@ -1159,9 +1187,11 @@ type icmp_hdr: record { # # [Robin] Add flags saying whether it's v4/v6, tcp/udp/icmp? The day will come where # we can't infer that from the connection anymore (tunnels). +# [Jon] I'm not sure what you mean, doesn't checking result of ?$ operator +# always work for finding out protocols involved? type pkt_hdr: record { - ip: ip_hdr &optional; ##< The IPv4 header if an IPv4 packet. - ip6: ip6_hdr_chain &optional; ##< The IPv6 header chain if an IPv6 packet. + ip: ip4_hdr &optional; ##< The IPv4 header if an IPv4 packet. + ip6: ip6_hdr &optional; ##< The IPv6 header if an IPv6 packet. tcp: tcp_hdr &optional; ##< The TCP header if a TCP packet. udp: udp_hdr &optional; ##< The UDP header if a UDP packet. icmp: icmp_hdr &optional; ##< The ICMP header if an ICMP packet. diff --git a/src/Frag.cc b/src/Frag.cc index 68c5c108f1..5fcad35560 100644 --- a/src/Frag.cc +++ b/src/Frag.cc @@ -34,10 +34,13 @@ FragReassembler::FragReassembler(NetSessions* arg_s, key = k; // [Robin] Can't we merge these two cases now? + // [Jon] I think we'll always have to check v4 versus v6 to get the correct + // proto_hdr_len unless IP_Hdr::HdrLen itself makes a special case for + // IPv6 fragments (but that seems more confusing to me) const struct ip* ip4 = ip->IP4_Hdr(); if ( ip4 ) { - proto_hdr_len = ip4->ip_hl * 4; // [Robin] HdrLen? + proto_hdr_len = ip->HdrLen(); proto_hdr = new u_char[64]; // max IP header + slop // Don't do a structure copy - need to pick up options, too. memcpy((void*) proto_hdr, (const void*) ip4, proto_hdr_len); @@ -247,12 +250,7 @@ void FragReassembler::BlockInserted(DataBlock* /* start_block */) reassembled_pkt = new IP_Hdr(reassem4, true); } - // [Robin] Please always check for IP version explicitly, like here - // do "if ... ip_v == 6", and then catch other values via - // weird/errors. Even of it shouldn't happen (because of earlier - // checks), it's better to be safe. I believe there are more places - // like this elsewhere, please check. 
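
(As an illustrative aside, not part of the patch: a minimal sketch of how a script could consume the reshaped records defined above. It assumes the standard new_packet event and the field names introduced in this patch; presence of each protocol header is tested with the ?$ operator, and exts$ext_order is walked to recover the on-the-wire order of the IPv6 extension headers.)

event new_packet(c: connection, p: pkt_hdr)
	{
	if ( p?$ip )
		print "IPv4 packet";
	else if ( p?$ip6 )
		{
		print "IPv6 packet";

		# ext_order holds the protocol number of each extension header in
		# wire order; the per-type vectors (hopopts, dstopts, routing,
		# fragment, ah, esp) hold the parsed headers themselves.
		for ( i in p$ip6$exts$ext_order )
			print fmt("ext header at position %d has protocol %d",
				  i, p$ip6$exts$ext_order[i]);
		}

	if ( p?$tcp )
		print "TCP";
	else if ( p?$udp )
		print "UDP";
	else if ( p?$icmp )
		print "ICMP";
	}
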
- else + else if ( ((const struct ip*)pkt_start)->ip_v == 6 ) { struct ip6_hdr* reassem6 = (struct ip6_hdr*) pkt_start; reassem6->ip6_plen = htons(frag_size + proto_hdr_len - 40); @@ -260,6 +258,12 @@ void FragReassembler::BlockInserted(DataBlock* /* start_block */) reassembled_pkt = new IP_Hdr(reassem6, true, chain); } + else + { + reporter->InternalError("bad IP version in fragment reassembly"); + } + + DeleteTimer(); } diff --git a/src/IP.cc b/src/IP.cc index 77797ece8f..9dcb372d3f 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -5,7 +5,7 @@ #include "Val.h" #include "Var.h" -static RecordType* ip_hdr_type = 0; +static RecordType* ip4_hdr_type = 0; static RecordType* ip6_hdr_type = 0; static RecordType* ip6_hdr_chain_type = 0; static RecordType* ip6_option_type = 0; @@ -22,20 +22,6 @@ static inline RecordType* hdrType(RecordType*& type, const char* name) return type; } -RecordVal* IPv6_Hdr::BuildRecordVal() const - { - RecordVal* rv = new RecordVal(hdrType(ip6_hdr_type, "ip6_hdr")); - const struct ip6_hdr* ip6 = (const struct ip6_hdr*)data; - rv->Assign(0, new Val((ntohl(ip6->ip6_flow) & 0x0ff00000)>>20, TYPE_COUNT)); - rv->Assign(1, new Val(ntohl(ip6->ip6_flow) & 0x000fffff, TYPE_COUNT)); - rv->Assign(2, new Val(ntohs(ip6->ip6_plen), TYPE_COUNT)); - rv->Assign(3, new Val(ip6->ip6_nxt, TYPE_COUNT)); - rv->Assign(4, new Val(ip6->ip6_hlim, TYPE_COUNT)); - rv->Assign(5, new AddrVal(ip6->ip6_src)); - rv->Assign(6, new AddrVal(ip6->ip6_dst)); - return rv; - } - static VectorVal* BuildOptionsVal(const u_char* data, uint16 len) { VectorVal* vv = new VectorVal(new VectorType( @@ -71,73 +57,100 @@ static VectorVal* BuildOptionsVal(const u_char* data, uint16 len) return vv; } -RecordVal* IPv6_HopOpts::BuildRecordVal() const +RecordVal* IPv6_Hdr::BuildRecordVal() const { - RecordVal* rv = new RecordVal(hdrType(ip6_hopopts_type, "ip6_hopopts")); - const struct ip6_hbh* hbh = (const struct ip6_hbh*)data; - rv->Assign(0, new Val(hbh->ip6h_nxt, TYPE_COUNT)); - rv->Assign(1, new Val(hbh->ip6h_len, TYPE_COUNT)); - uint16 off = 2 * sizeof(uint8); - rv->Assign(2, BuildOptionsVal(data + off, Length() - off)); - return rv; + RecordVal* rv = 0; + + switch ( type ) { + case IPPROTO_IPV6: + { + rv = new RecordVal(hdrType(ip6_hdr_type, "ip6_hdr")); + const struct ip6_hdr* ip6 = (const struct ip6_hdr*)data; + rv->Assign(0, new Val((ntohl(ip6->ip6_flow) & 0x0ff00000)>>20, TYPE_COUNT)); + rv->Assign(1, new Val(ntohl(ip6->ip6_flow) & 0x000fffff, TYPE_COUNT)); + rv->Assign(2, new Val(ntohs(ip6->ip6_plen), TYPE_COUNT)); + rv->Assign(3, new Val(ip6->ip6_nxt, TYPE_COUNT)); + rv->Assign(4, new Val(ip6->ip6_hlim, TYPE_COUNT)); + rv->Assign(5, new AddrVal(ip6->ip6_src)); + rv->Assign(6, new AddrVal(ip6->ip6_dst)); + } + break; + + case IPPROTO_HOPOPTS: + { + rv = new RecordVal(hdrType(ip6_hopopts_type, "ip6_hopopts")); + const struct ip6_hbh* hbh = (const struct ip6_hbh*)data; + rv->Assign(0, new Val(hbh->ip6h_nxt, TYPE_COUNT)); + rv->Assign(1, new Val(hbh->ip6h_len, TYPE_COUNT)); + uint16 off = 2 * sizeof(uint8); + rv->Assign(2, BuildOptionsVal(data + off, Length() - off)); + + } + break; + + case IPPROTO_DSTOPTS: + { + rv = new RecordVal(hdrType(ip6_dstopts_type, "ip6_dstopts")); + const struct ip6_dest* dst = (const struct ip6_dest*)data; + rv->Assign(0, new Val(dst->ip6d_nxt, TYPE_COUNT)); + rv->Assign(1, new Val(dst->ip6d_len, TYPE_COUNT)); + uint16 off = 2 * sizeof(uint8); + rv->Assign(2, BuildOptionsVal(data + off, Length() - off)); + } + break; + + case IPPROTO_ROUTING: + { + rv = new RecordVal(hdrType(ip6_routing_type, 
"ip6_routing")); + const struct ip6_rthdr* rt = (const struct ip6_rthdr*)data; + rv->Assign(0, new Val(rt->ip6r_nxt, TYPE_COUNT)); + rv->Assign(1, new Val(rt->ip6r_len, TYPE_COUNT)); + rv->Assign(2, new Val(rt->ip6r_type, TYPE_COUNT)); + rv->Assign(3, new Val(rt->ip6r_segleft, TYPE_COUNT)); + uint16 off = 4 * sizeof(uint8); + rv->Assign(4, new StringVal(new BroString(data + off, Length() - off, 1))); + } + break; + + case IPPROTO_FRAGMENT: + { + rv = new RecordVal(hdrType(ip6_fragment_type, "ip6_fragment")); + const struct ip6_frag* frag = (const struct ip6_frag*)data; + rv->Assign(0, new Val(frag->ip6f_nxt, TYPE_COUNT)); + rv->Assign(1, new Val(frag->ip6f_reserved, TYPE_COUNT)); + rv->Assign(2, new Val((ntohs(frag->ip6f_offlg) & 0xfff8)>>3, TYPE_COUNT)); + rv->Assign(3, new Val((ntohs(frag->ip6f_offlg) & 0x0006)>>1, TYPE_COUNT)); + rv->Assign(4, new Val(ntohs(frag->ip6f_offlg) & 0x0001, TYPE_BOOL)); + rv->Assign(5, new Val(ntohl(frag->ip6f_ident), TYPE_COUNT)); + } + break; + + case IPPROTO_AH: + { + rv = new RecordVal(hdrType(ip6_ah_type, "ip6_ah")); + rv->Assign(0, new Val(((ip6_ext*)data)->ip6e_nxt, TYPE_COUNT)); + rv->Assign(1, new Val(((ip6_ext*)data)->ip6e_len, TYPE_COUNT)); + rv->Assign(2, new Val(ntohs(((uint16*)data)[1]), TYPE_COUNT)); + rv->Assign(3, new Val(ntohl(((uint32*)data)[1]), TYPE_COUNT)); + rv->Assign(4, new Val(ntohl(((uint32*)data)[2]), TYPE_COUNT)); + uint16 off = 3 * sizeof(uint32); + rv->Assign(5, new StringVal(new BroString(data + off, Length() - off, 1))); + } + break; + + case IPPROTO_ESP: + { + rv = new RecordVal(hdrType(ip6_esp_type, "ip6_esp")); + const uint32* esp = (const uint32*)data; + rv->Assign(0, new Val(ntohl(esp[0]), TYPE_COUNT)); + rv->Assign(1, new Val(ntohl(esp[1]), TYPE_COUNT)); + } + break; + + default: + break; } -RecordVal* IPv6_DstOpts::BuildRecordVal() const - { - RecordVal* rv = new RecordVal(hdrType(ip6_dstopts_type, "ip6_dstopts")); - const struct ip6_dest* dst = (const struct ip6_dest*)data; - rv->Assign(0, new Val(dst->ip6d_nxt, TYPE_COUNT)); - rv->Assign(1, new Val(dst->ip6d_len, TYPE_COUNT)); - uint16 off = 2 * sizeof(uint8); - rv->Assign(2, BuildOptionsVal(data + off, Length() - off)); - return rv; - } - -RecordVal* IPv6_Routing::BuildRecordVal() const - { - RecordVal* rv = new RecordVal(hdrType(ip6_routing_type, "ip6_routing")); - const struct ip6_rthdr* rt = (const struct ip6_rthdr*)data; - rv->Assign(0, new Val(rt->ip6r_nxt, TYPE_COUNT)); - rv->Assign(1, new Val(rt->ip6r_len, TYPE_COUNT)); - rv->Assign(2, new Val(rt->ip6r_type, TYPE_COUNT)); - rv->Assign(3, new Val(rt->ip6r_segleft, TYPE_COUNT)); - uint16 off = 4 * sizeof(uint8); - rv->Assign(4, new StringVal(new BroString(data + off, Length() - off, 1))); - return rv; - } - -RecordVal* IPv6_Fragment::BuildRecordVal() const - { - RecordVal* rv = new RecordVal(hdrType(ip6_fragment_type, "ip6_fragment")); - const struct ip6_frag* frag = (const struct ip6_frag*)data; - rv->Assign(0, new Val(frag->ip6f_nxt, TYPE_COUNT)); - rv->Assign(1, new Val(frag->ip6f_reserved, TYPE_COUNT)); - rv->Assign(2, new Val((ntohs(frag->ip6f_offlg) & 0xfff8)>>3, TYPE_COUNT)); - rv->Assign(3, new Val((ntohs(frag->ip6f_offlg) & 0x0006)>>1, TYPE_COUNT)); - rv->Assign(4, new Val(ntohs(frag->ip6f_offlg) & 0x0001, TYPE_BOOL)); - rv->Assign(5, new Val(ntohl(frag->ip6f_ident), TYPE_COUNT)); - return rv; - } - -RecordVal* IPv6_AH::BuildRecordVal() const - { - RecordVal* rv = new RecordVal(hdrType(ip6_ah_type, "ip6_ah")); - rv->Assign(0, new Val(((ip6_ext*)data)->ip6e_nxt, TYPE_COUNT)); - rv->Assign(1, new 
Val(((ip6_ext*)data)->ip6e_len, TYPE_COUNT)); - rv->Assign(2, new Val(ntohs(((uint16*)data)[1]), TYPE_COUNT)); - rv->Assign(3, new Val(ntohl(((uint32*)data)[1]), TYPE_COUNT)); - rv->Assign(4, new Val(ntohl(((uint32*)data)[2]), TYPE_COUNT)); - uint16 off = 3 * sizeof(uint32); - rv->Assign(5, new StringVal(new BroString(data + off, Length() - off, 1))); - return rv; - } - -RecordVal* IPv6_ESP::BuildRecordVal() const - { - RecordVal* rv = new RecordVal(hdrType(ip6_esp_type, "ip6_esp")); - const uint32* esp = (const uint32*)data; - rv->Assign(0, new Val(ntohl(esp[0]), TYPE_COUNT)); - rv->Assign(1, new Val(ntohl(esp[1]), TYPE_COUNT)); return rv; } @@ -145,22 +158,9 @@ RecordVal* IP_Hdr::BuildIPHdrVal() const { RecordVal* rval = 0; - if ( ! ip_hdr_type ) - { - ip_hdr_type = internal_type("ip_hdr")->AsRecordType(); - ip6_hdr_type = internal_type("ip6_hdr")->AsRecordType(); - ip6_hdr_chain_type = internal_type("ip6_hdr_chain")->AsRecordType(); - ip6_hopopts_type = internal_type("ip6_hopopts")->AsRecordType(); - ip6_dstopts_type = internal_type("ip6_dstopts")->AsRecordType(); - ip6_routing_type = internal_type("ip6_routing")->AsRecordType(); - ip6_fragment_type = internal_type("ip6_fragment")->AsRecordType(); - ip6_ah_type = internal_type("ip6_ah")->AsRecordType(); - ip6_esp_type = internal_type("ip6_esp")->AsRecordType(); - } - if ( ip4 ) { - rval = new RecordVal(ip_hdr_type); + rval = new RecordVal(hdrType(ip4_hdr_type, "ip4_hdr")); rval->Assign(0, new Val(ip4->ip_hl * 4, TYPE_COUNT)); rval->Assign(1, new Val(ip4->ip_tos, TYPE_COUNT)); rval->Assign(2, new Val(ntohs(ip4->ip_len), TYPE_COUNT)); @@ -172,55 +172,8 @@ RecordVal* IP_Hdr::BuildIPHdrVal() const } else { - rval = new RecordVal(ip6_hdr_chain_type); - - VectorVal* hopopts = new VectorVal(new VectorType(ip6_hopopts_type->Ref())); - VectorVal* dstopts = new VectorVal(new VectorType(ip6_dstopts_type->Ref())); - VectorVal* routing = new VectorVal(new VectorType(ip6_routing_type->Ref())); - VectorVal* fragment = new VectorVal(new VectorType(ip6_fragment_type->Ref())); - VectorVal* ah = new VectorVal(new VectorType(ip6_ah_type->Ref())); - VectorVal* esp = new VectorVal(new VectorType(ip6_esp_type->Ref())); - VectorVal* order = new VectorVal(new VectorType(base_type(TYPE_COUNT))); - - for ( size_t i = 1; i < ip6_hdrs->Size(); ++i ) - { - RecordVal* v = ((*ip6_hdrs)[i])->BuildRecordVal(); - uint8 type = ((*ip6_hdrs)[i])->Type(); - switch (type) { - case IPPROTO_HOPOPTS: - hopopts->Assign(hopopts->Size(), v, 0); - break; - case IPPROTO_ROUTING: - routing->Assign(routing->Size(), v, 0); - break; - case IPPROTO_DSTOPTS: - dstopts->Assign(dstopts->Size(), v, 0); - break; - case IPPROTO_FRAGMENT: - fragment->Assign(fragment->Size(), v, 0); - break; - case IPPROTO_AH: - ah->Assign(ah->Size(), v, 0); - break; - case IPPROTO_ESP: - esp->Assign(esp->Size(), v, 0); - break; - case IPPROTO_IPV6: - default: - reporter->InternalError("pkt_hdr assigned bad header %d", type); - break; - } - order->Assign(i-1, new Val(type, TYPE_COUNT), 0); - } - - rval->Assign(0, ((*ip6_hdrs)[0])->BuildRecordVal()); - rval->Assign(1, hopopts); - rval->Assign(2, dstopts); - rval->Assign(3, routing); - rval->Assign(4, fragment); - rval->Assign(5, ah); - rval->Assign(6, esp); - rval->Assign(7, order); + rval = ((*ip6_hdrs)[0])->BuildRecordVal(); + rval->Assign(7, ip6_hdrs->BuildRecordVal()); } return rval; @@ -308,34 +261,6 @@ RecordVal* IP_Hdr::BuildPktHdrVal() const return pkt_hdr; } -static inline IPv6_Hdr* getIPv6Header(uint8 type, const u_char* d, - bool set_next = false, 
uint16 nxt = 0) - { - switch (type) { - case IPPROTO_IPV6: - return set_next ? new IPv6_Hdr(d, nxt) : new IPv6_Hdr(d); - case IPPROTO_HOPOPTS: - return set_next ? new IPv6_HopOpts(d, nxt) : new IPv6_HopOpts(d); - case IPPROTO_ROUTING: - return set_next ? new IPv6_Routing(d, nxt) : new IPv6_Routing(d); - case IPPROTO_DSTOPTS: - return set_next ? new IPv6_DstOpts(d, nxt) : new IPv6_DstOpts(d); - case IPPROTO_FRAGMENT: - return set_next ? new IPv6_Fragment(d, nxt) : new IPv6_Fragment(d); - case IPPROTO_AH: - return set_next ? new IPv6_AH(d, nxt) : new IPv6_AH(d); - case IPPROTO_ESP: - return new IPv6_ESP(d); // never able to set ESP header's next - default: - // should never get here if calls are protected by isIPv6ExtHeader() - reporter->InternalError("Unknown IPv6 header type: %d", type); - break; - } - // can't be reached - assert(false); - return 0; - } - static inline bool isIPv6ExtHeader(uint8 type) { switch (type) { @@ -361,12 +286,86 @@ void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) do { current_type = next_type; - chain.push_back(getIPv6Header(current_type, hdrs, set_next, next)); - next_type = chain[chain.size()-1]->NextHdr(); - uint16 len = chain[chain.size()-1]->Length(); + IPv6_Hdr* p = new IPv6_Hdr(current_type, hdrs); + + next_type = p->NextHdr(); + uint16 len = p->Length(); + + if ( set_next && next_type == IPPROTO_FRAGMENT ) + { + p->ChangeNext(next); + next_type = next; + } + + chain.push_back(p); + hdrs += len; length += len; } while ( current_type != IPPROTO_FRAGMENT && current_type != IPPROTO_ESP && isIPv6ExtHeader(next_type) ); } + +RecordVal* IPv6_Hdr_Chain::BuildRecordVal() const + { + if ( ! ip6_hdr_chain_type ) + { + ip6_hdr_chain_type = internal_type("ip6_hdr_chain")->AsRecordType(); + ip6_hopopts_type = internal_type("ip6_hopopts")->AsRecordType(); + ip6_dstopts_type = internal_type("ip6_dstopts")->AsRecordType(); + ip6_routing_type = internal_type("ip6_routing")->AsRecordType(); + ip6_fragment_type = internal_type("ip6_fragment")->AsRecordType(); + ip6_ah_type = internal_type("ip6_ah")->AsRecordType(); + ip6_esp_type = internal_type("ip6_esp")->AsRecordType(); + } + + RecordVal* rval = new RecordVal(ip6_hdr_chain_type); + + VectorVal* hopopts = new VectorVal(new VectorType(ip6_hopopts_type->Ref())); + VectorVal* dstopts = new VectorVal(new VectorType(ip6_dstopts_type->Ref())); + VectorVal* routing = new VectorVal(new VectorType(ip6_routing_type->Ref())); + VectorVal* fragment = new VectorVal(new VectorType(ip6_fragment_type->Ref())); + VectorVal* ah = new VectorVal(new VectorType(ip6_ah_type->Ref())); + VectorVal* esp = new VectorVal(new VectorType(ip6_esp_type->Ref())); + VectorVal* order = new VectorVal(new VectorType(base_type(TYPE_COUNT))); + + for ( size_t i = 1; i < chain.size(); ++i ) + { + RecordVal* v = chain[i]->BuildRecordVal(); + uint8 type = chain[i]->Type(); + switch (type) { + case IPPROTO_HOPOPTS: + hopopts->Assign(hopopts->Size(), v, 0); + break; + case IPPROTO_ROUTING: + routing->Assign(routing->Size(), v, 0); + break; + case IPPROTO_DSTOPTS: + dstopts->Assign(dstopts->Size(), v, 0); + break; + case IPPROTO_FRAGMENT: + fragment->Assign(fragment->Size(), v, 0); + break; + case IPPROTO_AH: + ah->Assign(ah->Size(), v, 0); + break; + case IPPROTO_ESP: + esp->Assign(esp->Size(), v, 0); + break; + case IPPROTO_IPV6: + default: + reporter->InternalError("pkt_hdr assigned bad header %d", type); + break; + } + order->Assign(i-1, new Val(type, TYPE_COUNT), 0); + } + + rval->Assign(0, hopopts); + rval->Assign(1, dstopts); + 
rval->Assign(2, routing); + rval->Assign(3, fragment); + rval->Assign(4, ah); + rval->Assign(5, esp); + rval->Assign(6, order); + return rval; + } diff --git a/src/IP.h b/src/IP.h index b876a2ac3b..5e5e3c0748 100644 --- a/src/IP.h +++ b/src/IP.h @@ -22,56 +22,94 @@ // members: we're creating/allocating those for every IPv6 packet, right? // // Any idea how to avoid these? +// +// [Jon] Seems fair enough to just remove the virtual method concern at this +// point by replacing the class hierarchy with some inline functions that +// do switch statements. I don't know what to do about the +// vector and ip6_hdrs data members being allocated for every +// IPv6 packet, maybe it's too early to try to optimize before we know +// the frequency at which extension headers appear in real IPv6 traffic? /** * Base class for IPv6 header/extensions. */ class IPv6_Hdr { public: - IPv6_Hdr() : type(0), data(0) {} - - /** - * Construct the main IPv6 header. - */ - IPv6_Hdr(const u_char* d) : type(IPPROTO_IPV6), data(d) {} - - /** - * Construct the main IPv6 header, but replace the next protocol field - * if it points to a fragment. - */ - IPv6_Hdr(const u_char* d, uint16 nxt) : type(IPPROTO_IPV6), data(d) - { - // [Robin]. This looks potentially dangerous as it's changing - // the data passed in, which the caller may not realize. From - // quick look, it's only used from Frag.cc, so that may be - // ok. But could we guard against accidental use somehome? - // Like making this protected and then declare a friend; or a - // seperate method ChangeNext(). (I saw it's used by derived - // classes so not sure wehat works best.) - if ( ((ip6_hdr*)data)->ip6_nxt == IPPROTO_FRAGMENT ) - ((ip6_hdr*)data)->ip6_nxt = nxt; - } - /** * Construct an IPv6 header or extension header from assigned type number. */ IPv6_Hdr(uint8 t, const u_char* d) : type(t), data(d) {} - virtual ~IPv6_Hdr() {} + /** + * Replace the value of the next protocol field. + */ + void ChangeNext(uint8 next_type) + { + switch ( type ) { + case IPPROTO_IPV6: + ((ip6_hdr*)data)->ip6_nxt = next_type; + break; + case IPPROTO_HOPOPTS: + case IPPROTO_DSTOPTS: + case IPPROTO_ROUTING: + case IPPROTO_FRAGMENT: + case IPPROTO_AH: + ((ip6_ext*)data)->ip6e_nxt = next_type; + break; + case IPPROTO_ESP: + default: + break; + } + } + + ~IPv6_Hdr() {} /** * Returns the assigned IPv6 extension header type number of the header * that immediately follows this one. */ - virtual uint8 NextHdr() const { return ((ip6_hdr*)data)->ip6_nxt; } + uint8 NextHdr() const + { + switch ( type ) { + case IPPROTO_IPV6: + return ((ip6_hdr*)data)->ip6_nxt; + case IPPROTO_HOPOPTS: + case IPPROTO_DSTOPTS: + case IPPROTO_ROUTING: + case IPPROTO_FRAGMENT: + case IPPROTO_AH: + return ((ip6_ext*)data)->ip6e_nxt; + case IPPROTO_ESP: + default: + return IPPROTO_NONE; + } + } /** * Returns the length of the header in bytes. */ - virtual uint16 Length() const { return 40; } + uint16 Length() const + { + switch ( type ) { + case IPPROTO_IPV6: + return 40; + case IPPROTO_HOPOPTS: + case IPPROTO_DSTOPTS: + case IPPROTO_ROUTING: + return 8 + 8 * ((ip6_ext*)data)->ip6e_len; + case IPPROTO_FRAGMENT: + return 8; + case IPPROTO_AH: + return 8 + 4 * ((ip6_ext*)data)->ip6e_len; + case IPPROTO_ESP: + return 8; //encrypted payload begins after 8 bytes + default: + return 0; + } + } /** - * Returns the RFC 1700 assigned number indicating the header type. + * Returns the RFC 1700 et seq. IANA assigned number for the header. 
*/ uint8 Type() const { return type; } @@ -83,75 +121,13 @@ public: /** * Returns the script-layer record representation of the header. */ - virtual RecordVal* BuildRecordVal() const; + RecordVal* BuildRecordVal() const; protected: uint8 type; const u_char* data; }; -class IPv6_Ext : public IPv6_Hdr { -public: - IPv6_Ext(uint16 type, const u_char* d) : IPv6_Hdr(type, d) {} - IPv6_Ext(uint16 type, const u_char* d, uint16 nxt) : IPv6_Hdr(type, d) - { - if ( ((ip6_ext*)data)->ip6e_nxt == IPPROTO_FRAGMENT ) - ((ip6_ext*)data)->ip6e_nxt = nxt; - } - uint8 NextHdr() const { return ((ip6_ext*)data)->ip6e_nxt; } - virtual uint16 Length() const = 0; - virtual RecordVal* BuildRecordVal() const = 0; -}; - -class IPv6_HopOpts : public IPv6_Ext { -public: - IPv6_HopOpts(const u_char* d) : IPv6_Ext(IPPROTO_HOPOPTS, d) {} - IPv6_HopOpts(const u_char* d, uint16 n) : IPv6_Ext(IPPROTO_HOPOPTS, d, n) {} - uint16 Length() const { return 8 + 8 * ((ip6_ext*)data)->ip6e_len; } - RecordVal* BuildRecordVal() const; -}; - -class IPv6_DstOpts : public IPv6_Ext { -public: - IPv6_DstOpts(const u_char* d) : IPv6_Ext(IPPROTO_DSTOPTS, d) {} - IPv6_DstOpts(const u_char* d, uint16 n) : IPv6_Ext(IPPROTO_DSTOPTS, d, n) {} - uint16 Length() const { return 8 + 8 * ((ip6_ext*)data)->ip6e_len; } - RecordVal* BuildRecordVal() const; -}; - -class IPv6_Routing : public IPv6_Ext { -public: - IPv6_Routing(const u_char* d) : IPv6_Ext(IPPROTO_ROUTING, d) {} - IPv6_Routing(const u_char* d, uint16 n) : IPv6_Ext(IPPROTO_ROUTING, d, n) {} - uint16 Length() const { return 8 + 8 * ((ip6_ext*)data)->ip6e_len; } - RecordVal* BuildRecordVal() const; -}; - -class IPv6_Fragment : public IPv6_Ext { -public: - IPv6_Fragment(const u_char* d) : IPv6_Ext(IPPROTO_FRAGMENT, d) {} - IPv6_Fragment(const u_char* d, uint16 n) : IPv6_Ext(IPPROTO_FRAGMENT, d, n) - {} - uint16 Length() const { return 8; } - RecordVal* BuildRecordVal() const; -}; - -class IPv6_AH : public IPv6_Ext { -public: - IPv6_AH(const u_char* d) : IPv6_Ext(IPPROTO_AH, d) {} - IPv6_AH(const u_char* d, uint16 n) : IPv6_Ext(IPPROTO_AH, d, n) {} - uint16 Length() const { return 8 + 4 * ((ip6_ext*)data)->ip6e_len; } - RecordVal* BuildRecordVal() const; -}; - -class IPv6_ESP : public IPv6_Ext { -public: - IPv6_ESP(const u_char* d) : IPv6_Ext(IPPROTO_ESP, d) {} - // encrypted payload begins after 8 bytes - uint16 Length() const { return 8; } - RecordVal* BuildRecordVal() const; -}; - class IPv6_Hdr_Chain { public: /** @@ -159,13 +135,6 @@ public: */ IPv6_Hdr_Chain(const struct ip6_hdr* ip6) { Init(ip6, false); } - /** - * Initializes the header chain from an IPv6 header structure, and replaces - * the first next protocol pointer field that points to a fragment header. - */ - IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next) - { Init(ip6, true, next); } - ~IPv6_Hdr_Chain() { for ( size_t i = 0; i < chain.size(); ++i ) delete chain[i]; } @@ -218,7 +187,24 @@ public: { return IsFragment() ? (ntohs(GetFragHdr()->ip6f_offlg) & 0x0001) != 0 : 0; } + /** + * Returns an ip6_hdr_chain RecordVal that includes script-layer + * representation of all extension headers in the chain. + */ + RecordVal* BuildRecordVal() const; + protected: + // for access to protected ctor that changes next header values that + // point to a fragment + friend class FragReassembler; + + /** + * Initializes the header chain from an IPv6 header structure, and replaces + * the first next protocol pointer field that points to a fragment header. 
+ */ + IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next) + { Init(ip6, true, next); } + void Init(const struct ip6_hdr* ip6, bool set_next, uint16 next = 0); vector chain; @@ -237,8 +223,12 @@ public: ip6 = (const struct ip6_hdr*)p; ip6_hdrs = new IPv6_Hdr_Chain(ip6); } - else if ( arg_del ) - delete [] p; + else + { + if ( arg_del ) + delete [] p; + reporter->InternalError("bad IP version in IP_Hdr ctor"); + } } IP_Hdr(const struct ip* arg_ip4, bool arg_del) diff --git a/src/PacketSort.cc b/src/PacketSort.cc index 7bfdaba9a0..aec7639f4a 100644 --- a/src/PacketSort.cc +++ b/src/PacketSort.cc @@ -28,8 +28,11 @@ PacketSortElement::PacketSortElement(PktSrc* arg_src, const struct ip* ip = (const struct ip*) (pkt + hdr_size); if ( ip->ip_v == 4 ) ip_hdr = new IP_Hdr(ip, false); - else + else if ( ip->ip_v == 6 ) ip_hdr = new IP_Hdr((const struct ip6_hdr*) ip, false); + else + // weird will be generated later in NetSessions::NextPacket + return; if ( ip_hdr->NextProto() == IPPROTO_TCP && // Note: can't sort fragmented packets diff --git a/src/Sessions.cc b/src/Sessions.cc index 675cc240c6..9e91fdc304 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -431,6 +431,11 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, return; // [Robin] dump_this_packet = 1 for non-ICMP/UDP/TCP removed here. Why? + // [Jon] The default case of the "switch ( proto )" calls Weird() which + // should set dump_this_packet = 1. The old code also returned + // at this point for non-ICMP/UDP/TCP, but for IPv6 fragments + // we need to do the reassembly first before knowing for sure what + // upper-layer protocol it is. FragReassembler* f = 0; @@ -468,8 +473,12 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, caplen -= ip_hdr_len; // [Robin] Does ESP need to be the last header? + // [Jon] In terms of what we try to parse, yes, we can't go any further + // in parsing a header chain once we reach an ESP one since + // encrypted payload immediately follows. if ( ip_hdr->LastHeader() == IPPROTO_ESP ) { + dump_this_packet = 1; if ( esp_packet ) { val_list* vl = new val_list(); @@ -491,6 +500,13 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, // [Robin] The Remove(f) used to be here, while it's now before every // return statement. I'm not seeing why? + // [Jon] That Remove(f) is still here above in the CheckHeaderTrunc() + // conditional that's just a refactoring of the old code. + // The reason why it's not done unconditionally after the reassembly + // is because doing that could cause the object that ip_hdr points + // to to be freed when we still need to use that below. + // I added Remove(f)'s before other "abnormal" return points that + // looked like they'd otherwise leak the memory. 
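
(Illustrative sketch only: since header parsing stops at an ESP header, the packet is dumped and, if handled, the esp_packet event is raised with the headers parsed so far. The handler below assumes the event's single parameter is the pkt_hdr value built for the packet; only the outer IPv6 header and the chain up to and including the ESP header are available, the encrypted payload is not.)

event esp_packet(p: pkt_hdr)
	{
	if ( p?$ip6 && |p$ip6$exts$esp| > 0 )
		print fmt("ESP packet: %s -> %s, SPI %d",
			  p$ip6$src, p$ip6$dst, p$ip6$exts$esp[0]$spi);
	}
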
const u_char* data = ip_hdr->Payload(); diff --git a/testing/btest/Baseline/core.ipv6-frag/output b/testing/btest/Baseline/core.ipv6-frag/output index 5020d94e8d..80c1a2cc93 100644 --- a/testing/btest/Baseline/core.ipv6-frag/output +++ b/testing/btest/Baseline/core.ipv6-frag/output @@ -1,5 +1,5 @@ -ip6=[hdr=[class=0, flow=0, len=81, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]], udp = [sport=51850/udp, dport=53/udp, ulen=81] -ip6=[hdr=[class=0, flow=0, len=331, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]], udp = [sport=53/udp, dport=51850/udp, ulen=331] -ip6=[hdr=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] -ip6=[hdr=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] -ip6=[hdr=[class=0, flow=0, len=3238, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]], udp = [sport=53/udp, dport=51851/udp, ulen=3238] +ip6=[class=0, flow=0, len=81, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]]], udp = [sport=51850/udp, dport=53/udp, ulen=81] +ip6=[class=0, flow=0, len=331, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]]], udp = [sport=53/udp, dport=51850/udp, ulen=331] +ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]]], udp = [sport=51851/udp, dport=53/udp, ulen=82] +ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]]], udp = [sport=51851/udp, dport=53/udp, ulen=82] +ip6=[class=0, flow=0, len=3238, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]]], udp = [sport=53/udp, dport=51851/udp, ulen=3238] diff --git a/testing/btest/Baseline/core.ipv6_esp/output b/testing/btest/Baseline/core.ipv6_esp/output index 645b4c8c56..db27689364 100644 --- a/testing/btest/Baseline/core.ipv6_esp/output +++ b/testing/btest/Baseline/core.ipv6_esp/output @@ -1,120 +1,120 @@ -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], 
ah=[], esp=[[spi=10, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, 
hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=8]], 
ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, 
src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, 
seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, 
src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=8]], 
ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=1]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=2]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=3]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=4]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=5]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=6]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=7]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=8]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=9]], ext_order=[50]], tcp=, udp=, icmp=] -[ip=, ip6=[hdr=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25], hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=10]], ext_order=[50]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, 
dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=3]], 
ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, 
dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], 
esp=[[spi=11, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, 
len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], 
routing=[], fragment=[], ah=[], esp=[[spi=21, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, 
ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] diff --git a/testing/btest/Baseline/core.ipv6_ext_headers/output b/testing/btest/Baseline/core.ipv6_ext_headers/output index 4cc9c706ae..9348cc41c8 100644 --- a/testing/btest/Baseline/core.ipv6_ext_headers/output +++ b/testing/btest/Baseline/core.ipv6_ext_headers/output @@ -1 +1 @@ -[ip=, ip6=[hdr=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b], hopopts=[[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]]], dstopts=[], routing=[[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B]], fragment=[], ah=[], esp=[], ext_order=[0, 43]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] +[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[hopopts=[[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]]], dstopts=[], routing=[[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B]], fragment=[], ah=[], esp=[], ext_order=[0, 43]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] diff --git a/testing/btest/bifs/routing0_data_to_addrs.test b/testing/btest/bifs/routing0_data_to_addrs.test index f150ec2a35..eb6ebbc614 100644 --- a/testing/btest/bifs/routing0_data_to_addrs.test +++ b/testing/btest/bifs/routing0_data_to_addrs.test @@ -3,7 +3,7 @@ event ipv6_ext_headers(c: connection, p: pkt_hdr) { - for ( h in p$ip6$routing ) - if ( 
p$ip6$routing[h]$rtype == 0 ) - print routing0_data_to_addrs(p$ip6$routing[h]$data); + for ( h in p$ip6$exts$routing ) + if ( p$ip6$exts$routing[h]$rtype == 0 ) + print routing0_data_to_addrs(p$ip6$exts$routing[h]$data); } From d39a389201180c730d6a7d24a2f0c6426ff07fe9 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 20 Mar 2012 14:11:59 -0700 Subject: [PATCH 152/651] make optional fields possible for input framework. These fields do not have to be present in the input file and are marked as &optional in the record description. They can, e.g., be used to create field values on the fly in a predicate while reading a file - example: Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $pred(typ: Input::Event, left: Idx, right: Val) = { right$notb = !right$b; return T; } --- src/input/Manager.cc | 10 ++++- src/input/readers/Ascii.cc | 24 ++++++++-- src/input/readers/Ascii.h | 1 + src/threading/SerialTypes.cc | 4 +- src/threading/SerialTypes.h | 5 ++- .../out | 9 ++++ .../base/frameworks/input/optional.bro | 45 +++++++++++++++++++ 7 files changed, 90 insertions(+), 8 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.optional/out create mode 100644 testing/btest/scripts/base/frameworks/input/optional.bro diff --git a/src/input/Manager.cc b/src/input/Manager.cc index d0386fbb3f..f6ba6f9f49 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -492,7 +492,11 @@ bool Manager::CreateTableStream(RecordVal* fval) { Unref(pred); if ( valfields > 1 ) { - assert(filter->want_record); + if ( ! filter->want_record ) { + reporter->Error("Stream %s does not want a record (want_record=F), but has more than one value field. Aborting", filter->name.c_str()); + delete filter; + return false; + } } @@ -631,6 +635,10 @@ bool Manager::UnrollRecordType(vector *fields, const RecordType *rec, co field->secondary_name = c->AsStringVal()->AsString()->CheckString(); } + if ( rec->FieldDecl(i)->FindAttr(ATTR_OPTIONAL ) ) { + field->optional = true; + } + fields->push_back(field); } } diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index a04a40e780..20ae79ab19 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -26,6 +26,7 @@ FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int { position = arg_position; secondary_position = -1; + present = true; } FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_subtype, int arg_position) @@ -33,10 +34,11 @@ FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, cons { position = arg_position; secondary_position = -1; + present = true; } FieldMapping::FieldMapping(const FieldMapping& arg) - : name(arg.name), type(arg.type), subtype(arg.subtype) + : name(arg.name), type(arg.type), subtype(arg.subtype), present(arg.present) { position = arg.position; secondary_position = arg.secondary_position; @@ -162,7 +164,15 @@ bool Ascii::ReadHeader(bool useCached) { map::iterator fit = ifields.find(field->name); if ( fit == ifields.end() ) { - Error(Fmt("Did not find requested field %s in input data file.", field->name.c_str())); + if ( field->optional ) { + // We do not really need this field; mark it as not present and always send an undef back.
+ FieldMapping f(field->name, field->type, field->subtype, -1); + f.present = false; + columnMap.push_back(f); + continue; + } + + Error(Fmt("Did not find requested field %s in input data file %s.", field->name.c_str(), fname.c_str())); return false; } @@ -220,7 +230,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { } else if ( s == "F" ) { val->val.int_val = 0; } else { - Error(Fmt("Invalid value for boolean: %s", s.c_str())); + Error(Fmt("Field: %s Invalid value for boolean: %s", field.name.c_str(), s.c_str())); return false; } break; @@ -423,6 +433,14 @@ bool Ascii::DoUpdate() { fit != columnMap.end(); fit++ ){ + if ( ! fit->present ) { + // add non-present field + fields[fpos] = new Value((*fit).type, false); + fpos++; + continue; + } + + assert(fit->position >= 0 ); if ( (*fit).position > pos || (*fit).secondary_position > pos ) { Error(Fmt("Not enough fields in line %s. Found %d fields, want positions %d and %d", line.c_str(), pos, (*fit).position, (*fit).secondary_position)); diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index 40f92be717..a9b14768fb 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -19,6 +19,7 @@ struct FieldMapping { int position; // for ports: pos of the second field int secondary_position; + bool present; FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position); FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_subtype, int arg_position); diff --git a/src/threading/SerialTypes.cc b/src/threading/SerialTypes.cc index 78556e5271..1d7255d695 100644 --- a/src/threading/SerialTypes.cc +++ b/src/threading/SerialTypes.cc @@ -13,7 +13,7 @@ bool Field::Read(SerializationFormat* fmt) int st; bool success = (fmt->Read(&name, "name") && fmt->Read(&secondary_name, "secondary_name") && - fmt->Read(&t, "type") && fmt->Read(&st, "subtype") ); + fmt->Read(&t, "type") && fmt->Read(&st, "subtype") && fmt->Read(&optional, "optional")); type = (TypeTag) t; subtype = (TypeTag) st; @@ -23,7 +23,7 @@ bool Field::Read(SerializationFormat* fmt) bool Field::Write(SerializationFormat* fmt) const { return (fmt->Write(name, "name") && fmt->Write(secondary_name, "secondary_name") && fmt->Write((int)type, "type") && - fmt->Write((int)subtype, "subtype")); + fmt->Write((int)subtype, "subtype") && fmt->Write(optional, "optional")); } Value::~Value() diff --git a/src/threading/SerialTypes.h b/src/threading/SerialTypes.h index ac34f3e476..bee84f2b54 100644 --- a/src/threading/SerialTypes.h +++ b/src/threading/SerialTypes.h @@ -24,17 +24,18 @@ struct Field { string secondary_name; TypeTag type; //! Type of the field. TypeTag subtype; //! Inner type for sets. + bool optional; //! Needed by the input framework: is the field optional, or does it have to be present in the input data? /** * Constructor. */ - Field() { subtype = TYPE_VOID; } + Field() { subtype = TYPE_VOID; optional = false; } /** * Copy constructor. */ Field(const Field& other) - : name(other.name), type(other.type), subtype(other.subtype) { } + : name(other.name), type(other.type), subtype(other.subtype), optional(other.optional) { } /** * Unserializes a field.
diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.optional/out b/testing/btest/Baseline/scripts.base.frameworks.input.optional/out new file mode 100644 index 0000000000..7a304fc918 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.optional/out @@ -0,0 +1,9 @@ +{ +[2] = [b=T, notb=F], +[4] = [b=F, notb=T], +[6] = [b=F, notb=T], +[7] = [b=T, notb=F], +[1] = [b=T, notb=F], +[5] = [b=F, notb=T], +[3] = [b=F, notb=T] +} diff --git a/testing/btest/scripts/base/frameworks/input/optional.bro b/testing/btest/scripts/base/frameworks/input/optional.bro new file mode 100644 index 0000000000..c354f7c3ab --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/optional.bro @@ -0,0 +1,45 @@ +# +# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + notb: bool &optional; +}; + +global servers: table[int] of Val = table(); + +event bro_init() +{ + # first read in the old stuff into the table... + Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, + $pred(typ: Input::Event, left: Idx, right: Val) = { right$notb = !right$b; return T; } + ]); + Input::remove("input"); +} + +event Input::update_finished(name: string, source: string) { + print servers; +} From c765f43fe3eb6fd4cb49b2b947654881a225e145 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 21 Mar 2012 10:32:39 -0500 Subject: [PATCH 153/651] Refactor script-layer IPv6 ext. header chain (addresses #795) This replaces the "ip6_hdr_chain" in the "ip6_hdr" record with a vector of "ip6_ext_hdr" to make it easier to traverse the chain. --- scripts/base/init-bare.bro | 70 ++--- src/IP.cc | 58 ++--- src/IP.h | 6 +- testing/btest/Baseline/core.ipv6-frag/output | 10 +- testing/btest/Baseline/core.ipv6_esp/output | 240 +++++++++--------- .../Baseline/core.ipv6_ext_headers/output | 2 +- .../btest/bifs/routing0_data_to_addrs.test | 7 +- 7 files changed, 185 insertions(+), 208 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 42215839c0..b3c997a750 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -1064,9 +1064,10 @@ type ip6_esp: record { seq: count; }; -## An IPv6 header chain. +## A general container for a more specific IPv6 extension header. ## -## .. bro:see:: pkt_hdr ip4_hdr +## .. bro:see:: pkt_hdr ip4_hdr ip6_hopopts ip6_dstopts ip6_routing ip6_fragment +## ip6_ah ip6_esp # # [Robin] What happens to unknown extension headers? We should keep them too so that # one can at least identify what one can't analyze. @@ -1076,37 +1077,22 @@ type ip6_esp: record { # created since we can't parse past unknown extension headers to get # at the upper layer protocol. Does that seem reasonable for at # being able to identify things that couldn't be analyzed? -type ip6_hdr_chain: record { - # [Robin] This looses the order of the headers (partially at least, even with ext_order I believe). - # Not sure how to do it differently, but can order be important for us? - # [Jon] I do think order can be interesting as RFC 2460 specifies some - # ordering constraints, and I think I provide enough info in this - # record for one to reconstruct the order. Reread my new comments - # for the "ext_order" field below and see if you change your mind. 
- +type ip6_ext_hdr: record { + ## The RFC 1700 et seq. IANA assigned number identifying the type of + ## the extension header. + id: count; ## Hop-by-hop option extension header. - hopopts: vector of ip6_hopopts; - ## Destination option extension headers. - dstopts: vector of ip6_dstopts; - ## Routing extension headers. - routing: vector of ip6_routing; - ## Fragment headers. - fragment: vector of ip6_fragment; - ## Authentication extension headers. - ah: vector of ip6_ah; - ## Encapsulating security payload headers. - esp: vector of ip6_esp; - - ## Order of extension headers as seen in the packet header. - ## The value at an index indicates the protocol number (RFC 1700 et seq., - ## IANA assigned number) of the header at that same position in the chain. - ## e.g. if :bro:id:`IPPROTO_DSTOPTS` is at index 0 and index 2 and - ## :bro:id:`IPPROTO_ROUTING` is at index 1, then the order of the headers - ## in the chain is the header at index 0 of *dstopts* followed by - ## the header at index 0 of *routing* and then the header at index 1 of - ## *dstopts* (tracking of duplicate header types to know where to - ## index into each vector would be up to the script following the chain). - ext_order: vector of count; + hopopts: ip6_hopopts &optional; + ## Destination option extension header. + dstopts: ip6_dstopts &optional; + ## Routing extension header. + routing: ip6_routing &optional; + ## Fragment header. + fragment: ip6_fragment &optional; + ## Authentication extension header. + ah: ip6_ah &optional; + ## Encapsulating security payload header. + esp: ip6_esp &optional; }; ## Values extracted from an IPv6 header. @@ -1114,16 +1100,16 @@ type ip6_hdr_chain: record { ## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr_chain ip6_hopopts ip6_dstopts ## ip6_routing ip6_fragment ip6_ah ip6_esp type ip6_hdr: record { - class: count; ##< Traffic class. - flow: count; ##< Flow label. - len: count; ##< Payload length. - nxt: count; ##< Protocol number of the next header - ##< (RFC 1700 et seq., IANA assigned number), e.g. - ##< :bro:id:`IPPROTO_ICMP`. - hlim: count; ##< Hop limit. - src: addr; ##< Source address. - dst: addr; ##< Destination address. - exts: ip6_hdr_chain;##< Extension header chain. + class: count; ##< Traffic class. + flow: count; ##< Flow label. + len: count; ##< Payload length. + nxt: count; ##< Protocol number of the next header + ##< (RFC 1700 et seq., IANA assigned number) + ##< e.g. :bro:id:`IPPROTO_ICMP`. + hlim: count; ##< Hop limit. + src: addr; ##< Source address. + dst: addr; ##< Destination address. + exts: vector of ip6_ext_hdr; ##< Extension header chain. }; ## Values extracted from an IPv4 header. diff --git a/src/IP.cc b/src/IP.cc index 9dcb372d3f..d6d1df0c31 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -7,7 +7,7 @@ static RecordType* ip4_hdr_type = 0; static RecordType* ip6_hdr_type = 0; -static RecordType* ip6_hdr_chain_type = 0; +static RecordType* ip6_ext_hdr_type = 0; static RecordType* ip6_option_type = 0; static RecordType* ip6_hopopts_type = 0; static RecordType* ip6_dstopts_type = 0; @@ -57,7 +57,7 @@ static VectorVal* BuildOptionsVal(const u_char* data, uint16 len) return vv; } -RecordVal* IPv6_Hdr::BuildRecordVal() const +RecordVal* IPv6_Hdr::BuildRecordVal(VectorVal* chain) const { RecordVal* rv = 0; @@ -73,6 +73,10 @@ RecordVal* IPv6_Hdr::BuildRecordVal() const rv->Assign(4, new Val(ip6->ip6_hlim, TYPE_COUNT)); rv->Assign(5, new AddrVal(ip6->ip6_src)); rv->Assign(6, new AddrVal(ip6->ip6_dst)); + if ( ! 
chain ) + chain = new VectorVal(new VectorType( + hdrType(ip6_ext_hdr_type, "ip6_ext_hdr")->Ref())); + rv->Assign(7, chain); } break; @@ -172,8 +176,7 @@ RecordVal* IP_Hdr::BuildIPHdrVal() const } else { - rval = ((*ip6_hdrs)[0])->BuildRecordVal(); - rval->Assign(7, ip6_hdrs->BuildRecordVal()); + rval = ((*ip6_hdrs)[0])->BuildRecordVal(ip6_hdrs->BuildVal()); } return rval; @@ -306,11 +309,11 @@ void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) isIPv6ExtHeader(next_type) ); } -RecordVal* IPv6_Hdr_Chain::BuildRecordVal() const +VectorVal* IPv6_Hdr_Chain::BuildVal() const { - if ( ! ip6_hdr_chain_type ) + if ( ! ip6_ext_hdr_type ) { - ip6_hdr_chain_type = internal_type("ip6_hdr_chain")->AsRecordType(); + ip6_ext_hdr_type = internal_type("ip6_ext_hdr")->AsRecordType(); ip6_hopopts_type = internal_type("ip6_hopopts")->AsRecordType(); ip6_dstopts_type = internal_type("ip6_dstopts")->AsRecordType(); ip6_routing_type = internal_type("ip6_routing")->AsRecordType(); @@ -319,53 +322,40 @@ RecordVal* IPv6_Hdr_Chain::BuildRecordVal() const ip6_esp_type = internal_type("ip6_esp")->AsRecordType(); } - RecordVal* rval = new RecordVal(ip6_hdr_chain_type); - - VectorVal* hopopts = new VectorVal(new VectorType(ip6_hopopts_type->Ref())); - VectorVal* dstopts = new VectorVal(new VectorType(ip6_dstopts_type->Ref())); - VectorVal* routing = new VectorVal(new VectorType(ip6_routing_type->Ref())); - VectorVal* fragment = new VectorVal(new VectorType(ip6_fragment_type->Ref())); - VectorVal* ah = new VectorVal(new VectorType(ip6_ah_type->Ref())); - VectorVal* esp = new VectorVal(new VectorType(ip6_esp_type->Ref())); - VectorVal* order = new VectorVal(new VectorType(base_type(TYPE_COUNT))); + VectorVal* rval = new VectorVal(new VectorType(ip6_ext_hdr_type->Ref())); for ( size_t i = 1; i < chain.size(); ++i ) { RecordVal* v = chain[i]->BuildRecordVal(); + RecordVal* ext_hdr = new RecordVal(ip6_ext_hdr_type); uint8 type = chain[i]->Type(); + ext_hdr->Assign(0, new Val(type, TYPE_COUNT)); + switch (type) { case IPPROTO_HOPOPTS: - hopopts->Assign(hopopts->Size(), v, 0); - break; - case IPPROTO_ROUTING: - routing->Assign(routing->Size(), v, 0); + ext_hdr->Assign(1, v); break; case IPPROTO_DSTOPTS: - dstopts->Assign(dstopts->Size(), v, 0); + ext_hdr->Assign(2, v); + break; + case IPPROTO_ROUTING: + ext_hdr->Assign(3, v); break; case IPPROTO_FRAGMENT: - fragment->Assign(fragment->Size(), v, 0); + ext_hdr->Assign(4, v); break; case IPPROTO_AH: - ah->Assign(ah->Size(), v, 0); + ext_hdr->Assign(5, v); break; case IPPROTO_ESP: - esp->Assign(esp->Size(), v, 0); + ext_hdr->Assign(6, v); break; - case IPPROTO_IPV6: default: - reporter->InternalError("pkt_hdr assigned bad header %d", type); + reporter->InternalError("IPv6_Hdr_Chain bad header %d", type); break; } - order->Assign(i-1, new Val(type, TYPE_COUNT), 0); + rval->Assign(rval->Size(), ext_hdr, 0); } - rval->Assign(0, hopopts); - rval->Assign(1, dstopts); - rval->Assign(2, routing); - rval->Assign(3, fragment); - rval->Assign(4, ah); - rval->Assign(5, esp); - rval->Assign(6, order); return rval; } diff --git a/src/IP.h b/src/IP.h index 5e5e3c0748..a989b04d76 100644 --- a/src/IP.h +++ b/src/IP.h @@ -121,7 +121,7 @@ public: /** * Returns the script-layer record representation of the header. 
*/ - RecordVal* BuildRecordVal() const; + RecordVal* BuildRecordVal(VectorVal* chain = 0) const; protected: uint8 type; @@ -188,10 +188,10 @@ public: (ntohs(GetFragHdr()->ip6f_offlg) & 0x0001) != 0 : 0; } /** - * Returns an ip6_hdr_chain RecordVal that includes script-layer + * Returns a vector of ip6_ext_hdr RecordVals that includes script-layer * representation of all extension headers in the chain. */ - RecordVal* BuildRecordVal() const; + VectorVal* BuildVal() const; protected: // for access to protected ctor that changes next header values that diff --git a/testing/btest/Baseline/core.ipv6-frag/output b/testing/btest/Baseline/core.ipv6-frag/output index 80c1a2cc93..12dfc3a841 100644 --- a/testing/btest/Baseline/core.ipv6-frag/output +++ b/testing/btest/Baseline/core.ipv6-frag/output @@ -1,5 +1,5 @@ -ip6=[class=0, flow=0, len=81, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]]], udp = [sport=51850/udp, dport=53/udp, ulen=81] -ip6=[class=0, flow=0, len=331, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]]], udp = [sport=53/udp, dport=51850/udp, ulen=331] -ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]]], udp = [sport=51851/udp, dport=53/udp, ulen=82] -ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]]], udp = [sport=51851/udp, dport=53/udp, ulen=82] -ip6=[class=0, flow=0, len=3238, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[], ext_order=[]]], udp = [sport=53/udp, dport=51851/udp, ulen=3238] +ip6=[class=0, flow=0, len=81, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[]], udp = [sport=51850/udp, dport=53/udp, ulen=81] +ip6=[class=0, flow=0, len=331, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2, exts=[]], udp = [sport=53/udp, dport=51850/udp, ulen=331] +ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] +ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] +ip6=[class=0, flow=0, len=3238, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2, exts=[]], udp = [sport=53/udp, dport=51851/udp, ulen=3238] diff --git a/testing/btest/Baseline/core.ipv6_esp/output b/testing/btest/Baseline/core.ipv6_esp/output index db27689364..97a8434e7b 100644 --- a/testing/btest/Baseline/core.ipv6_esp/output +++ b/testing/btest/Baseline/core.ipv6_esp/output @@ -1,120 +1,120 @@ -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=2]], ext_order=[50]]], tcp=, udp=, 
icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, 
exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=7]], ext_order=[50]]], tcp=, 
udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=10, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, 
dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=11, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=12, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], 
esp=[[spi=13, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=13, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, 
len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=20, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=21, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], 
fragment=[], ah=[], esp=[[spi=22, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=22, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=1]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=2]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=3]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=4]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=5]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=6]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=7]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=8]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=9]], ext_order=[50]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[hopopts=[], dstopts=[], routing=[], fragment=[], ah=[], esp=[[spi=23, seq=10]], ext_order=[50]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, 
hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, 
esp=[spi=12, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, 
flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, 
src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, 
hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, 
esp=[spi=22, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=10]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=1]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=2]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=3]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=4]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=5]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=6]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=7]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=8]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=9]]]], tcp=, udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=10]]]], tcp=, udp=, icmp=] diff --git 
a/testing/btest/Baseline/core.ipv6_ext_headers/output b/testing/btest/Baseline/core.ipv6_ext_headers/output index 9348cc41c8..a5a0caf7c6 100644 --- a/testing/btest/Baseline/core.ipv6_ext_headers/output +++ b/testing/btest/Baseline/core.ipv6_ext_headers/output @@ -1 +1 @@ -[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[hopopts=[[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]]], dstopts=[], routing=[[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B]], fragment=[], ah=[], esp=[], ext_order=[0, 43]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] +[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] diff --git a/testing/btest/bifs/routing0_data_to_addrs.test b/testing/btest/bifs/routing0_data_to_addrs.test index eb6ebbc614..4bf15cae87 100644 --- a/testing/btest/bifs/routing0_data_to_addrs.test +++ b/testing/btest/bifs/routing0_data_to_addrs.test @@ -3,7 +3,8 @@ event ipv6_ext_headers(c: connection, p: pkt_hdr) { - for ( h in p$ip6$exts$routing ) - if ( p$ip6$exts$routing[h]$rtype == 0 ) - print routing0_data_to_addrs(p$ip6$exts$routing[h]$data); + for ( h in p$ip6$exts ) + if ( p$ip6$exts[h]$id == IPPROTO_ROUTING ) + if ( p$ip6$exts[h]$routing$rtype == 0 ) + print routing0_data_to_addrs(p$ip6$exts[h]$routing$data); } From 51ddc9f572a1ab8524ecc27ec5ee6de494d2fc56 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 21 Mar 2012 15:51:21 -0700 Subject: [PATCH 154/651] fix bug that crashed input framework when creating already existing stream (tried to free not yet alloccated data) + write twotables test --- src/input/Manager.cc | 48 ++- .../out | 349 ++++++++++++++++++ .../base/frameworks/input/twotables.bro | 113 ++++++ 3 files changed, 500 insertions(+), 10 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.twotables/out create mode 100644 testing/btest/scripts/base/frameworks/input/twotables.bro diff --git a/src/input/Manager.cc b/src/input/Manager.cc index f6ba6f9f49..f62e87d937 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -43,14 +43,24 @@ public: RecordVal* description; + Filter(); virtual ~Filter(); }; -Manager::Filter::~Filter() { - Unref(type); - Unref(description); +Manager::Filter::Filter() { + type = 0; + reader = 0; + description = 0; +} - delete(reader); +Manager::Filter::~Filter() { + if ( type ) + Unref(type); + if ( description ) + Unref(description); + + if ( reader ) + delete(reader); } class Manager::TableFilter: public Manager::Filter { @@ -85,28 +95,46 @@ public: bool want_record; EventFilter(); + ~EventFilter(); }; -Manager::TableFilter::TableFilter() { +Manager::TableFilter::TableFilter() : Manager::Filter::Filter() { filter_type = TABLE_FILTER; tab = 0; itype = 0; rtype = 0; + + currDict = 0; + lastDict = 0; + + pred = 0; } -Manager::EventFilter::EventFilter() { +Manager::EventFilter::EventFilter() : Manager::Filter::Filter() { + fields = 0; filter_type = EVENT_FILTER; } +Manager::EventFilter::~EventFilter() { + if ( fields ) { + 
Unref(fields); + } +} + Manager::TableFilter::~TableFilter() { - Unref(tab); - Unref(itype); + if ( tab ) + Unref(tab); + if ( itype ) + Unref(itype); if ( rtype ) // can be 0 for sets Unref(rtype); - delete currDict; - delete lastDict; + if ( currDict != 0 ) + delete currDict; + + if ( lastDict != 0 ) + delete lastDict; } struct ReaderDefinition { diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out new file mode 100644 index 0000000000..a61a4a2993 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out @@ -0,0 +1,349 @@ +============PREDICATE============ +Input::EVENT_NEW +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE 2============ +Input::EVENT_NEW +[i=-43] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type +Input::EVENT_NEW +Left +[i=-42] +Right +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +==========SERVERS============ +{ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +============EVENT============ +Description +[source=../input2.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh2, destination={ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, 
============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE 2============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type +Input::EVENT_NEW +Left +[i=-43] +Right +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +==========SERVERS============ +{ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} +============PREDICATE============ +Input::EVENT_NEW +[i=-44] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}] +Type +Input::EVENT_NEW +Left +[i=-44] +Right +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Description +Input::EVENT_REMOVED +Type +[i=-42] +Left +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +Right +==========SERVERS============ +{ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, 
a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} diff --git a/testing/btest/scripts/base/frameworks/input/twotables.bro b/testing/btest/scripts/base/frameworks/input/twotables.bro new file mode 100644 index 0000000000..6f18e0e939 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/twotables.bro @@ -0,0 +1,113 @@ +# +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: btest-bg-run bro bro %INPUT +# @TEST-EXEC: sleep 2 +# @TEST-EXEC: cp input3.log input.log +# @TEST-EXEC: btest-bg-wait -k 2 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input1.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input2.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input3.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +F -44 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load frameworks/communication/listen + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +global outfile: file; + +global try: count; + +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) { + print outfile, "============EVENT============"; + print outfile, "Description"; + print outfile, description; + print outfile, "Type"; + print outfile, tpe; + print outfile, "Left"; + print outfile, left; + print outfile, "Right"; + print outfile, right; +} + +event bro_init() +{ + outfile = open ("../out"); + try = 0; + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, + $pred(typ: Input::Event, left: Idx, right: Val) = { + print outfile, "============PREDICATE============"; + print outfile, typ; + print outfile, left; + print outfile, right; + return T; + } + ]); + Input::add_table([$source="../input2.log", $mode=Input::REREAD, $name="ssh2", $idx=Idx, $val=Val, $destination=servers, $ev=line, + $pred(typ: Input::Event, left: Idx, right: Val) = { + print outfile, "============PREDICATE 2============"; + print outfile, typ; + print outfile, left; + print outfile, right; + return T; + } + ]); +} + + +event Input::update_finished(name: string, source: string) { + print outfile, "==========SERVERS============"; + print outfile, servers; + + try = try + 1; + if ( try == 5 ) { + print outfile, "done"; + close(outfile); + Input::remove("input"); + } +} From 0db89bed3ba6248354315f6d3f1b77ab84e65a41 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 22 Mar 2012 10:33:49 -0700 Subject: [PATCH 155/651] fix crash when deleting data from source where there are no events or predicates... (that happens when all testcases are too complicated and use all features..) --- src/input/Manager.cc | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index f62e87d937..c1fa060b2b 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -915,18 +915,20 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { ListVal * idx = 0; Val *val = 0; + + Val* predidx = 0; + EnumVal* ev = 0; + int startpos = 0; if ( filter->pred || filter->event ) { idx = filter->tab->RecoverIndex(ih->idxkey); assert(idx != 0); val = filter->tab->Lookup(idx); assert(val != 0); + predidx = ListValToRecordVal(idx, filter->itype, &startpos); + ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); } - int startpos = 0; - Val* predidx = ListValToRecordVal(idx, filter->itype, &startpos); - EnumVal* ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - if ( filter->pred ) { // ask predicate, if we want to expire this element... @@ -953,8 +955,10 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { SendEvent(filter->event, 3, ev, predidx, val); } - Unref(predidx); - Unref(ev); + if ( predidx ) // if we have a filter or an event... + Unref(predidx); + if ( ev ) + Unref(ev); filter->tab->Delete(ih->idxkey); filter->lastDict->Remove(lastDictIdxKey); // deletex in next line @@ -1321,6 +1325,7 @@ RecordVal* Manager::ListValToRecordVal(ListVal* list, RecordType *request_type, RecordVal* rec = new RecordVal(request_type->AsRecordType()); + assert(list != 0); int maxpos = list->Length(); for ( int i = 0; i < request_type->NumFields(); i++ ) { From 14c6c4004289e715101dcddc1a42955ac6f86e8c Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 22 Mar 2012 10:59:36 -0700 Subject: [PATCH 156/651] fix crash when all value fields of imported table are uninitialized. 
--- .../out | 4 ++ .../base/frameworks/input/emptyvals.bro | 37 +++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.emptyvals/out create mode 100644 testing/btest/scripts/base/frameworks/input/emptyvals.bro diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.emptyvals/out b/testing/btest/Baseline/scripts.base.frameworks.input.emptyvals/out new file mode 100644 index 0000000000..f75248cf97 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.emptyvals/out @@ -0,0 +1,4 @@ +{ +[2] = [b=], +[1] = [b=T] +} diff --git a/testing/btest/scripts/base/frameworks/input/emptyvals.bro b/testing/btest/scripts/base/frameworks/input/emptyvals.bro new file mode 100644 index 0000000000..77659d13ec --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/emptyvals.bro @@ -0,0 +1,37 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i +##types bool int +T 1 +- 2 +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global servers: table[int] of Val = table(); + +event bro_init() +{ + # first read in the old stuff into the table... + Input::add_table([$source="input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::remove("ssh"); +} + +event Input::update_finished(name: string, source:string) { + print servers; +} From 5f5209fcfb12cf96dbb9bb027a93133657f0d9ac Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 22 Mar 2012 11:00:51 -0700 Subject: [PATCH 157/651] ...forgotten file. --- src/input/Manager.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index c1fa060b2b..32b60f05f2 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -760,8 +760,13 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { hash_t valhash = 0; if ( filter->num_val_fields > 0 ) { HashKey* valhashkey = HashValues(filter->num_val_fields, vals+filter->num_idx_fields); - valhash = valhashkey->Hash(); - delete(valhashkey); + if ( valhashkey == 0 ) { + // empty line. index, but no values. + // hence we also have no hash value... 
+ } else { + valhash = valhashkey->Hash(); + delete(valhashkey); + } } InputHash *h = filter->lastDict->Lookup(idxhash); From 7e4cbbc0735a13343062c9bc18e01f8d8d7b17fb Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 22 Mar 2012 12:45:11 -0700 Subject: [PATCH 158/651] remove forgotten debug statements --- src/input/readers/Raw.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index fb9243e713..f416fbe94a 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -198,7 +198,6 @@ bool Raw::DoUpdate() { // fallthrough case MANUAL: case STREAM: - Debug(DBG_INPUT, "Updating"); if ( mode == STREAM && file != NULL && in != NULL ) { fpurge(file); in->clear(); // remove end of file evil bits @@ -238,8 +237,6 @@ bool Raw::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); - Debug(DBG_INPUT, "Heartbeat"); - switch ( mode ) { case MANUAL: // yay, we do nothing :) From 6c4a40f176c74735d33bb8afe66ac429d9f416ed Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 22 Mar 2012 13:09:53 -0700 Subject: [PATCH 159/651] missing include on linux --- src/input/Manager.cc | 2 +- src/input/readers/Raw.cc | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 32b60f05f2..d8bf63505b 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1540,7 +1540,7 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { memcpy(data + startpos, (const char*) &(val->val.subnet_val.prefix.in.in4), length); break; case IPv6: - length += sizeof(val->val.addr_val.in.in6); + length = sizeof(val->val.addr_val.in.in6); memcpy(data + startpos, (const char*) &(val->val.subnet_val.prefix.in.in4), length); break; default: diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index f416fbe94a..cba0a29f3e 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -17,6 +17,7 @@ #include #include #include +#include using namespace input::reader; using threading::Value; @@ -199,7 +200,7 @@ bool Raw::DoUpdate() { case MANUAL: case STREAM: if ( mode == STREAM && file != NULL && in != NULL ) { - fpurge(file); + //fpurge(file); in->clear(); // remove end of file evil bits break; } From f73de0bc8c531f9b2ec0d06b9c63054dc4cf3589 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 22 Mar 2012 15:11:42 -0700 Subject: [PATCH 160/651] fix small memory leak (field description given to readers was never freed). --- src/input/ReaderBackend.cc | 13 +++++++++++++ src/input/ReaderBackend.h | 3 +++ src/input/readers/Ascii.cc | 1 - 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 0a6ff37dc2..ce79ecfd39 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -169,6 +169,9 @@ bool ReaderBackend::Init(string arg_source, int mode, const int arg_num_fields, source = arg_source; SetName("InputReader/"+source); + num_fields = arg_num_fields; + fields = arg_fields; + // disable if DoInit returns error. 
int success = DoInit(arg_source, mode, arg_num_fields, arg_fields); @@ -188,6 +191,16 @@ void ReaderBackend::Finish() disabled = true; DisableFrontend(); SendOut(new ReaderFinishedMessage(frontend)); + + if ( fields != 0 ) { + + for ( unsigned int i = 0; i < num_fields; i++ ) { + delete(fields[i]); + } + + delete[] (fields); + fields = 0; + } } bool ReaderBackend::Update() diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index 28fd99f2b9..5742f72368 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -228,6 +228,9 @@ private: char* buf; unsigned int buf_len; bool autostart; + + unsigned int num_fields; + const threading::Field* const * fields; // raw mapping }; } diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 20ae79ab19..553a4ada81 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -73,7 +73,6 @@ Ascii::Ascii(ReaderFrontend *frontend) : ReaderBackend(frontend) Ascii::~Ascii() { DoFinish(); - } void Ascii::DoFinish() From 94d439b0cba75fee93a3b08bb3b57041e694b46e Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 22 Mar 2012 17:17:41 -0700 Subject: [PATCH 161/651] enable predicate modification of index of value which is currently being added/removed Todo: test if this works for removal ( I think it should ). --- src/input/Manager.cc | 45 +++++++++++++++-- src/input/Manager.h | 1 + .../out | 4 ++ .../base/frameworks/input/predicatemodify.bro | 50 +++++++++++++++++++ 4 files changed, 95 insertions(+), 5 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.predicatemodify/out create mode 100644 testing/btest/scripts/base/frameworks/input/predicatemodify.bro diff --git a/src/input/Manager.cc b/src/input/Manager.cc index d8bf63505b..2201c69995 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -692,6 +692,31 @@ bool Manager::ForceUpdate(const string &name) return true; // update is async :( } + +Val* Manager::RecordValToIndexVal(RecordVal *r) { + Val* idxval; + + RecordType *type = r->Type()->AsRecordType(); + + int num_fields = type->NumFields(); + + if ( num_fields == 1 && type->FieldDecl(0)->type->Tag() != TYPE_RECORD ) { + idxval = r->Lookup(0); + } else { + ListVal *l = new ListVal(TYPE_ANY); + for ( int j = 0 ; j < num_fields; j++ ) { + Val* rval = r->Lookup(j); + assert(rval != 0); + l->Append(r->LookupWithDefault(j)); + } + idxval = l; + } + + + return idxval; +} + + Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Value* const *vals) { Val* idxval; int position = 0; @@ -788,8 +813,8 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { } - Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); Val* valval; + RecordVal* predidx = 0; int position = filter->num_idx_fields; if ( filter->num_val_fields == 0 ) { @@ -806,8 +831,10 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { if ( filter->pred ) { EnumVal* ev; //Ref(idxval); - int startpos = 0; - Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); + int startpos = 0; + //Val* predidx = ListValToRecordVal(idxval->AsListVal(), filter->itype, &startpos); + predidx = ValueToRecordVal(vals, filter->itype, &startpos); + //ValueToRecordVal(vals, filter->itype, &startpos); Ref(valval); if ( updated ) { @@ -818,13 +845,14 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { bool result; if ( filter->num_val_fields > 0 ) { // we have values - result = CallPred(filter->pred, 3, ev, predidx, 
valval); + result = CallPred(filter->pred, 3, ev, predidx->Ref(), valval); } else { // no values - result = CallPred(filter->pred, 2, ev, predidx); + result = CallPred(filter->pred, 2, ev, predidx->Ref()); } if ( result == false ) { + Unref(predidx); if ( !updated ) { // throw away. Hence - we quit. And remove the entry from the current dictionary... delete(filter->currDict->RemoveEntry(idxhash)); @@ -839,6 +867,13 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { } + Val* idxval; + if ( predidx != 0 ) { + idxval = RecordValToIndexVal(predidx); + Unref(predidx); + } else { + idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); + } Val* oldval = 0; if ( updated == true ) { assert(filter->num_val_fields > 0); diff --git a/src/input/Manager.h b/src/input/Manager.h index 71169c4bc2..c6dd40bd95 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -158,6 +158,7 @@ private: // Converts a threading::value to a record type. mostly used by ValueToVal RecordVal* ValueToRecordVal(const threading::Value* const *vals, RecordType *request_type, int* position); + Val* RecordValToIndexVal(RecordVal *r); // Converts a Bro ListVal to a RecordVal given the record type RecordVal* ListValToRecordVal(ListVal* list, RecordType *request_type, int* position); diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.predicatemodify/out b/testing/btest/Baseline/scripts.base.frameworks.input.predicatemodify/out new file mode 100644 index 0000000000..c648e63710 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.predicatemodify/out @@ -0,0 +1,4 @@ +{ +[2, idxmodified] = [b=T, s=test2], +[1, idx1] = [b=T, s=testmodified] +} diff --git a/testing/btest/scripts/base/frameworks/input/predicatemodify.bro b/testing/btest/scripts/base/frameworks/input/predicatemodify.bro new file mode 100644 index 0000000000..c3198d8483 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/predicatemodify.bro @@ -0,0 +1,50 @@ +# +# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +1 T test1 idx1 +2 T test2 idx2 +@TEST-END-FILE + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; + ss: string; +}; + +type Val: record { + b: bool; + s: string; +}; + +global servers: table[int, string] of Val = table(); + +event bro_init() +{ + # first read in the old stuff into the table... + Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, + $pred(typ: Input::Event, left: Idx, right: Val) = { + if ( left$i == 1 ) { + right$s = "testmodified"; + } + + if ( left$i == 2 ) { + left$ss = "idxmodified"; + } + return T; + } + ]); + Input::remove("input"); +} + +event Input::update_finished(name: string, source: string) { + print servers; +} From 03116d779eef498d2fcc0ab7e2822a94cbfb43f4 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 22 Mar 2012 18:08:59 -0700 Subject: [PATCH 162/651] one unref to many ... 
apparently --- src/input/Manager.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 2201c69995..8eaca07e78 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -705,8 +705,8 @@ Val* Manager::RecordValToIndexVal(RecordVal *r) { } else { ListVal *l = new ListVal(TYPE_ANY); for ( int j = 0 ; j < num_fields; j++ ) { - Val* rval = r->Lookup(j); - assert(rval != 0); + //Val* rval = r->Lookup(j); + //assert(rval != 0); l->Append(r->LookupWithDefault(j)); } idxval = l; @@ -870,7 +870,7 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { Val* idxval; if ( predidx != 0 ) { idxval = RecordValToIndexVal(predidx); - Unref(predidx); + // I think there is an unref missing here. But if I insert is, it crashes :) } else { idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); } From 315948dbc8fc3b34a0add6a46686814474ff2fb7 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 23 Mar 2012 11:40:59 -0700 Subject: [PATCH 163/651] add test for update functionality of tables where a predicate modifies values / indexes. Seems to work fine for all cases... --- .../out | 23 ++++ .../input/predicatemodifyandreread.bro | 107 ++++++++++++++++++ 2 files changed, 130 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.predicatemodifyandreread/out create mode 100644 testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.predicatemodifyandreread/out b/testing/btest/Baseline/scripts.base.frameworks.input.predicatemodifyandreread/out new file mode 100644 index 0000000000..0adccc1856 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.predicatemodifyandreread/out @@ -0,0 +1,23 @@ +Update_finished for input, try 1 +{ +[2, idxmodified] = [b=T, s=test2], +[1, idx1] = [b=T, s=testmodified] +} +Update_finished for input, try 2 +{ +[2, idxmodified] = [b=T, s=test2], +[1, idx1] = [b=F, s=testmodified] +} +Update_finished for input, try 3 +{ +[2, idxmodified] = [b=F, s=test2], +[1, idx1] = [b=F, s=testmodified] +} +Update_finished for input, try 4 +{ +[2, idxmodified] = [b=F, s=test2] +} +Update_finished for input, try 5 +{ +[1, idx1] = [b=T, s=testmodified] +} diff --git a/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro b/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro new file mode 100644 index 0000000000..1606ff6a27 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro @@ -0,0 +1,107 @@ +# +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: btest-bg-run bro bro %INPUT +# @TEST-EXEC: sleep 2 +# @TEST-EXEC: cp input2.log input.log +# @TEST-EXEC: sleep 2 +# @TEST-EXEC: cp input3.log input.log +# @TEST-EXEC: sleep 2 +# @TEST-EXEC: cp input4.log input.log +# @TEST-EXEC: sleep 2 +# @TEST-EXEC: cp input5.log input.log +# @TEST-EXEC: btest-bg-wait -k 3 +# @TEST-EXEC: btest-diff out +# + +@TEST-START-FILE input1.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +1 T test1 idx1 +2 T test2 idx2 +@TEST-END-FILE + +@TEST-START-FILE input2.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +1 F test1 idx1 +2 T test2 idx2 +@TEST-END-FILE + +@TEST-START-FILE input3.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +1 F test1 idx1 +2 F test2 idx2 +@TEST-END-FILE + +@TEST-START-FILE input4.log +#separator \x09 
+#path ssh +#fields i b s ss +#types int bool string string +2 F test2 idx2 +@TEST-END-FILE + +@TEST-START-FILE input5.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +1 T test1 idx1 +@TEST-END-FILE + +@load frameworks/communication/listen + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; + ss: string; +}; + +type Val: record { + b: bool; + s: string; +}; + +global servers: table[int, string] of Val = table(); +global outfile: file; +global try: count; + +event bro_init() +{ + try = 0; + outfile = open ("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $mode=Input::REREAD, + $pred(typ: Input::Event, left: Idx, right: Val) = { + if ( left$i == 1 ) { + right$s = "testmodified"; + } + + if ( left$i == 2 ) { + left$ss = "idxmodified"; + } + return T; + } + ]); +} + +event Input::update_finished(name: string, source: string) { + try = try + 1; + print outfile, fmt("Update_finished for %s, try %d", name, try); + print outfile, servers; + + if ( try == 5 ) { + close (outfile); + Input::remove("input"); + } +} From 872ad195f789a155c3dd79a4f3786388203f3dce Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 23 Mar 2012 12:30:54 -0700 Subject: [PATCH 164/651] prevent several remove operations for the same thread to be queued and output errors in that case. --- src/input/Manager.cc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 8eaca07e78..6d97c2f50b 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -33,6 +33,7 @@ class Manager::Filter { public: string name; string source; + bool removed; int mode; @@ -51,6 +52,7 @@ Manager::Filter::Filter() { type = 0; reader = 0; description = 0; + removed = false; } Manager::Filter::~Filter() { @@ -597,6 +599,13 @@ bool Manager::RemoveStream(const string &name) { return false; // not found } + if ( i->removed ) { + reporter->Error("Stream %s is already queued for removal. Ignoring", name.c_str()); + return false; + } + + i->removed = true; + i->reader->Finish(); #ifdef DEBUG From d7c9471818ed60453fc319388277ebaf43939b27 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 23 Mar 2012 15:57:25 -0700 Subject: [PATCH 165/651] Extending queue statistics. --- .../frameworks/cluster/setup-connections.bro | 2 +- src/Stats.cc | 9 ++++- src/logging/WriterFrontend.h | 2 +- src/threading/MsgThread.cc | 2 + src/threading/MsgThread.h | 4 ++ src/threading/Queue.h | 39 +++++++++++++++++++ 6 files changed, 54 insertions(+), 4 deletions(-) diff --git a/scripts/base/frameworks/cluster/setup-connections.bro b/scripts/base/frameworks/cluster/setup-connections.bro index b5a0d25e1f..20646525be 100644 --- a/scripts/base/frameworks/cluster/setup-connections.bro +++ b/scripts/base/frameworks/cluster/setup-connections.bro @@ -44,7 +44,7 @@ event bro_init() &priority=9 { if ( n$node_type == WORKER && n$proxy == node ) Communication::nodes[i] = - [$host=n$ip, $connect=F, $class=i, $sync=T, $auth=T, $events=worker2proxy_events]; + [$host=n$ip, $connect=F, $class=i, $sync=F, $auth=T, $events=worker2proxy_events]; # accepts connections from the previous one. 
# (This is not ideal for setups with many proxies) diff --git a/src/Stats.cc b/src/Stats.cc index a2e7496c5f..c3035231e9 100644 --- a/src/Stats.cc +++ b/src/Stats.cc @@ -210,11 +210,16 @@ void ProfileLogger::Log() i != thread_stats.end(); ++i ) { threading::MsgThread::Stats s = i->second; - file->Write(fmt("%0.6f %-25s in=%" PRIu64 " out=%" PRIu64 " pending=%" PRIu64 "/%" PRIu64 "\n", + file->Write(fmt("%0.6f %-25s in=%" PRIu64 " out=%" PRIu64 " pending=%" PRIu64 "/%" PRIu64 + " (#queue r/w: in=%" PRIu64 "/%" PRIu64 " out=%" PRIu64 "/%" PRIu64 ")" + "\n", network_time, i->first.c_str(), s.sent_in, s.sent_out, - s.pending_in, s.pending_out)); + s.pending_in, s.pending_out, + s.queue_in_stats.num_reads, s.queue_in_stats.num_writes, + s.queue_out_stats.num_reads, s.queue_out_stats.num_writes + )); } // Script-level state. diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index 3e05d17c9e..4d22bd9b1f 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -212,7 +212,7 @@ protected: const threading::Field* const* fields; // The log fields. // Buffer for bulk writes. - static const int WRITER_BUFFER_SIZE = 50; + static const int WRITER_BUFFER_SIZE = 1000; int write_buffer_pos; // Position of next write in buffer. threading::Value*** write_buffer; // Buffer of size WRITER_BUFFER_SIZE. }; diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 145e16c57b..ddcd3df1dd 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -283,5 +283,7 @@ void MsgThread::GetStats(Stats* stats) stats->sent_out = cnt_sent_out; stats->pending_in = queue_in.Size(); stats->pending_out = queue_out.Size(); + queue_in.GetStats(&stats->queue_in_stats); + queue_out.GetStats(&stats->queue_out_stats); } diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 28c7690dfa..5ac1c0f780 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -154,6 +154,10 @@ public: uint64_t sent_out; //! Number of messages sent from the child thread to the main thread uint64_t pending_in; //! Number of messages sent to the child but not yet processed. uint64_t pending_out; //! Number of messages sent from the child but not yet processed by the main thread. + + /// Statistics from our queues. + Queue::Stats queue_in_stats; + Queue::Stats queue_out_stats; }; /** diff --git a/src/threading/Queue.h b/src/threading/Queue.h index a25f897d23..985ba31714 100644 --- a/src/threading/Queue.h +++ b/src/threading/Queue.h @@ -58,6 +58,22 @@ public: */ uint64_t Size(); + /** + * Statistics about inter-thread communication. + */ + struct Stats + { + uint64_t num_reads; //! Number of messages read from the queue. + uint64_t num_writes; //! Number of messages written to the queue. + }; + + /** + * Returns statistics about the queue's usage. + * + * @param stats A pointer to a structure that will be filled with + * current numbers. */ + void GetStats(Stats* stats); + private: static const int NUM_QUEUES = 8; @@ -67,6 +83,10 @@ private: int read_ptr; // Where the next operation will read from int write_ptr; // Where the next operation will write to + + // Statistics. 
+ uint64_t num_reads; + uint64_t num_writes; }; inline static void safe_lock(pthread_mutex_t* mutex) @@ -86,6 +106,7 @@ inline Queue::Queue() { read_ptr = 0; write_ptr = 0; + num_reads = num_writes = 0; for( int i = 0; i < NUM_QUEUES; ++i ) { @@ -121,6 +142,7 @@ inline T Queue::Get() messages[read_ptr].pop(); read_ptr = (read_ptr + 1) % NUM_QUEUES; + ++num_reads; safe_unlock(&mutex[old_read_ptr]); @@ -142,6 +164,7 @@ inline void Queue::Put(T data) pthread_cond_signal(&has_data[write_ptr]); write_ptr = (write_ptr + 1) % NUM_QUEUES; + ++num_writes; safe_unlock(&mutex[old_write_ptr]); } @@ -177,7 +200,23 @@ inline uint64_t Queue::Size() return size; } +template +inline void Queue::GetStats(Stats* stats) + { + // To be safe, we look all queues. That's probably unneccessary, but + // doesn't really hurt. + for ( int i = 0; i < NUM_QUEUES; i++ ) + safe_lock(&mutex[i]); + + stats->num_reads = num_reads; + stats->num_writes = num_writes; + + for ( int i = 0; i < NUM_QUEUES; i++ ) + safe_unlock(&mutex[i]); + } + } + #endif From 1d65f2da42648ac3fdde466509bf22fe3f8dafda Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 23 Mar 2012 16:08:22 -0700 Subject: [PATCH 166/651] Updating submodule(s). [nomail] --- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- cmake | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aux/binpac b/aux/binpac index 3034da8f08..dd1a3a95f0 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 3034da8f082b61157e234237993ffd7a95be6e62 +Subproject commit dd1a3a95f07082efcd5274b21104a038d523d132 diff --git a/aux/bro-aux b/aux/bro-aux index f53bcb2b49..a59b35bdad 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit f53bcb2b492cb0db3dd288384040abc2ab711767 +Subproject commit a59b35bdada8f70fb1a59bf7bb2976534c86d378 diff --git a/aux/broccoli b/aux/broccoli index a08ca90727..0128c72cbd 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit a08ca90727c5c4b90aa8633106ec33a5cf7378d4 +Subproject commit 0128c72cbdf29925dd146842a9077c631d2cc85c diff --git a/aux/broctl b/aux/broctl index 954538514d..66e9e87bee 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 954538514d71983e7ef3f0e109960466096e1c1d +Subproject commit 66e9e87beebce983fa0f479b0284d5690b0290d4 diff --git a/cmake b/cmake index 2cc1055770..550ab2c8d9 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 2cc105577044a2d214124568f3f2496ed2ccbb34 +Subproject commit 550ab2c8d95b1d3e18e40a903152650e6c7a3c45 From 4321f635acd4bd7f83899c3e4ec7cf2d4e3d1468 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 23 Mar 2012 16:40:14 -0700 Subject: [PATCH 167/651] Removing remaining comments. Looks fine. 
--- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- cmake | 2 +- scripts/base/init-bare.bro | 28 +--------------------------- src/Frag.cc | 4 ---- src/IP.h | 16 ---------------- src/Net.cc | 1 - src/Sessions.cc | 25 +++---------------------- 10 files changed, 9 insertions(+), 75 deletions(-) diff --git a/aux/binpac b/aux/binpac index 3034da8f08..dd1a3a95f0 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 3034da8f082b61157e234237993ffd7a95be6e62 +Subproject commit dd1a3a95f07082efcd5274b21104a038d523d132 diff --git a/aux/bro-aux b/aux/bro-aux index f53bcb2b49..a59b35bdad 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit f53bcb2b492cb0db3dd288384040abc2ab711767 +Subproject commit a59b35bdada8f70fb1a59bf7bb2976534c86d378 diff --git a/aux/broccoli b/aux/broccoli index a08ca90727..612e95ac62 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit a08ca90727c5c4b90aa8633106ec33a5cf7378d4 +Subproject commit 612e95ac62a06b32b2e9e627f30527012a89a12c diff --git a/aux/broctl b/aux/broctl index 954538514d..66e9e87bee 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 954538514d71983e7ef3f0e109960466096e1c1d +Subproject commit 66e9e87beebce983fa0f479b0284d5690b0290d4 diff --git a/cmake b/cmake index 2cc1055770..550ab2c8d9 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 2cc105577044a2d214124568f3f2496ed2ccbb34 +Subproject commit 550ab2c8d95b1d3e18e40a903152650e6c7a3c45 diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index b3c997a750..7b1b304405 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -933,11 +933,7 @@ const ICMP_UNREACH_ADMIN_PROHIB = 13; ##< Adminstratively prohibited. # Definitions for access to packet headers. Currently only used for # discarders. # todo::these should go into an enum to make them autodoc'able -const IPPROTO_IP = 0; ##< Dummy for IP. [Robin] Rename to IPPROTO_IP4? -# [Jon] I'd say leave it be or remove it because from -# IPPROTO_IPV4 can actually be the same as IPPROTO_IPIP (4)... -# IPPROTO_IP seems to be just for use with the socket API and not -# actually identifying protocol numbers in packet headers +const IPPROTO_IP = 0; ##< Dummy for IP. const IPPROTO_ICMP = 1; ##< Control message protocol. const IPPROTO_IGMP = 2; ##< Group management protocol. const IPPROTO_IPIP = 4; ##< IP encapsulation in IP. @@ -947,14 +943,6 @@ const IPPROTO_IPV6 = 41; ##< IPv6 header. const IPPROTO_RAW = 255; ##< Raw IP packet. # Definitions for IPv6 extension headers. -# [Robin] Do we need a constant for unknown extensions? -# [Jon] I don't think so, these constants are just conveniences to improve -# script readability, but they also identify the actual assigned protocol -# number of the header type. If the core were to actually pass to the -# script-layer a next-header value of something we don't know about yet, -# that value would be the actual value seen in the packet, not something -# we should make up. We could provide a "KNOWN_PROTOCOLS" set for -# convenience that one could check membership against. const IPPROTO_HOPOPTS = 0; ##< IPv6 hop-by-hop-options header. const IPPROTO_ROUTING = 43; ##< IPv6 routing header. const IPPROTO_FRAGMENT = 44; ##< IPv6 fragment header. @@ -1068,15 +1056,6 @@ type ip6_esp: record { ## ## .. bro:see:: pkt_hdr ip4_hdr ip6_hopopts ip6_dstopts ip6_routing ip6_fragment ## ip6_ah ip6_esp -# -# [Robin] What happens to unknown extension headers? 
We should keep them too so that -# one can at least identify what one can't analyze. -# [Jon] Currently, they show up as "unknown_protocol" weirds and those packets -# are skipped before any "new_packet" or "ipv6_ext_headers" events are -# raised as those depend on a connection parameter which can't be -# created since we can't parse past unknown extension headers to get -# at the upper layer protocol. Does that seem reasonable for at -# being able to identify things that couldn't be analyzed? type ip6_ext_hdr: record { ## The RFC 1700 et seq. IANA assigned number identifying the type of ## the extension header. @@ -1170,11 +1149,6 @@ type icmp_hdr: record { ## A packet header, consisting of an IP header and transport-layer header. ## ## .. bro:see:: new_packet -# -# [Robin] Add flags saying whether it's v4/v6, tcp/udp/icmp? The day will come where -# we can't infer that from the connection anymore (tunnels). -# [Jon] I'm not sure what you mean, doesn't checking result of ?$ operator -# always work for finding out protocols involved? type pkt_hdr: record { ip: ip4_hdr &optional; ##< The IPv4 header if an IPv4 packet. ip6: ip6_hdr &optional; ##< The IPv6 header if an IPv6 packet. diff --git a/src/Frag.cc b/src/Frag.cc index 5fcad35560..a744526921 100644 --- a/src/Frag.cc +++ b/src/Frag.cc @@ -33,10 +33,6 @@ FragReassembler::FragReassembler(NetSessions* arg_s, s = arg_s; key = k; - // [Robin] Can't we merge these two cases now? - // [Jon] I think we'll always have to check v4 versus v6 to get the correct - // proto_hdr_len unless IP_Hdr::HdrLen itself makes a special case for - // IPv6 fragments (but that seems more confusing to me) const struct ip* ip4 = ip->IP4_Hdr(); if ( ip4 ) { diff --git a/src/IP.h b/src/IP.h index a989b04d76..f3e8272080 100644 --- a/src/IP.h +++ b/src/IP.h @@ -14,22 +14,6 @@ #include #include -// [Robin] I'm concerced about the virtual methods here. These methods will -// be called *a lot* and that may add to some significant overhead I'm afraid -// (at least eventually as IPv6 is picking up). -// -// [Robin] Similar concern for the vector and ip6_hdrs data -// members: we're creating/allocating those for every IPv6 packet, right? -// -// Any idea how to avoid these? -// -// [Jon] Seems fair enough to just remove the virtual method concern at this -// point by replacing the class hierarchy with some inline functions that -// do switch statements. I don't know what to do about the -// vector and ip6_hdrs data members being allocated for every -// IPv6 packet, maybe it's too early to try to optimize before we know -// the frequency at which extension headers appear in real IPv6 traffic? - /** * Base class for IPv6 header/extensions. */ diff --git a/src/Net.cc b/src/Net.cc index c92545cb87..35c3b383f6 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -42,7 +42,6 @@ extern int select(int, fd_set *, fd_set *, fd_set *, struct timeval *); PList(PktSrc) pkt_srcs; // FIXME: We should really merge PktDumper and PacketDumper. -// It's on my to-do [Robin]. PktDumper* pkt_dumper = 0; int reading_live = 0; diff --git a/src/Sessions.cc b/src/Sessions.cc index 9e91fdc304..4f31d29346 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -430,13 +430,6 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( discarder && discarder->NextPacket(ip_hdr, len, caplen) ) return; - // [Robin] dump_this_packet = 1 for non-ICMP/UDP/TCP removed here. Why? - // [Jon] The default case of the "switch ( proto )" calls Weird() which - // should set dump_this_packet = 1. 
The old code also returned - // at this point for non-ICMP/UDP/TCP, but for IPv6 fragments - // we need to do the reassembly first before knowing for sure what - // upper-layer protocol it is. - FragReassembler* f = 0; if ( ip_hdr->IsFragment() ) @@ -472,10 +465,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, len -= ip_hdr_len; // remove IP header caplen -= ip_hdr_len; - // [Robin] Does ESP need to be the last header? - // [Jon] In terms of what we try to parse, yes, we can't go any further - // in parsing a header chain once we reach an ESP one since - // encrypted payload immediately follows. + // We stop building the chain when seeing IPPROTO_ESP so if it's + // there, it's always the last. if ( ip_hdr->LastHeader() == IPPROTO_ESP ) { dump_this_packet = 1; @@ -498,16 +489,6 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, return; } - // [Robin] The Remove(f) used to be here, while it's now before every - // return statement. I'm not seeing why? - // [Jon] That Remove(f) is still here above in the CheckHeaderTrunc() - // conditional that's just a refactoring of the old code. - // The reason why it's not done unconditionally after the reassembly - // is because doing that could cause the object that ip_hdr points - // to to be freed when we still need to use that below. - // I added Remove(f)'s before other "abnormal" return points that - // looked like they'd otherwise leak the memory. - const u_char* data = ip_hdr->Payload(); ConnID id; @@ -553,7 +534,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } default: - Weird(fmt("unknown_protocol %d", proto), hdr, pkt); + Weird(fmt("unknown_protocol_%d", proto), hdr, pkt); Remove(f); return; } From 30014ac92010bdf1dca6534303ecee8945c0e657 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 23 Mar 2012 16:49:29 -0700 Subject: [PATCH 168/651] Cosmetics in preparation for merge. --- scripts/base/init-bare.bro | 16 ++++++++-------- src/Frag.cc | 2 +- src/IP.cc | 5 ++++- src/IP.h | 4 +++- src/PacketSort.cc | 2 +- src/Sessions.cc | 6 +++++- src/bro.bif | 1 + 7 files changed, 23 insertions(+), 13 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 7b1b304405..b9eca66d24 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -945,7 +945,7 @@ const IPPROTO_RAW = 255; ##< Raw IP packet. # Definitions for IPv6 extension headers. const IPPROTO_HOPOPTS = 0; ##< IPv6 hop-by-hop-options header. const IPPROTO_ROUTING = 43; ##< IPv6 routing header. -const IPPROTO_FRAGMENT = 44; ##< IPv6 fragment header. +const IPPROTO_FRAGMENT = 44; ##< IPv6 fragment header. const IPPROTO_ESP = 50; ##< IPv6 encapsulating security payload header. const IPPROTO_AH = 51; ##< IPv6 authentication header. const IPPROTO_NONE = 59; ##< IPv6 no next header. @@ -1081,14 +1081,14 @@ type ip6_ext_hdr: record { type ip6_hdr: record { class: count; ##< Traffic class. flow: count; ##< Flow label. - len: count; ##< Payload length. - nxt: count; ##< Protocol number of the next header - ##< (RFC 1700 et seq., IANA assigned number) - ##< e.g. :bro:id:`IPPROTO_ICMP`. + len: count; ##< Payload length. + nxt: count; ##< Protocol number of the next header + ##< (RFC 1700 et seq., IANA assigned number) + ##< e.g. :bro:id:`IPPROTO_ICMP`. hlim: count; ##< Hop limit. - src: addr; ##< Source address. - dst: addr; ##< Destination address. - exts: vector of ip6_ext_hdr; ##< Extension header chain. + src: addr; ##< Source address. 
+ dst: addr; ##< Destination address. + exts: vector of ip6_ext_hdr; ##< Extension header chain. }; ## Values extracted from an IPv4 header. diff --git a/src/Frag.cc b/src/Frag.cc index a744526921..9bd16a71c9 100644 --- a/src/Frag.cc +++ b/src/Frag.cc @@ -90,7 +90,7 @@ void FragReassembler::AddFragment(double t, const IP_Hdr* ip, const u_char* pkt) if ( ip->NextProto() != next_proto || ip->HdrLen() - 8 != proto_hdr_len ) s->Weird("fragment_protocol_inconsistency", ip); - //TODO: more detailed unfrag header consistency checks? + // TODO: more detailed unfrag header consistency checks? } if ( ip->DF() ) diff --git a/src/IP.cc b/src/IP.cc index d6d1df0c31..4148c58a33 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -18,7 +18,9 @@ static RecordType* ip6_esp_type = 0; static inline RecordType* hdrType(RecordType*& type, const char* name) { - if ( ! type ) type = internal_type(name)->AsRecordType(); + if ( ! type ) + type = internal_type(name)->AsRecordType(); + return type; } @@ -54,6 +56,7 @@ static VectorVal* BuildOptionsVal(const u_char* data, uint16 len) vv->Assign(vv->Size(), rv, 0); } + return vv; } diff --git a/src/IP.h b/src/IP.h index f3e8272080..cb5bcf77c7 100644 --- a/src/IP.h +++ b/src/IP.h @@ -229,7 +229,9 @@ public: ~IP_Hdr() { - if ( ip6 ) delete ip6_hdrs; + if ( ip6 ) + delete ip6_hdrs; + if ( del ) { if ( ip4 ) diff --git a/src/PacketSort.cc b/src/PacketSort.cc index aec7639f4a..04c525c4d1 100644 --- a/src/PacketSort.cc +++ b/src/PacketSort.cc @@ -31,7 +31,7 @@ PacketSortElement::PacketSortElement(PktSrc* arg_src, else if ( ip->ip_v == 6 ) ip_hdr = new IP_Hdr((const struct ip6_hdr*) ip, false); else - // weird will be generated later in NetSessions::NextPacket + // Weird will be generated later in NetSessions::NextPacket. return; if ( ip_hdr->NextProto() == IPPROTO_TCP && diff --git a/src/Sessions.cc b/src/Sessions.cc index 4f31d29346..f03b6d4c63 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -640,20 +640,24 @@ bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, min_hdr_len = sizeof(struct udphdr); break; case IPPROTO_ICMP: - default: min_hdr_len = ICMP_MINLEN; break; + default: + internal_error("unknown protocol"); } + if ( len < min_hdr_len ) { Weird("truncated_header", h, p); return true; } + if ( caplen < min_hdr_len ) { Weird("internally_truncated_header", h, p); return true; } + return false; } diff --git a/src/bro.bif b/src/bro.bif index 375a1c64c1..64ed7d1f2f 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -2067,6 +2067,7 @@ function routing0_data_to_addrs%(s: string%): addr_set const u_char* bytes = s->Bytes(); bytes += 4; // go past 32-bit reserved field len -= 4; + if ( ( len % 16 ) != 0 ) reporter->Warning("Bad ip6_routing data length: %d", s->Len()); From 72f098cb5955c16e33ca474257fa2598b2b10766 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 23 Mar 2012 17:39:27 -0700 Subject: [PATCH 169/651] Adding btest state file to gitignore. --- testing/btest/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/btest/.gitignore b/testing/btest/.gitignore index 5282177d90..b4c1b7a858 100644 --- a/testing/btest/.gitignore +++ b/testing/btest/.gitignore @@ -1,3 +1,4 @@ .tmp +.btest.failed.dat diag.log coverage.log From d889f1463800c218b4422bf70bfaff4a297d87bf Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 23 Mar 2012 17:43:31 -0700 Subject: [PATCH 170/651] Updating submodule(s). 
[nomail] --- CHANGES | 46 ++++++++++++++++++++++++++++++++++++++++++++++ VERSION | 2 +- aux/broccoli | 2 +- aux/btest | 2 +- 4 files changed, 49 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index 8bbd14fde9..9d5c6dc05f 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,50 @@ +2.0-179 | 2012-03-23 17:43:31 -0700 + + * Remove the default "tcp or udp or icmp" filter. In default mode, + Bro would load the packet filter script framework which installs a + filter that allows all packets, but in bare mode (the -b option), + this old filter would not follow IPv6 protocol chains and thus + filter out packets with extension headers. (Jon Siwek) + + * Update PacketFilter/Discarder code for IP version independence. + (Jon Siwek) + + * Fix some IPv6 header related bugs. (Jon Siwek) + + * Add IPv6 fragment reassembly. (Jon Siwek) + + * Add handling for IPv6 extension header chains. Addresses #531. + (Jon Siwek) + + - The script-layer 'pkt_hdr' type is extended with a new 'ip6' field + representing the full IPv6 header chain. + + - The 'new_packet' event is now raised for IPv6 packets. Addresses + #523. + + - A new event called 'ipv6_ext_header' is raised for any IPv6 + packet containing extension headers. + + - A new event called 'esp_packet' is raised for any packets using + ESP ('new_packet' and 'ipv6_ext_header' events provide + connection info, but that info can't be provided here since the + upper-layer payload is encrypted). + + - The 'unknown_protocol' weird is now raised more reliably when + Bro sees a transport protocol or IPv6 extension header it can't + handle. Addresses #522. + + * Add unit tests for IPv6 fragment reassembly, ipv6_ext_headers and + esp_packet events. (Jon Siwek) + + * Adapt FreeBSD's inet_ntop implementation for internal use. Now we + get consistent text representations of IPv6 addresses across + platforms. (Jon Siwek) + + * Update documentation for new syntax of IPv6 literals. (Jon Siwek) + + 2.0-150 | 2012-03-13 16:16:22 -0700 * Changing the regular expression to allow Site::local_nets in diff --git a/VERSION b/VERSION index aeb2df7379..db8d11e293 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-150 +2.0-179 diff --git a/aux/broccoli b/aux/broccoli index 612e95ac62..a4046c2f79 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 612e95ac62a06b32b2e9e627f30527012a89a12c +Subproject commit a4046c2f79b6ab0ac19ae8be94b79c6ce578bea7 diff --git a/aux/btest b/aux/btest index 9c9fde204d..dc78a3ebf5 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 9c9fde204dd5518bdfdb8b4a86d38ed06e597209 +Subproject commit dc78a3ebf5cd8fbd1b3034990e36fa21a51d1a19 From 9732859d44ea66098f541fd879c0781b8362b718 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 26 Mar 2012 12:20:39 -0700 Subject: [PATCH 171/651] add first simple benchmark reader (it simply spews random data, amount of lines specified in source). 
--- src/CMakeLists.txt | 2 +- src/input/Manager.cc | 9 +- src/input/readers/Ascii.cc | 10 +- src/input/readers/Benchmark.cc | 198 +++++++++++++++++++++++++++++++++ src/input/readers/Benchmark.h | 47 ++++++++ src/types.bif | 1 + 6 files changed, 256 insertions(+), 11 deletions(-) create mode 100644 src/input/readers/Benchmark.cc create mode 100644 src/input/readers/Benchmark.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d9ec76f8d2..9b075decd5 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -425,7 +425,7 @@ set(bro_SRCS input/ReaderFrontend.cc input/readers/Ascii.cc input/readers/Raw.cc - + input/readers/Benchmark.cc ${dns_SRCS} ${openssl_SRCS} diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 6d97c2f50b..f8ad493e11 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -7,6 +7,7 @@ #include "ReaderBackend.h" #include "readers/Ascii.h" #include "readers/Raw.h" +#include "readers/Benchmark.h" #include "Event.h" #include "EventHandler.h" @@ -149,6 +150,7 @@ struct ReaderDefinition { ReaderDefinition input_readers[] = { { BifEnum::Input::READER_ASCII, "Ascii", 0, reader::Ascii::Instantiate }, { BifEnum::Input::READER_RAW, "Raw", 0, reader::Raw::Instantiate }, + { BifEnum::Input::READER_BENCHMARK, "Benchmark", 0, reader::Benchmark::Instantiate }, // End marker { BifEnum::Input::READER_DEFAULT, "None", 0, (ReaderBackend* (*)(ReaderFrontend* frontend))0 } @@ -600,7 +602,7 @@ bool Manager::RemoveStream(const string &name) { } if ( i->removed ) { - reporter->Error("Stream %s is already queued for removal. Ignoring", name.c_str()); + reporter->Error("Stream %s is already queued for removal. Ignoring remove.", name.c_str()); return false; } @@ -690,6 +692,11 @@ bool Manager::ForceUpdate(const string &name) reporter->Error("Stream %s not found", name.c_str()); return false; } + + if ( i->removed ) { + reporter->Error("Stream %s is already queued for removal. Ignoring force update.", name.c_str()); + return false; + } i->reader->Update(); diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 553a4ada81..17391afe73 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -110,15 +110,7 @@ bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* c return false; } - switch ( mode ) { - case MANUAL: - case REREAD: - case STREAM: - DoUpdate(); - break; - default: - assert(false); - } + DoUpdate(); return true; } diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc new file mode 100644 index 0000000000..b48074c146 --- /dev/null +++ b/src/input/readers/Benchmark.cc @@ -0,0 +1,198 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include "Benchmark.h" +#include "NetVar.h" + +#include "../../threading/SerialTypes.h" + +#define MANUAL 0 +#define REREAD 1 +#define STREAM 2 + +#include +#include +#include + +using namespace input::reader; +using threading::Value; +using threading::Field; + + + +Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) +{ +} + +Benchmark::~Benchmark() +{ + DoFinish(); +} + +void Benchmark::DoFinish() +{ +} + +bool Benchmark::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) +{ + mode = arg_mode; + + num_fields = arg_num_fields; + fields = arg_fields; + num_lines = atoi(path.c_str()); + + if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { + Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); + return false; + } + + DoUpdate(); + + return true; +} + +string Benchmark::RandomString(const int len) { + string s; + + s.reserve(len); + + static const char values[] = + "0123456789!@#$%^&*()-_=+{}[]\\|" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz"; + + for (int i = 0; i < len; ++i) { + s[i] = values[rand() / (RAND_MAX / sizeof(values))]; + } + + return s; +} + +// read the entire file and send appropriate thingies back to InputMgr +bool Benchmark::DoUpdate() { + for ( int i = 0; i < num_lines; i++ ) { + Value** field = new Value*[num_fields]; + for (unsigned int j = 0; j < num_fields; j++ ) { + field[j] = EntryToVal(fields[j]->type, fields[j]->subtype); + } + SendEntry(field); + } + + EndCurrentSend(); + + return true; +} + +threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) { + Value* val = new Value(type, true); + + // basically construct something random from the fields that we want. + + switch ( type ) { + case TYPE_ENUM: + assert(false); // no enums, please. + case TYPE_STRING: + val->val.string_val = new string(RandomString(10)); + break; + + case TYPE_BOOL: + val->val.int_val = 1; // we never lie. + break; + + case TYPE_INT: + val->val.int_val = rand(); + break; + + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + val->val.double_val = random(); + break; + + case TYPE_COUNT: + case TYPE_COUNTER: + val->val.uint_val = rand(); + break; + + case TYPE_PORT: + val->val.port_val.port = rand() / (RAND_MAX / 60000); + val->val.port_val.proto = TRANSPORT_UNKNOWN; + break; + + case TYPE_SUBNET: { + val->val.subnet_val.prefix = StringToAddr("192.168.17.1"); + val->val.subnet_val.length = 16; + } + break; + + case TYPE_ADDR: + val->val.addr_val = StringToAddr("192.168.17.1"); + break; + + case TYPE_TABLE: + case TYPE_VECTOR: + // First - common initialization + // Then - initialization for table. + // Then - initialization for vector. + // Then - common stuff + { + // how many entries do we have... 
+ unsigned int length = rand() / (RAND_MAX / 15); + + Value** lvals = new Value* [length]; + + if ( type == TYPE_TABLE ) { + val->val.set_val.vals = lvals; + val->val.set_val.size = length; + } else if ( type == TYPE_VECTOR ) { + val->val.vector_val.vals = lvals; + val->val.vector_val.size = length; + } else { + assert(false); + } + + if ( length == 0 ) + break; //empty + + for ( unsigned int pos = 0; pos < length; pos++ ) { + + Value* newval = EntryToVal(subtype, TYPE_ENUM); + if ( newval == 0 ) { + Error("Error while reading set"); + return 0; + } + lvals[pos] = newval; + } + + break; + } + + + default: + Error(Fmt("unsupported field format %d", type)); + return 0; + } + + return val; + +} + + +bool Benchmark::DoHeartbeat(double network_time, double current_time) +{ + ReaderBackend::DoHeartbeat(network_time, current_time); + + switch ( mode ) { + case MANUAL: + // yay, we do nothing :) + break; + case REREAD: + case STREAM: + Update(); // call update and not DoUpdate, because update actually checks disabled. + break; + default: + assert(false); + } + + return true; +} + diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h new file mode 100644 index 0000000000..5a82c5b726 --- /dev/null +++ b/src/input/readers/Benchmark.h @@ -0,0 +1,47 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef INPUT_READERS_BENCHMARK_H +#define INPUT_READERS_BENCHMARK_H + + +#include "../ReaderBackend.h" + +namespace input { namespace reader { + +class Benchmark : public ReaderBackend { +public: + Benchmark(ReaderFrontend* frontend); + ~Benchmark(); + + static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Benchmark(frontend); } + +protected: + + virtual bool DoInit(string path, int mode, int arg_num_fields, const threading::Field* const* fields); + + virtual void DoFinish(); + + virtual bool DoUpdate(); + +private: + + virtual bool DoHeartbeat(double network_time, double current_time); + + unsigned int num_fields; + + const threading::Field* const * fields; // raw mapping + + threading::Value* EntryToVal(TypeTag Type, TypeTag subtype); + + int mode; + int num_lines; + + string RandomString(const int len); + +}; + + +} +} + +#endif /* INPUT_READERS_BENCHMARK_H */ diff --git a/src/types.bif b/src/types.bif index 26850bfa93..682170e3a6 100644 --- a/src/types.bif +++ b/src/types.bif @@ -174,6 +174,7 @@ enum Reader %{ READER_DEFAULT, READER_ASCII, READER_RAW, + READER_BENCHMARK, %} enum Event %{ From 0ceca706f6d1a465bcb00b28164751e16e7ca0ff Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 26 Mar 2012 14:35:01 -0500 Subject: [PATCH 172/651] Change routing0_data_to_addrs BIF to return vector of addresses. Because the order of addresses in type 0 routing headers is interesting/important. --- scripts/base/init-bare.bro | 7 +++++++ src/bro.bif | 15 ++++++--------- .../Baseline/bifs.routing0_data_to_addrs/output | 5 +---- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index b9eca66d24..b2237d7af8 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -46,6 +46,13 @@ type index_vec: vector of count; ## then remove this alias. type string_vec: vector of string; +## A vector of addresses. +## +## .. todo:: We need this type definition only for declaring builtin functions via +## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and +## then remove this alias. 
+type addr_vec: vector of addr; + ## A table of strings indexed by strings. ## ## .. todo:: We need this type definition only for declaring builtin functions via diff --git a/src/bro.bif b/src/bro.bif index 64ed7d1f2f..5ecc582a07 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -2050,18 +2050,15 @@ function is_v6_addr%(a: addr%): bool # =========================================================================== ## Converts the *data* field of :bro:type:`ip6_routing` records that have -## *rtype* of 0 into a set of addresses. +## *rtype* of 0 into a vector of addresses. ## ## s: The *data* field of an :bro:type:`ip6_routing` record that has ## an *rtype* of 0. ## -## Returns: The set of addresses contained in the routing header data. -function routing0_data_to_addrs%(s: string%): addr_set +## Returns: The vector of addresses contained in the routing header data. +function routing0_data_to_addrs%(s: string%): addr_vec %{ - BroType* index_type = base_type(TYPE_ADDR); - TypeList* set_index = new TypeList(index_type); - set_index->Append(index_type); - TableVal* tv = new TableVal(new SetType(set_index, 0)); + VectorVal* rval = new VectorVal(new VectorType(base_type(TYPE_ADDR))); int len = s->Len(); const u_char* bytes = s->Bytes(); @@ -2074,12 +2071,12 @@ function routing0_data_to_addrs%(s: string%): addr_set while ( len > 0 ) { IPAddr a(IPAddr::IPv6, (const uint32*) bytes, IPAddr::Network); - tv->Assign(new AddrVal(a), 0); + rval->Assign(rval->Size(), new AddrVal(a), 0); bytes += 16; len -= 16; } - return tv; + return rval; %} ## Converts a :bro:type:`addr` to a :bro:type:`index_vec`. diff --git a/testing/btest/Baseline/bifs.routing0_data_to_addrs/output b/testing/btest/Baseline/bifs.routing0_data_to_addrs/output index 7179bf8564..c79aef89d0 100644 --- a/testing/btest/Baseline/bifs.routing0_data_to_addrs/output +++ b/testing/btest/Baseline/bifs.routing0_data_to_addrs/output @@ -1,4 +1 @@ -{ -2001:78:1:32::1, -2001:78:1:32::2 -} +[2001:78:1:32::1, 2001:78:1:32::2] From 016a2540a5afc867bbfb68dd92e5dad667bf920d Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 26 Mar 2012 12:41:59 -0700 Subject: [PATCH 173/651] ...and spread out streaming reads over time. --- src/input/readers/Benchmark.cc | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index b48074c146..a4cf5f6818 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -51,9 +51,7 @@ bool Benchmark::DoInit(string path, int arg_mode, int arg_num_fields, const Fiel } string Benchmark::RandomString(const int len) { - string s; - - s.reserve(len); + string s(len, ' '); static const char values[] = "0123456789!@#$%^&*()-_=+{}[]\\|" @@ -74,10 +72,19 @@ bool Benchmark::DoUpdate() { for (unsigned int j = 0; j < num_fields; j++ ) { field[j] = EntryToVal(fields[j]->type, fields[j]->subtype); } - SendEntry(field); + + if ( mode == STREAM ) { + // do not do tracking, spread out elements over the second that we have... + Put(field); + usleep(900000/num_lines); + } else { + SendEntry(field); + } } - EndCurrentSend(); + //if ( mode != STREAM ) { // well, does not really make sense in the streaming sense - but I like getting the event. 
+ EndCurrentSend(); + //} return true; } From 28f3fa01444b0c5595bafc6585320cdd11168b0f Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 26 Mar 2012 13:52:58 -0700 Subject: [PATCH 174/651] make time types always return current time for benchmark reader --- src/input/readers/Benchmark.cc | 13 ++++++++++++- src/input/readers/Benchmark.h | 2 ++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index a4cf5f6818..07ee7eb9bc 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -65,6 +65,14 @@ string Benchmark::RandomString(const int len) { return s; } +double Benchmark::CurrTime() { + struct timeval tv; + assert ( gettimeofday(&tv, 0) >= 0 ); + + return double(tv.tv_sec) + double(tv.tv_usec) / 1e6; +} + + // read the entire file and send appropriate thingies back to InputMgr bool Benchmark::DoUpdate() { for ( int i = 0; i < num_lines; i++ ) { @@ -109,8 +117,11 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) { val->val.int_val = rand(); break; - case TYPE_DOUBLE: case TYPE_TIME: + val->val.double_val = CurrTime(); + break; + + case TYPE_DOUBLE: case TYPE_INTERVAL: val->val.double_val = random(); break; diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index 5a82c5b726..e8de4ac773 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -29,6 +29,8 @@ private: unsigned int num_fields; + double CurrTime(); + const threading::Field* const * fields; // raw mapping threading::Value* EntryToVal(TypeTag Type, TypeTag subtype); From f4101b52659e19bea11a94c7e51fcfa501e4317c Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 27 Mar 2012 16:05:45 -0500 Subject: [PATCH 175/651] Improve handling of IPv6 routing type 0 extension headers. - flow_weird event with name argument value of "routing0_hdr" is raised for packets containing an IPv6 routing type 0 header because this type of header is now deprecated according to RFC 5095. - packets with a routing type 0 header and non-zero segments left now use the last address in that header in order to associate with a connection/flow and for calculating TCP/UDP checksums. 
- added a set of IPv4/IPv6 TCP/UDP checksum unit tests --- src/EventHandler.h | 1 - src/Frag.cc | 2 +- src/IP.cc | 14 ++++ src/IP.h | 56 ++++++++++++- src/PacketFilter.cc | 2 +- src/PacketSort.cc | 2 +- src/Reassem.cc | 3 +- src/Reassem.h | 3 +- src/Reporter.h | 1 - src/Serializer.cc | 2 +- src/Sessions.cc | 6 +- src/TCP_Endpoint.cc | 20 ++--- src/TCP_Endpoint.h | 1 - src/TCP_Reassembler.cc | 2 +- src/UDP.cc | 32 +++++-- src/UDP.h | 5 ++ src/bro.bif | 3 + src/net_util.cc | 79 ------------------ src/net_util.h | 12 +-- testing/btest/Baseline/core.checksums/bad.out | 9 ++ .../btest/Baseline/core.checksums/good.out | 2 + .../Baseline/core.ipv6_ext_headers/output | 2 + .../btest/Traces/chksums/ip4-bad-chksum.pcap | Bin 0 -> 86 bytes .../Traces/chksums/ip4-tcp-bad-chksum.pcap | Bin 0 -> 94 bytes .../Traces/chksums/ip4-tcp-good-chksum.pcap | Bin 0 -> 94 bytes .../Traces/chksums/ip4-udp-bad-chksum.pcap | Bin 0 -> 86 bytes .../Traces/chksums/ip4-udp-good-chksum.pcap | Bin 0 -> 86 bytes .../chksums/ip6-route0-tcp-bad-chksum.pcap | Bin 0 -> 154 bytes .../chksums/ip6-route0-tcp-good-chksum.pcap | Bin 0 -> 154 bytes .../chksums/ip6-route0-udp-bad-chksum.pcap | Bin 0 -> 146 bytes .../chksums/ip6-route0-udp-good-chksum.pcap | Bin 0 -> 146 bytes .../Traces/chksums/ip6-tcp-bad-chksum.pcap | Bin 0 -> 114 bytes .../Traces/chksums/ip6-tcp-good-chksum.pcap | Bin 0 -> 114 bytes .../Traces/chksums/ip6-udp-bad-chksum.pcap | Bin 0 -> 106 bytes .../Traces/chksums/ip6-udp-good-chksum.pcap | Bin 0 -> 106 bytes ..._routing.trace => ipv6-hbh-routing0.trace} | Bin .../btest/bifs/routing0_data_to_addrs.test | 2 +- testing/btest/core/checksums.test | 15 ++++ testing/btest/core/ipv6_ext_headers.test | 16 +++- 39 files changed, 171 insertions(+), 121 deletions(-) create mode 100644 testing/btest/Baseline/core.checksums/bad.out create mode 100644 testing/btest/Baseline/core.checksums/good.out create mode 100644 testing/btest/Traces/chksums/ip4-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip4-tcp-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip4-tcp-good-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip4-udp-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip4-udp-good-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-route0-tcp-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-route0-tcp-good-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-route0-udp-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-route0-udp-good-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-tcp-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-tcp-good-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-udp-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-udp-good-chksum.pcap rename testing/btest/Traces/{ext_hdr_hbh_routing.trace => ipv6-hbh-routing0.trace} (100%) create mode 100644 testing/btest/core/checksums.test diff --git a/src/EventHandler.h b/src/EventHandler.h index 2aebe87584..a86b8a285c 100644 --- a/src/EventHandler.h +++ b/src/EventHandler.h @@ -7,7 +7,6 @@ #include "List.h" #include "BroList.h" -#include "net_util.h" class Func; class FuncType; diff --git a/src/Frag.cc b/src/Frag.cc index 9bd16a71c9..6c27450f37 100644 --- a/src/Frag.cc +++ b/src/Frag.cc @@ -28,7 +28,7 @@ void FragTimer::Dispatch(double t, int /* is_expire */) FragReassembler::FragReassembler(NetSessions* arg_s, const IP_Hdr* ip, const u_char* pkt, HashKey* k, double t) -: 
Reassembler(0, ip->DstAddr(), REASSEM_IP) +: Reassembler(0, REASSEM_IP) { s = arg_s; key = k; diff --git a/src/IP.cc b/src/IP.cc index 4148c58a33..620b294d40 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -305,6 +305,20 @@ void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) chain.push_back(p); + // Check for routing type 0 header. + if ( current_type == IPPROTO_ROUTING && + ((const struct ip6_rthdr*)hdrs)->ip6r_type == 0 ) + { + if ( ((const struct ip6_rthdr*)hdrs)->ip6r_segleft > 0 ) + // Remember the index for later so we can determine the final + // destination of the packet. + route0_hdr_idx = chain.size() - 1; + + // RFC 5095 deprecates routing type 0 headers, so raise weirds + IPAddr src(((const struct ip6_hdr*)(chain[0]->Data()))->ip6_src); + reporter->Weird(src, FinalDst(), "routing0_hdr"); + } + hdrs += len; length += len; } while ( current_type != IPPROTO_FRAGMENT && diff --git a/src/IP.h b/src/IP.h index cb5bcf77c7..7ed0968ef3 100644 --- a/src/IP.h +++ b/src/IP.h @@ -117,7 +117,8 @@ public: /** * Initializes the header chain from an IPv6 header structure. */ - IPv6_Hdr_Chain(const struct ip6_hdr* ip6) { Init(ip6, false); } + IPv6_Hdr_Chain(const struct ip6_hdr* ip6) : route0_hdr_idx(0) + { Init(ip6, false); } ~IPv6_Hdr_Chain() { for ( size_t i = 0; i < chain.size(); ++i ) delete chain[i]; } @@ -171,6 +172,27 @@ public: { return IsFragment() ? (ntohs(GetFragHdr()->ip6f_offlg) & 0x0001) != 0 : 0; } + /** + * Returns the final destination of the packet this chain belongs to. + * If the chain doesn't contain any routing type 0 header with non-zero + * segments left, this is the destination in the main IP header, else + * it's the last address in the routing header. (If there were to be + * more than one routing type 0 header with non-zero segments left, the + * last one would be the one referenced). + */ + IPAddr FinalDst() const + { + if ( route0_hdr_idx ) + { + const struct in6_addr* a = (const struct in6_addr*) + (chain[route0_hdr_idx]->Data() + + chain[route0_hdr_idx]->Length() - 16); + return IPAddr(*a); + } + else + return IPAddr(((const struct ip6_hdr*)(chain[0]->Data()))->ip6_dst); + } + /** * Returns a vector of ip6_ext_hdr RecordVals that includes script-layer * representation of all extension headers in the chain. @@ -186,13 +208,24 @@ protected: * Initializes the header chain from an IPv6 header structure, and replaces * the first next protocol pointer field that points to a fragment header. */ - IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next) + IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next) : route0_hdr_idx(0) { Init(ip6, true, next); } void Init(const struct ip6_hdr* ip6, bool set_next, uint16 next = 0); vector chain; - uint16 length; // The summation of all header lengths in the chain in bytes. + + /** + * The summation of all header lengths in the chain in bytes. + */ + uint16 length; + + /** + * Index of routing type 0 header with non-zero segments left in the header + * chain or zero if none exists (it's fine since the main IP header must + * always be at index zero). + */ + uint8 route0_hdr_idx; }; class IP_Hdr { @@ -248,7 +281,22 @@ public: IPAddr SrcAddr() const { return ip4 ? IPAddr(ip4->ip_src) : IPAddr(ip6->ip6_src); } - IPAddr DstAddr() const + /** + * Returns the final destination address of the header's packet, which + * for IPv6 packets without a routing type 0 extension header and IPv4 + * packets is the destination address in the IP header. 
For IPv6 packets + * with a routing type 0 extension header and a non-zero number of + * segments left, the final destination is the last address in the routing + * header. If the segments left of a routing type 0 header were zero, + * then the final destination is in the IP header itself. + */ + IPAddr FinalDstAddr() const + { return ip4 ? IPAddr(ip4->ip_dst) : ip6_hdrs->FinalDst(); } + + /** + * Returns the destination address held in the IP header. + */ + IPAddr IPHeaderDstAddr() const { return ip4 ? IPAddr(ip4->ip_dst) : IPAddr(ip6->ip6_dst); } /** diff --git a/src/PacketFilter.cc b/src/PacketFilter.cc index 4fb3b1c8f7..412bf14587 100644 --- a/src/PacketFilter.cc +++ b/src/PacketFilter.cc @@ -58,7 +58,7 @@ bool PacketFilter::Match(const IP_Hdr* ip, int len, int caplen) if ( f ) return MatchFilter(*f, *ip, len, caplen); - f = (Filter*) dst_filter.Lookup(ip->DstAddr(), 128); + f = (Filter*) dst_filter.Lookup(ip->FinalDstAddr(), 128); if ( f ) return MatchFilter(*f, *ip, len, caplen); diff --git a/src/PacketSort.cc b/src/PacketSort.cc index 04c525c4d1..3fb0e9ccbf 100644 --- a/src/PacketSort.cc +++ b/src/PacketSort.cc @@ -45,7 +45,7 @@ PacketSortElement::PacketSortElement(PktSrc* arg_src, (pkt + tcp_offset); id.src_addr = ip_hdr->SrcAddr(); - id.dst_addr = ip_hdr->DstAddr(); + id.dst_addr = ip_hdr->FinalDstAddr(); id.src_port = tp->th_sport; id.dst_port = tp->th_dport; id.is_one_way = 0; diff --git a/src/Reassem.cc b/src/Reassem.cc index fb445c08f7..c3c19ff0e6 100644 --- a/src/Reassem.cc +++ b/src/Reassem.cc @@ -43,8 +43,7 @@ DataBlock::DataBlock(const u_char* data, int size, int arg_seq, unsigned int Reassembler::total_size = 0; -Reassembler::Reassembler(int init_seq, const IPAddr& ip_addr, - ReassemblerType arg_type) +Reassembler::Reassembler(int init_seq, ReassemblerType arg_type) { blocks = last_block = 0; trim_seq = last_reassem_seq = init_seq; diff --git a/src/Reassem.h b/src/Reassem.h index c9590ea949..1f65059e02 100644 --- a/src/Reassem.h +++ b/src/Reassem.h @@ -26,8 +26,7 @@ enum ReassemblerType { REASSEM_IP, REASSEM_TCP }; class Reassembler : public BroObj { public: - Reassembler(int init_seq, const IPAddr& ip_addr, - ReassemblerType arg_type); + Reassembler(int init_seq, ReassemblerType arg_type); virtual ~Reassembler(); void NewBlock(double t, int seq, int len, const u_char* data); diff --git a/src/Reporter.h b/src/Reporter.h index 210dd241d2..e610e1519e 100644 --- a/src/Reporter.h +++ b/src/Reporter.h @@ -9,7 +9,6 @@ #include #include "util.h" -#include "net_util.h" #include "EventHandler.h" #include "IPAddr.h" diff --git a/src/Serializer.cc b/src/Serializer.cc index 06bbf73f48..6aa554cc2b 100644 --- a/src/Serializer.cc +++ b/src/Serializer.cc @@ -1105,7 +1105,7 @@ void Packet::Describe(ODesc* d) const const IP_Hdr ip = IP(); d->Add(ip.SrcAddr()); d->Add("->"); - d->Add(ip.DstAddr()); + d->Add(ip.FinalDstAddr()); } bool Packet::Serialize(SerialInfo* info) const diff --git a/src/Sessions.cc b/src/Sessions.cc index 84b57bdc62..4b5f201db5 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -493,7 +493,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, ConnID id; id.src_addr = ip_hdr->SrcAddr(); - id.dst_addr = ip_hdr->DstAddr(); + id.dst_addr = ip_hdr->FinalDstAddr(); Dictionary* d = 0; switch ( proto ) { @@ -667,7 +667,7 @@ FragReassembler* NetSessions::NextFragment(double t, const IP_Hdr* ip, ListVal* key = new ListVal(TYPE_ANY); key->Append(new AddrVal(ip->SrcAddr())); - key->Append(new AddrVal(ip->DstAddr())); + key->Append(new 
AddrVal(ip->FinalDstAddr())); key->Append(new Val(frag_id, TYPE_COUNT)); HashKey* h = ch->ComputeHash(key, 1); @@ -1177,7 +1177,7 @@ void NetSessions::Weird(const char* name, void NetSessions::Weird(const char* name, const IP_Hdr* ip) { - reporter->Weird(ip->SrcAddr(), ip->DstAddr(), name); + reporter->Weird(ip->SrcAddr(), ip->FinalDstAddr(), name); } unsigned int NetSessions::ConnectionMemoryUsage() diff --git a/src/TCP_Endpoint.cc b/src/TCP_Endpoint.cc index 69c08870d9..d6f5d1bb84 100644 --- a/src/TCP_Endpoint.cc +++ b/src/TCP_Endpoint.cc @@ -31,14 +31,6 @@ TCP_Endpoint::TCP_Endpoint(TCP_Analyzer* arg_analyzer, int arg_is_orig) tcp_analyzer->Conn()->OrigAddr(); dst_addr = is_orig ? tcp_analyzer->Conn()->OrigAddr() : tcp_analyzer->Conn()->RespAddr(); - - checksum_base = ones_complement_checksum(src_addr, 0); - checksum_base = ones_complement_checksum(dst_addr, checksum_base); - // Note, for IPv6, strictly speaking this field is 32 bits - // rather than 16 bits. But because the upper bits are all zero, - // we get the same checksum either way. The same applies to - // later when we add in the data length in ValidChecksum(). - checksum_base += htons(IPPROTO_TCP); } TCP_Endpoint::~TCP_Endpoint() @@ -108,13 +100,21 @@ void TCP_Endpoint::SizeBufferedData(int& waiting_on_hole, int& waiting_on_ack) int TCP_Endpoint::ValidChecksum(const struct tcphdr* tp, int len) const { - uint32 sum = checksum_base; + uint32 sum; int tcp_len = tp->th_off * 4 + len; if ( len % 2 == 1 ) // Add in pad byte. - sum += htons(((const u_char*) tp)[tcp_len - 1] << 8); + sum = htons(((const u_char*) tp)[tcp_len - 1] << 8); + else + sum = 0; + sum = ones_complement_checksum(src_addr, sum); + sum = ones_complement_checksum(dst_addr, sum); + // Note, for IPv6, strictly speaking the protocol and length fields are + // 32 bits rather than 16 bits. But because the upper bits are all zero, + // we get the same checksum either way. + sum += htons(IPPROTO_TCP); sum += htons((unsigned short) tcp_len); // fill out pseudo header sum = ones_complement_checksum((void*) tp, tcp_len, sum); diff --git a/src/TCP_Endpoint.h b/src/TCP_Endpoint.h index 52a757b256..28a114adf3 100644 --- a/src/TCP_Endpoint.h +++ b/src/TCP_Endpoint.h @@ -127,7 +127,6 @@ public: TCP_Reassembler* contents_processor; TCP_Analyzer* tcp_analyzer; BroFile* contents_file; - uint32 checksum_base; double start_time, last_time; IPAddr src_addr; // the other endpoint diff --git a/src/TCP_Reassembler.cc b/src/TCP_Reassembler.cc index ba31ab68d0..215af07bd7 100644 --- a/src/TCP_Reassembler.cc +++ b/src/TCP_Reassembler.cc @@ -29,7 +29,7 @@ TCP_Reassembler::TCP_Reassembler(Analyzer* arg_dst_analyzer, TCP_Analyzer* arg_tcp_analyzer, TCP_Reassembler::Type arg_type, bool arg_is_orig, TCP_Endpoint* arg_endp) -: Reassembler(1, arg_endp->dst_addr, REASSEM_TCP) +: Reassembler(1, REASSEM_TCP) { dst_analyzer = arg_dst_analyzer; tcp_analyzer = arg_tcp_analyzer; diff --git a/src/UDP.cc b/src/UDP.cc index c5dfe2c316..fc559bf59d 100644 --- a/src/UDP.cc +++ b/src/UDP.cc @@ -57,12 +57,14 @@ void UDP_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig, { bool bad = false; - if ( ip->IP4_Hdr() && chksum && - udp_checksum(ip->IP4_Hdr(), up, len) != 0xffff ) - bad = true; + if ( ip->IP4_Hdr() ) + { + if ( chksum && ! ValidateChecksum(ip, up, len) ) + bad = true; + } - if ( ip->IP6_Hdr() && /* checksum is not optional for IPv6 */ - udp6_checksum(ip->IP6_Hdr(), up, len) != 0xffff ) + /* checksum is not optional for IPv6 */ + else if ( ! 
ValidateChecksum(ip, up, len) ) bad = true; if ( bad ) @@ -204,4 +206,24 @@ unsigned int UDP_Analyzer::MemoryAllocation() const return Analyzer::MemoryAllocation() + padded_sizeof(*this) - 24; } +bool UDP_Analyzer::ValidateChecksum(const IP_Hdr* ip, const udphdr* up, int len) + { + uint32 sum; + if ( len % 2 == 1 ) + // Add in pad byte. + sum = htons(((const u_char*) up)[len - 1] << 8); + else + sum = 0; + + sum = ones_complement_checksum(ip->SrcAddr(), sum); + sum = ones_complement_checksum(ip->FinalDstAddr(), sum); + // Note, for IPv6, strictly speaking the protocol and length fields are + // 32 bits rather than 16 bits. But because the upper bits are all zero, + // we get the same checksum either way. + sum += htons(IPPROTO_UDP); + sum += htons((unsigned short) len); + sum = ones_complement_checksum((void*) up, len, sum); + + return sum == 0xffff; + } diff --git a/src/UDP.h b/src/UDP.h index 5124adf4cd..b93d4da97f 100644 --- a/src/UDP.h +++ b/src/UDP.h @@ -4,6 +4,7 @@ #define udp_h #include "Analyzer.h" +#include typedef enum { UDP_INACTIVE, // no packet seen @@ -31,6 +32,10 @@ protected: virtual bool IsReuse(double t, const u_char* pkt); virtual unsigned int MemoryAllocation() const; + // Returns true if the checksum is valid, false if not + static bool ValidateChecksum(const IP_Hdr* ip, const struct udphdr* up, + int len); + bro_int_t request_len, reply_len; private: diff --git a/src/bro.bif b/src/bro.bif index 64ed7d1f2f..025a238b00 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -4798,6 +4798,9 @@ function uninstall_src_net_filter%(snet: subnet%) : bool ## Installs a filter to drop packets destined to a given IP address with ## a certain probability if none of a given set of TCP flags are set. +## Note that for IPv6 packets with a routing type 0 header and non-zero +## segments left, this filters out against the final destination of the +## packet according to the routing extension header. ## ## ip: Drop packets to this IP address. ## diff --git a/src/net_util.cc b/src/net_util.cc index 1a4e9f1a7f..ef59154304 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -31,85 +31,6 @@ int ones_complement_checksum(const void* p, int b, uint32 sum) return sum; } -int ones_complement_checksum(const IPAddr& a, uint32 sum) - { - const uint32* bytes; - int len = a.GetBytes(&bytes); - return ones_complement_checksum(bytes, len*4, sum); - } - -int tcp_checksum(const struct ip* ip, const struct tcphdr* tp, int len) - { - // ### Note, this is only correct for IPv4. This routine is only - // used by the connection compressor (which we turn off for IPv6 - // traffic). - - int tcp_len = tp->th_off * 4 + len; - uint32 sum; - - if ( len % 2 == 1 ) - // Add in pad byte. - sum = htons(((const u_char*) tp)[tcp_len - 1] << 8); - else - sum = 0; - - sum = ones_complement_checksum((void*) &ip->ip_src.s_addr, 4, sum); - sum = ones_complement_checksum((void*) &ip->ip_dst.s_addr, 4, sum); - - uint32 addl_pseudo = - (htons(IPPROTO_TCP) << 16) | htons((unsigned short) tcp_len); - - sum = ones_complement_checksum((void*) &addl_pseudo, 4, sum); - sum = ones_complement_checksum((void*) tp, tcp_len, sum); - - return sum; - } - -int udp_checksum(const struct ip* ip, const struct udphdr* up, int len) - { - uint32 sum; - - if ( len % 2 == 1 ) - // Add in pad byte. 
- sum = htons(((const u_char*) up)[len - 1] << 8); - else - sum = 0; - - sum = ones_complement_checksum((void*) &ip->ip_src.s_addr, 4, sum); - sum = ones_complement_checksum((void*) &ip->ip_dst.s_addr, 4, sum); - - uint32 addl_pseudo = - (htons(IPPROTO_UDP) << 16) | htons((unsigned short) len); - - sum = ones_complement_checksum((void*) &addl_pseudo, 4, sum); - sum = ones_complement_checksum((void*) up, len, sum); - - return sum; - } - -int udp6_checksum(const struct ip6_hdr* ip6, const struct udphdr* up, int len) - { - uint32 sum; - - if ( len % 2 == 1 ) - // Add in pad byte. - sum = htons(((const u_char*) up)[len - 1] << 8); - else - sum = 0; - - sum = ones_complement_checksum((void*) ip6->ip6_src.s6_addr, 16, sum); - sum = ones_complement_checksum((void*) ip6->ip6_dst.s6_addr, 16, sum); - - uint32 l = htonl(len); - sum = ones_complement_checksum((void*) &l, 4, sum); - - uint32 addl_pseudo = htons(IPPROTO_UDP); - sum = ones_complement_checksum((void*) &addl_pseudo, 4, sum); - sum = ones_complement_checksum((void*) up, len, sum); - - return sum; - } - int icmp_checksum(const struct icmp* icmpp, int len) { uint32 sum; diff --git a/src/net_util.h b/src/net_util.h index 8787340328..a10b283ca1 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -60,12 +60,14 @@ inline int seq_delta(uint32 a, uint32 b) // Returns the ones-complement checksum of a chunk of b short-aligned bytes. extern int ones_complement_checksum(const void* p, int b, uint32 sum); -extern int ones_complement_checksum(const IPAddr& a, uint32 sum); -extern int tcp_checksum(const struct ip* ip, const struct tcphdr* tp, int len); -extern int udp_checksum(const struct ip* ip, const struct udphdr* up, int len); -extern int udp6_checksum(const struct ip6_hdr* ip, const struct udphdr* up, - int len); +inline int ones_complement_checksum(const IPAddr& a, uint32 sum) + { + const uint32* bytes; + int len = a.GetBytes(&bytes); + return ones_complement_checksum(bytes, len*4, sum); + } + extern int icmp_checksum(const struct icmp* icmpp, int len); // Returns 'A', 'B', 'C' or 'D' diff --git a/testing/btest/Baseline/core.checksums/bad.out b/testing/btest/Baseline/core.checksums/bad.out new file mode 100644 index 0000000000..cd3c799277 --- /dev/null +++ b/testing/btest/Baseline/core.checksums/bad.out @@ -0,0 +1,9 @@ +1332784981.078396 weird: bad_IP_checksum +1332784885.686428 weird: bad_TCP_checksum +1332784933.501023 weird: bad_UDP_checksum +1332785210.013051 weird: routing0_hdr +1332785210.013051 weird: bad_TCP_checksum +1332782580.798420 weird: routing0_hdr +1332782580.798420 weird: bad_UDP_checksum +1332785250.469132 weird: bad_TCP_checksum +1332781342.923813 weird: bad_UDP_checksum diff --git a/testing/btest/Baseline/core.checksums/good.out b/testing/btest/Baseline/core.checksums/good.out new file mode 100644 index 0000000000..627a330928 --- /dev/null +++ b/testing/btest/Baseline/core.checksums/good.out @@ -0,0 +1,2 @@ +1332785125.596793 weird: routing0_hdr +1332782508.592037 weird: routing0_hdr diff --git a/testing/btest/Baseline/core.ipv6_ext_headers/output b/testing/btest/Baseline/core.ipv6_ext_headers/output index a5a0caf7c6..c6ebddc7e1 100644 --- a/testing/btest/Baseline/core.ipv6_ext_headers/output +++ b/testing/btest/Baseline/core.ipv6_ext_headers/output @@ -1 +1,3 @@ +weird routing0_hdr from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:78:1:32::2 +[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=53/udp, resp_h=2001:78:1:32::2, resp_p=53/udp] [ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, 
dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] diff --git a/testing/btest/Traces/chksums/ip4-bad-chksum.pcap b/testing/btest/Traces/chksums/ip4-bad-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..6d8b9dd27daeda798a5fa7c56f4b419f10b83ab1 GIT binary patch literal 86 zcmca|c+)~A1{MYwaA0F#U<7hP*BAKP7%?*F0ofq@9}FO>I2c?R7!*Lt90VB{>wy|T QxYWSt1Ov}Xtq33h0Oq<8UH||9 literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip4-tcp-bad-chksum.pcap b/testing/btest/Traces/chksums/ip4-tcp-bad-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..b9ccd9e6b26cecfe37ce796a8503940e338096dc GIT binary patch literal 94 zcmca|c+)~A1{MYwaA0F#U<7i$t}F15spMiX1F}K*KNvt%aWJ?tFld03Ik457tp{oV S;Zg&J0Ej#TLjaQk*cbr7LlH~> literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip4-tcp-good-chksum.pcap b/testing/btest/Traces/chksums/ip4-tcp-good-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..ff3f01188473b75e4c4521c9d4944e54ebff953c GIT binary patch literal 94 zcmca|c+)~A1{MYwaA0F#U<7g|tt;@i;N)g71F}K*KNvt%aWJ?tFld03Ik457tp{oV V;Zg&J0Ej#TLjaQkgG>Sg0|0?W5YYet literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip4-udp-bad-chksum.pcap b/testing/btest/Traces/chksums/ip4-udp-bad-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..f3998c7e1c5a45f744b76e80226f7481916ef424 GIT binary patch literal 86 zcmca|c+)~A1{MYwaA0F#U<7hh*BAK9FJ))Y1F}K*KNvt%aWJ?tFereOISAIAst0NS Q;Zg&m6AU~Ij1fQp00ra`{Qv*} literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip4-udp-good-chksum.pcap b/testing/btest/Traces/chksums/ip4-udp-good-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..3aec5073295329666899654a9ff79d613d77eb5b GIT binary patch literal 86 zcmca|c+)~A1{MYwaA0F#U<7gm*BAIRP3K_H1F}K*KNvt%aWJ?tFereOISAIAst0NS Q;Zg&m6AU~nwIYB30Q^W2kpKVy literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-route0-tcp-bad-chksum.pcap b/testing/btest/Traces/chksums/ip6-route0-tcp-bad-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..0f5711fe2ed6e8b3620518a98e145e909bc3d811 GIT binary patch literal 154 zcmca|c+)~A1{MYwaA0F#U<7ikHWc{(Hez5X0JIQ6c|~405!8S zJ!t&@FX;b&RQ{}NHWmgZkWvLkh6UVqgFOQoA42 literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-route0-tcp-good-chksum.pcap b/testing/btest/Traces/chksums/ip6-route0-tcp-good-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..18f9a366c6b72243ce5cb98e634e25975055b4b6 GIT binary patch literal 154 zcmca|c+)~A1{MYwaA0F#U<7iWt}pPnl;&h80JIQ6c|~405!8S zJ!t&@FX;b&RQ{}NHWmgZkWvLkh683QjX-Q5U_|3Hl^PhGVBpyk5&;ANrB5PK literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-route0-udp-good-chksum.pcap b/testing/btest/Traces/chksums/ip6-route0-udp-good-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..deb1310107c9999ae5c90d11fe4b9ff42c0d68dc GIT binary patch literal 146 zcmca|c+)~A1{MYwaA0F#U<7j3EG_U~%E8Hy1!RNpe=vZkYP*{NVwh+83QjX-Q5U_|3Hl^PhGVBopu5dj1MTPq>R literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-tcp-bad-chksum.pcap 
b/testing/btest/Traces/chksums/ip6-tcp-bad-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..38d8abf18fc0fa1c61048d80819f854c543f3c98 GIT binary patch literal 114 zcmca|c+)~A1{MYwaA0F#U<7iKHWc{xXs|PQ0ofq@9}FO>+U_QR7$R&A3XCj2fSTEv d9yI>{7xe!>Dt}gXsR2U(L=OW)0FwgPXaH!=9F710 literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-tcp-good-chksum.pcap b/testing/btest/Traces/chksums/ip6-tcp-good-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..9ab19b0ad86b11cbf3e10406df3c14772959324e GIT binary patch literal 114 zcmca|c+)~A1{MYwaA0F#U<7gsHx&5Kw%}s$0Ed8;06B7gt@!gL?h literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/ext_hdr_hbh_routing.trace b/testing/btest/Traces/ipv6-hbh-routing0.trace similarity index 100% rename from testing/btest/Traces/ext_hdr_hbh_routing.trace rename to testing/btest/Traces/ipv6-hbh-routing0.trace diff --git a/testing/btest/bifs/routing0_data_to_addrs.test b/testing/btest/bifs/routing0_data_to_addrs.test index 4bf15cae87..a20bb3bf59 100644 --- a/testing/btest/bifs/routing0_data_to_addrs.test +++ b/testing/btest/bifs/routing0_data_to_addrs.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/ext_hdr_hbh_routing.trace %INPUT >output +# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-routing0.trace %INPUT >output # @TEST-EXEC: btest-diff output event ipv6_ext_headers(c: connection, p: pkt_hdr) diff --git a/testing/btest/core/checksums.test b/testing/btest/core/checksums.test new file mode 100644 index 0000000000..c01ab710af --- /dev/null +++ b/testing/btest/core/checksums.test @@ -0,0 +1,15 @@ +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-tcp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-tcp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: btest-diff bad.out +# @TEST-EXEC: btest-diff good.out diff --git a/testing/btest/core/ipv6_ext_headers.test b/testing/btest/core/ipv6_ext_headers.test index 170a67bc72..32a0f5d558 100644 --- a/testing/btest/core/ipv6_ext_headers.test +++ b/testing/btest/core/ipv6_ext_headers.test @@ -1,10 +1,22 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/ext_hdr_hbh_routing.trace %INPUT >output +# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-routing0.trace %INPUT >output # @TEST-EXEC: btest-diff output # Just check that the event is raised correctly for a packet containing # extension headers. 
- event ipv6_ext_headers(c: connection, p: pkt_hdr) { print p; } + +# Also check the weird for routing type 0 extensions headers +event flow_weird(name: string, src: addr, dst: addr) + { + print fmt("weird %s from %s to %s", name, src, dst); + } + +# And the connection for routing type 0 packets with non-zero segments left +# should use the last address in that extension header. +event new_connection(c: connection) + { + print c$id; + } From 169b3c833fc66fde49fbd653c87fbf56d951b846 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 27 Mar 2012 17:55:39 -0500 Subject: [PATCH 176/651] Add more data to icmp events --- src/ICMP.cc | 101 ++++++++++++++++++++++++++++++++++++++++++-------- src/ICMP.h | 8 ++++ src/event.bif | 8 ++-- 3 files changed, 98 insertions(+), 19 deletions(-) diff --git a/src/ICMP.cc b/src/ICMP.cc index 4df9cc049e..a5cfdbcb64 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -149,12 +149,20 @@ void ICMP_Analyzer::NextICMP6(double t, const struct icmp* icmpp, int len, int c break; // Router related messages. - case ND_NEIGHBOR_SOLICIT: - case ND_NEIGHBOR_ADVERT: case ND_REDIRECT: + Redirect(t, icmpp, len, caplen, data, ip_hdr); + break; + case ND_ROUTER_ADVERT: + RouterAdvert(t, icmpp, len, caplen, data, ip_hdr); + break; + case ND_NEIGHBOR_ADVERT: + NeighborAdvert(t, icmpp, len, caplen, data, ip_hdr); + break; + case ND_NEIGHBOR_SOLICIT: + NeighborSolicit(t, icmpp, len, caplen, data, ip_hdr); + break; case ND_ROUTER_SOLICIT: case ICMP6_ROUTER_RENUMBERING: - case ND_ROUTER_ADVERT: Router(t, icmpp, len, caplen, data, ip_hdr); break; @@ -489,6 +497,81 @@ void ICMP_Analyzer::Echo(double t, const struct icmp* icmpp, int len, } +void ICMP_Analyzer::RouterAdvert(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + { + EventHandlerPtr f = icmp_router_advertisement; + uint32 reachable, retrans; + + memcpy(&reachable, data, sizeof(reachable)); + memcpy(&retrans, data + sizeof(reachable), sizeof(retrans)); + + val_list* vl = new val_list; + vl->append(BuildConnVal()); + vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(new Val(icmpp->icmp_num_addrs, TYPE_COUNT)); + vl->append(new Val(icmpp->icmp_wpa & 0x80, TYPE_BOOL)); + vl->append(new Val(htons(icmpp->icmp_lifetime), TYPE_COUNT)); + vl->append(new Val(reachable, TYPE_INTERVAL)); + vl->append(new Val(retrans, TYPE_INTERVAL)); + + ConnectionEvent(f, vl); + } + + +void ICMP_Analyzer::NeighborAdvert(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + { + EventHandlerPtr f = icmp_neighbor_advertisement; + in6_addr tgtaddr; + + memcpy(&tgtaddr.s6_addr, data, sizeof(tgtaddr.s6_addr)); + + val_list* vl = new val_list; + vl->append(BuildConnVal()); + vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(new AddrVal(IPAddr(tgtaddr))); + + ConnectionEvent(f, vl); + } + + +void ICMP_Analyzer::NeighborSolicit(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + { + EventHandlerPtr f = icmp_neighbor_solicitation; + in6_addr tgtaddr; + + memcpy(&tgtaddr.s6_addr, data, sizeof(tgtaddr.s6_addr)); + + val_list* vl = new val_list; + vl->append(BuildConnVal()); + vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(new AddrVal(IPAddr(tgtaddr))); + + ConnectionEvent(f, vl); + } + + +void ICMP_Analyzer::Redirect(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + { + EventHandlerPtr f = icmp_redirect; + in6_addr tgtaddr, dstaddr; + 
+ memcpy(&tgtaddr.s6_addr, data, sizeof(tgtaddr.s6_addr)); + memcpy(&dstaddr.s6_addr, data + sizeof(tgtaddr.s6_addr), sizeof(dstaddr.s6_addr)); + + val_list* vl = new val_list; + vl->append(BuildConnVal()); + vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(new AddrVal(IPAddr(tgtaddr))); + vl->append(new AddrVal(IPAddr(dstaddr))); + + ConnectionEvent(f, vl); + } + + void ICMP_Analyzer::Router(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) { @@ -496,21 +579,9 @@ void ICMP_Analyzer::Router(double t, const struct icmp* icmpp, int len, switch ( icmpp->icmp_type ) { - case ND_NEIGHBOR_ADVERT: - f = icmp_neighbor_advertisement; - break; - case ND_NEIGHBOR_SOLICIT: - f = icmp_neighbor_solicitation; - break; - case ND_ROUTER_ADVERT: - f = icmp_router_advertisement; - break; case ND_ROUTER_SOLICIT: f = icmp_router_solicitation; break; - case ND_REDIRECT: - f = icmp_redirect; - break; case ICMP6_ROUTER_RENUMBERING: default: ICMPEvent(icmp_sent, icmpp, len, 1); diff --git a/src/ICMP.h b/src/ICMP.h index 427c183612..59a399f74f 100644 --- a/src/ICMP.h +++ b/src/ICMP.h @@ -39,6 +39,14 @@ protected: int caplen, const u_char*& data, const IP_Hdr* ip_hdr); void Context(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); + void Redirect(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* ip_hdr); + void RouterAdvert(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* ip_hdr); + void NeighborAdvert(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* ip_hdr); + void NeighborSolicit(double t, const struct icmp* icmpp, int len, + int caplen, const u_char*& data, const IP_Hdr* ip_hdr); void Router(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); diff --git a/src/event.bif b/src/event.bif index 7d0d4b2ef5..8e7b0be8a8 100644 --- a/src/event.bif +++ b/src/event.bif @@ -955,7 +955,7 @@ event icmp_router_solicitation%(c: connection, icmp: icmp_conn%); ## ## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent ## icmp_time_exceeded icmp_unreachable -event icmp_router_advertisement%(c: connection, icmp: icmp_conn%); +event icmp_router_advertisement%(c: connection, icmp: icmp_conn, hop_limit: count, managed: bool, router_lifetime: count, reachable_time: interval, retrans_timer: interval%); ## Generated for ICMP *neighbor solicitation* messages. ## @@ -970,7 +970,7 @@ event icmp_router_advertisement%(c: connection, icmp: icmp_conn%); ## ## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent ## icmp_time_exceeded icmp_unreachable -event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn%); +event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt:addr%); ## Generated for ICMP *neighbor advertisement* messages. ## @@ -985,7 +985,7 @@ event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn%); ## ## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent ## icmp_time_exceeded icmp_unreachable -event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn%); +event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, tgt:addr%); ## Generated for ICMP *redirect* messages. ## @@ -1002,7 +1002,7 @@ event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn%); ## ## .. 
bro:see:: icmp_echo_reply icmp_echo_request icmp_sent ## icmp_time_exceeded icmp_unreachable -event icmp_redirect%(c: connection, icmp: icmp_conn, a: addr%); +event icmp_redirect%(c: connection, icmp: icmp_conn, tgt: addr, dest: addr%); ## Generated when a TCP connection terminated, passing on statistics about the ## two endpoints. This event is always generated when Bro flushes the internal From 256cd592a7d4c0bdbf43c3f2e9c4e1cdb0fe995a Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 28 Mar 2012 13:49:28 -0500 Subject: [PATCH 177/651] Improve handling of IPv6 Routing Type 0 headers. - For RH0 headers with non-zero segments left, a "routing0_segleft" flow_weird event is raised (with a destination indicating the last address in the routing header), and an "rh0_segleft" event can also be handled if the other contents of the packet header are of interest. No further analysis is done as the complexity required to correctly identify destination endpoints of connections doesn't seem worth it as RH0 has been deprecated by RFC 5095. - For RH0 headers without any segments left, a "routing0_header" flow_weird event is raised, but further analysis still occurs as normal. --- src/IP.cc | 18 ++++++++++++++ src/IP.h | 21 +++++++++++++++++ src/Sessions.cc | 16 +++++++++++++ src/event.bif | 8 +++++++ .../Baseline/core.ipv6_ext_headers/output | 2 +- .../btest/Baseline/core.ipv6_rh0/segleft.out | 2 ++ .../btest/Baseline/core.ipv6_rh0/segleft0.out | 2 ++ ...uting.trace => ipv6-hbh-rh0-segleft.trace} | Bin .../btest/Traces/ipv6-hbh-rh0-segleft0.trace | Bin 0 -> 162 bytes .../btest/bifs/routing0_data_to_addrs.test | 4 ++-- testing/btest/core/ipv6_ext_headers.test | 2 +- testing/btest/core/ipv6_rh0.test | 22 ++++++++++++++++++ 12 files changed, 93 insertions(+), 4 deletions(-) create mode 100644 testing/btest/Baseline/core.ipv6_rh0/segleft.out create mode 100644 testing/btest/Baseline/core.ipv6_rh0/segleft0.out rename testing/btest/Traces/{ext_hdr_hbh_routing.trace => ipv6-hbh-rh0-segleft.trace} (100%) create mode 100644 testing/btest/Traces/ipv6-hbh-rh0-segleft0.trace create mode 100644 testing/btest/core/ipv6_rh0.test diff --git a/src/IP.cc b/src/IP.cc index 4148c58a33..f82b7a0fd7 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -305,6 +305,24 @@ void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) chain.push_back(p); + // RFC 5095 deprecates routing type 0 headers, so raise weirds for that + if ( current_type == IPPROTO_ROUTING && + ((const struct ip6_rthdr*)hdrs)->ip6r_type == 0 ) + { + IPAddr src(((const struct ip6_hdr*)(chain[0]->Data()))->ip6_src); + + if ( ((const struct ip6_rthdr*)hdrs)->ip6r_segleft > 0 ) + { + const in6_addr* a = (const in6_addr*)(hdrs+len-16); + reporter->Weird(src, *a, "routing0_segleft"); + } + else + { + IPAddr dst(((const struct ip6_hdr*)(chain[0]->Data()))->ip6_dst); + reporter->Weird(src, dst, "routing0_header"); + } + } + hdrs += len; length += len; } while ( current_type != IPPROTO_FRAGMENT && diff --git a/src/IP.h b/src/IP.h index cb5bcf77c7..daa508db7f 100644 --- a/src/IP.h +++ b/src/IP.h @@ -171,6 +171,20 @@ public: { return IsFragment() ? (ntohs(GetFragHdr()->ip6f_offlg) & 0x0001) != 0 : 0; } + /** + * Returns whether the chain contains a routing type 0 extension header + * with nonzero segments left. 
+ */ + bool RH0SegLeft() const + { + for ( size_t i = 0; i < chain.size(); ++i ) + if ( chain[i]->Type() == IPPROTO_ROUTING && + ((const struct ip6_rthdr*)chain[i]->Data())->ip6r_type == 0 && + ((const struct ip6_rthdr*)chain[i]->Data())->ip6r_segleft > 0 ) + return true; + return false; + } + /** * Returns a vector of ip6_ext_hdr RecordVals that includes script-layer * representation of all extension headers in the chain. @@ -343,6 +357,13 @@ public: size_t NumHeaders() const { return ip4 ? 1 : ip6_hdrs->Size(); } + /** + * Returns true if this is an IPv6 header containing a routing type 0 + * extension with nonzero segments left, else returns false. + */ + bool RH0SegLeft() const + { return ip4 ? false : ip6_hdrs->RH0SegLeft(); } + /** * Returns an ip_hdr or ip6_hdr_chain RecordVal. */ diff --git a/src/Sessions.cc b/src/Sessions.cc index 84b57bdc62..b5bb485d72 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -481,6 +481,22 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, return; } + // Stop analyzing IPv6 packets that use routing type 0 headers with segments + // left since RH0 headers are deprecated by RFC 5095 and we'd have to make + // extra effort to get the destination in the connection/flow endpoint right + if ( ip_hdr->RH0SegLeft() ) + { + dump_this_packet = 1; + if ( rh0_segleft ) + { + val_list* vl = new val_list(); + vl->append(ip_hdr->BuildPktHdrVal()); + mgr.QueueEvent(rh0_segleft, vl); + } + Remove(f); + return; + } + int proto = ip_hdr->NextProto(); if ( CheckHeaderTrunc(proto, len, caplen, hdr, pkt) ) diff --git a/src/event.bif b/src/event.bif index 113c003e37..20714c0931 100644 --- a/src/event.bif +++ b/src/event.bif @@ -478,6 +478,14 @@ event ipv6_ext_headers%(c: connection, p: pkt_hdr%); ## .. bro:see:: new_packet tcp_packet ipv6_ext_headers event esp_packet%(p: pkt_hdr%); +## Generated for any packets using an IPv6 Routing Type 0 extension header +## with non-zero segments left. +## +## p: Information from the header of the packet that triggered the event. +## +## .. bro:see:: new_packet tcp_packet ipv6_ext_headers +event rh0_segleft%(p: pkt_hdr%); + ## Generated for every packet that has non-empty transport-layer payload. This is a ## very low-level and expensive event that should be avoided when at all possible. 
## It's usually infeasible to handle when processing even medium volumes of diff --git a/testing/btest/Baseline/core.ipv6_ext_headers/output b/testing/btest/Baseline/core.ipv6_ext_headers/output index a5a0caf7c6..58332ca900 100644 --- a/testing/btest/Baseline/core.ipv6_ext_headers/output +++ b/testing/btest/Baseline/core.ipv6_ext_headers/output @@ -1 +1 @@ -[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] +[ip=, ip6=[class=0, flow=0, len=68, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=], [id=43, hopopts=, dstopts=, routing=[nxt=6, len=4, rtype=0, segleft=0, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=]]], tcp=[sport=30000/tcp, dport=80/tcp, seq=0, ack=0, hl=20, dl=0, flags=2, win=8192], udp=, icmp=] diff --git a/testing/btest/Baseline/core.ipv6_rh0/segleft.out b/testing/btest/Baseline/core.ipv6_rh0/segleft.out new file mode 100644 index 0000000000..3c722ee3b4 --- /dev/null +++ b/testing/btest/Baseline/core.ipv6_rh0/segleft.out @@ -0,0 +1,2 @@ +flow_weird routing0_segleft from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:78:1:32::2 +rh0 w/ segments left from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:4f8:4:7:2e0:81ff:fe52:9a6b diff --git a/testing/btest/Baseline/core.ipv6_rh0/segleft0.out b/testing/btest/Baseline/core.ipv6_rh0/segleft0.out new file mode 100644 index 0000000000..ae57c7cc8d --- /dev/null +++ b/testing/btest/Baseline/core.ipv6_rh0/segleft0.out @@ -0,0 +1,2 @@ +flow_weird routing0_header from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:4f8:4:7:2e0:81ff:fe52:9a6b +new_connection: [orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=30000/tcp, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=80/tcp] diff --git a/testing/btest/Traces/ext_hdr_hbh_routing.trace b/testing/btest/Traces/ipv6-hbh-rh0-segleft.trace similarity index 100% rename from testing/btest/Traces/ext_hdr_hbh_routing.trace rename to testing/btest/Traces/ipv6-hbh-rh0-segleft.trace diff --git a/testing/btest/Traces/ipv6-hbh-rh0-segleft0.trace b/testing/btest/Traces/ipv6-hbh-rh0-segleft0.trace new file mode 100644 index 0000000000000000000000000000000000000000..35f5b3afe633cc81fc2444b10fa278a29d81783f GIT binary patch literal 162 zcmca|c+)~A1{MYwaA0F#U<7gw`4#(HHgGdk0ofq@9}FO>+U_QR7%mJB3XCj2fSTEv z9yI>{7xe!>Dt}hCHUlHrXf~(?3XBXDK;w-d<}fg#@tH~u7y_Vj3;|3E4EkLR3;-CU BAFlua literal 0 HcmV?d00001 diff --git a/testing/btest/bifs/routing0_data_to_addrs.test b/testing/btest/bifs/routing0_data_to_addrs.test index 4bf15cae87..de10dd80e0 100644 --- a/testing/btest/bifs/routing0_data_to_addrs.test +++ b/testing/btest/bifs/routing0_data_to_addrs.test @@ -1,7 +1,7 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/ext_hdr_hbh_routing.trace %INPUT >output +# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-rh0-segleft.trace %INPUT >output # @TEST-EXEC: btest-diff output -event ipv6_ext_headers(c: connection, p: pkt_hdr) +event rh0_segleft(p: pkt_hdr) { for ( h in p$ip6$exts ) if ( p$ip6$exts[h]$id == IPPROTO_ROUTING ) diff --git 
a/testing/btest/core/ipv6_ext_headers.test b/testing/btest/core/ipv6_ext_headers.test index 170a67bc72..0cf3f2f3fb 100644 --- a/testing/btest/core/ipv6_ext_headers.test +++ b/testing/btest/core/ipv6_ext_headers.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/ext_hdr_hbh_routing.trace %INPUT >output +# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-rh0-segleft0.trace %INPUT >output # @TEST-EXEC: btest-diff output # Just check that the event is raised correctly for a packet containing diff --git a/testing/btest/core/ipv6_rh0.test b/testing/btest/core/ipv6_rh0.test new file mode 100644 index 0000000000..18c23ed3b7 --- /dev/null +++ b/testing/btest/core/ipv6_rh0.test @@ -0,0 +1,22 @@ +# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-rh0-segleft0.trace %INPUT >segleft0.out +# @TEST-EXEC: btest-diff segleft0.out +# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-rh0-segleft.trace %INPUT >segleft.out +# @TEST-EXEC: btest-diff segleft.out + +# This will be raised only by the packet with RH0 and segments left. +event rh0_segleft(p: pkt_hdr) + { + print fmt("rh0 w/ segments left from %s to %s", p$ip6$src, p$ip6$dst); + } + +# This will be raised only by the packet with RH0 and no segments left. +event new_connection(c: connection) + { + print fmt("new_connection: %s", c$id); + } + +# This will be raised by any packet with RH0 regardless of segments left. +event flow_weird(name: string, src: addr, dst: addr) + { + print fmt("flow_weird %s from %s to %s", name, src, dst); + } From 8a1d71dc0864d33aff81d9ab5e6f5b4265ed7d21 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 28 Mar 2012 14:14:20 -0500 Subject: [PATCH 178/651] Remove dead tcp_checksum function from net_util --- src/net_util.cc | 27 --------------------------- src/net_util.h | 1 - 2 files changed, 28 deletions(-) diff --git a/src/net_util.cc b/src/net_util.cc index 1a4e9f1a7f..9e023a5fc1 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -38,33 +38,6 @@ int ones_complement_checksum(const IPAddr& a, uint32 sum) return ones_complement_checksum(bytes, len*4, sum); } -int tcp_checksum(const struct ip* ip, const struct tcphdr* tp, int len) - { - // ### Note, this is only correct for IPv4. This routine is only - // used by the connection compressor (which we turn off for IPv6 - // traffic). - - int tcp_len = tp->th_off * 4 + len; - uint32 sum; - - if ( len % 2 == 1 ) - // Add in pad byte. 
- sum = htons(((const u_char*) tp)[tcp_len - 1] << 8); - else - sum = 0; - - sum = ones_complement_checksum((void*) &ip->ip_src.s_addr, 4, sum); - sum = ones_complement_checksum((void*) &ip->ip_dst.s_addr, 4, sum); - - uint32 addl_pseudo = - (htons(IPPROTO_TCP) << 16) | htons((unsigned short) tcp_len); - - sum = ones_complement_checksum((void*) &addl_pseudo, 4, sum); - sum = ones_complement_checksum((void*) tp, tcp_len, sum); - - return sum; - } - -int udp_checksum(const struct ip* ip, const struct udphdr* up, int len) { uint32 sum; diff --git a/src/net_util.h b/src/net_util.h index 8787340328..5e39a11714 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -62,7 +62,6 @@ inline int seq_delta(uint32 a, uint32 b) extern int ones_complement_checksum(const void* p, int b, uint32 sum); extern int ones_complement_checksum(const IPAddr& a, uint32 sum); -extern int tcp_checksum(const struct ip* ip, const struct tcphdr* tp, int len); extern int udp_checksum(const struct ip* ip, const struct udphdr* up, int len); extern int udp6_checksum(const struct ip6_hdr* ip, const struct udphdr* up, int len); From 42066cc1fd35ca7b63daeaf588271ec3c040385e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 28 Mar 2012 14:53:59 -0700 Subject: [PATCH 179/651] Teaching cmake to always link in tcmalloc if it finds it. Also renaming --enable-perftools to --enable-perftools-debug to indicate that the switch is only relevant for debugging the heap. It's not needed to pick up tcmalloc for better performance. --with-perftools can still (and always) be used to give a hint where to find the libraries. With the threading, using tcmalloc improves memory usage on FreeBSD significantly when running on a trace. Whether it fixes the live problems remains to be seen ... --- CMakeLists.txt | 20 +++++++++++++++----- cmake | 2 +- configure | 9 ++++----- src/DPM.cc | 2 +- src/File.cc | 4 ++-- src/ID.cc | 2 +- src/Login.cc | 2 +- src/PersistenceSerializer.cc | 2 +- src/RemoteSerializer.cc | 4 ++-- src/RuleMatcher.cc | 2 +- src/StateAccess.cc | 2 +- src/main.cc | 18 +++++++++--------- src/util.h | 2 +- 13 files changed, 40 insertions(+), 31 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index de3138c20c..febc2d6ec1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -89,11 +89,20 @@ if (LIBGEOIP_FOUND) endif () set(USE_PERFTOOLS false) -if (ENABLE_PERFTOOLS) - find_package(GooglePerftools) - if (GOOGLEPERFTOOLS_FOUND) - set(USE_PERFTOOLS true) - include_directories(BEFORE ${GooglePerftools_INCLUDE_DIR}) +set(USE_PERFTOOLS_DEBUG false) + +find_package(GooglePerftools) + +if (GOOGLEPERFTOOLS_FOUND) + include_directories(BEFORE ${GooglePerftools_INCLUDE_DIR}) + set(USE_PERFTOOLS true) + + if (ENABLE_PERFTOOLS_DEBUG) + # Enable heap debugging with perftools. + set(USE_PERFTOOLS_DEBUG true) + list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES_DEBUG}) + else () + # Link in tcmalloc for better performance. 
list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES}) endif () endif () @@ -183,6 +192,7 @@ message( "\n" "\nGeoIP: ${USE_GEOIP}" "\nGoogle perftools: ${USE_PERFTOOLS}" + "\n debugging: ${USE_PERFTOOLS_DEBUG}" "\n" "\n================================================================\n" ) diff --git a/cmake b/cmake index 2cc1055770..4b573ed849 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 2cc105577044a2d214124568f3f2496ed2ccbb34 +Subproject commit 4b573ed849f131ebb8e34fa24786d56f9805e444 diff --git a/configure b/configure index 43afb4ae99..05aa12815b 100755 --- a/configure +++ b/configure @@ -27,7 +27,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]... Optional Features: --enable-debug compile in debugging mode - --enable-perftools use Google's perftools + --enable-perftools-debug use Google's perftools for debugging --disable-broccoli don't build or install the Broccoli library --disable-broctl don't install Broctl --disable-auxtools don't build or install auxilliary tools @@ -91,7 +91,7 @@ append_cache_entry BRO_ROOT_DIR PATH /usr/local/bro append_cache_entry PY_MOD_INSTALL_DIR PATH /usr/local/bro/lib/broctl append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING /usr/local/bro/share/bro append_cache_entry ENABLE_DEBUG BOOL false -append_cache_entry ENABLE_PERFTOOLS BOOL false +append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false append_cache_entry BinPAC_SKIP_INSTALL BOOL true append_cache_entry BUILD_SHARED_LIBS BOOL true append_cache_entry INSTALL_AUX_TOOLS BOOL true @@ -132,8 +132,8 @@ while [ $# -ne 0 ]; do --enable-debug) append_cache_entry ENABLE_DEBUG BOOL true ;; - --enable-perftools) - append_cache_entry ENABLE_PERFTOOLS BOOL true + --enable-perftools-debug) + append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true ;; --disable-broccoli) append_cache_entry INSTALL_BROCCOLI BOOL false @@ -178,7 +178,6 @@ while [ $# -ne 0 ]; do append_cache_entry LibGeoIP_ROOT_DIR PATH $optarg ;; --with-perftools=*) - append_cache_entry ENABLE_PERFTOOLS BOOL true append_cache_entry GooglePerftools_ROOT_DIR PATH $optarg ;; --with-python=*) diff --git a/src/DPM.cc b/src/DPM.cc index 595ee42ec8..0902ae9a45 100644 --- a/src/DPM.cc +++ b/src/DPM.cc @@ -74,7 +74,7 @@ void DPM::PostScriptInit() void DPM::AddConfig(const Analyzer::Config& cfg) { -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG HeapLeakChecker::Disabler disabler; #endif diff --git a/src/File.cc b/src/File.cc index 080923ad37..d4e31bcc16 100644 --- a/src/File.cc +++ b/src/File.cc @@ -232,7 +232,7 @@ BroFile::~BroFile() delete [] access; delete [] cipher_buffer; -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG heap_checker->UnIgnoreObject(this); #endif } @@ -255,7 +255,7 @@ void BroFile::Init() cipher_ctx = 0; cipher_buffer = 0; -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG heap_checker->IgnoreObject(this); #endif } diff --git a/src/ID.cc b/src/ID.cc index 3f5c76ca1d..a70aa3fd0e 100644 --- a/src/ID.cc +++ b/src/ID.cc @@ -372,7 +372,7 @@ ID* ID::Unserialize(UnserialInfo* info) Ref(id); global_scope()->Insert(id->Name(), id); -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG heap_checker->IgnoreObject(id); #endif } diff --git a/src/Login.cc b/src/Login.cc index 56efd12f53..e626fb3a0a 100644 --- a/src/Login.cc +++ b/src/Login.cc @@ -38,7 +38,7 @@ Login_Analyzer::Login_Analyzer(AnalyzerTag::Tag tag, Connection* conn) if ( ! 
re_skip_authentication ) { -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG HeapLeakChecker::Disabler disabler; #endif re_skip_authentication = init_RE(skip_authentication); diff --git a/src/PersistenceSerializer.cc b/src/PersistenceSerializer.cc index c757467f90..d9baad05bb 100644 --- a/src/PersistenceSerializer.cc +++ b/src/PersistenceSerializer.cc @@ -137,7 +137,7 @@ bool PersistenceSerializer::CheckForFile(UnserialInfo* info, const char* file, bool PersistenceSerializer::ReadAll(bool is_init, bool delete_files) { -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG HeapLeakChecker::Disabler disabler; #endif diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index f29e907790..e9fbe0aab8 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -2645,7 +2645,7 @@ bool RemoteSerializer::ProcessLogCreateWriter() if ( current_peer->state == Peer::CLOSING ) return false; -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG // Don't track allocations here, they'll be released only after the // main loop exists. And it's just a tiny amount anyway. HeapLeakChecker::Disabler disabler; @@ -2866,7 +2866,7 @@ void RemoteSerializer::GotID(ID* id, Val* val) (desc && *desc) ? desc : "not set"), current_peer); -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG // May still be cached, but we don't care. heap_checker->IgnoreObject(id); #endif diff --git a/src/RuleMatcher.cc b/src/RuleMatcher.cc index da12b1b679..c9cf1f5c11 100644 --- a/src/RuleMatcher.cc +++ b/src/RuleMatcher.cc @@ -191,7 +191,7 @@ void RuleMatcher::Delete(RuleHdrTest* node) bool RuleMatcher::ReadFiles(const name_list& files) { -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG HeapLeakChecker::Disabler disabler; #endif diff --git a/src/StateAccess.cc b/src/StateAccess.cc index 7abef72c46..2d0a8dfc5a 100644 --- a/src/StateAccess.cc +++ b/src/StateAccess.cc @@ -678,7 +678,7 @@ bool StateAccess::DoUnserialize(UnserialInfo* info) target.id = new ID(name, SCOPE_GLOBAL, true); Ref(target.id); global_scope()->Insert(name, target.id); -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG heap_checker->IgnoreObject(target.id); #endif } diff --git a/src/main.cc b/src/main.cc index e484b58fe2..17a798ea81 100644 --- a/src/main.cc +++ b/src/main.cc @@ -65,7 +65,7 @@ extern "C" { #include "setsignal.h" }; -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG HeapLeakChecker* heap_checker = 0; int perftools_leaks = 0; int perftools_profile = 0; @@ -177,7 +177,7 @@ void usage() fprintf(stderr, " -W|--watchdog | activate watchdog timer\n"); fprintf(stderr, " -Z|--doc-scripts | generate documentation for all loaded scripts\n"); -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG fprintf(stderr, " -m|--mem-leaks | show leaks [perftools]\n"); fprintf(stderr, " -M|--mem-profile | record heap [perftools]\n"); #endif @@ -244,7 +244,7 @@ void done_with_network() net_finish(1); -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG if ( perftools_profile ) { @@ -424,7 +424,7 @@ int main(int argc, char** argv) #ifdef USE_IDMEF {"idmef-dtd", required_argument, 0, 'n'}, #endif -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG {"mem-leaks", no_argument, 0, 'm'}, {"mem-profile", no_argument, 0, 'M'}, #endif @@ -466,7 +466,7 @@ int main(int argc, char** argv) safe_strncpy(opts, "B:D:e:f:I:i:K:l:n:p:R:r:s:T:t:U:w:x:X:y:Y:z:CFGLOPSWbdghvZ", sizeof(opts)); -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG strncat(opts, "mM", 2); #endif @@ -622,7 +622,7 @@ int main(int argc, char** argv) exit(0); break; -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG case 'm': 
perftools_leaks = 1; break; @@ -758,14 +758,14 @@ int main(int argc, char** argv) // nevertheless reported; see perftools docs), thus // we suppress some messages here. -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG { HeapLeakChecker::Disabler disabler; #endif yyparse(); -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG } #endif @@ -1019,7 +1019,7 @@ int main(int argc, char** argv) if ( profiling_logger ) profiling_logger->Log(); -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG if ( perftools_leaks ) heap_checker = new HeapLeakChecker("net_run"); diff --git a/src/util.h b/src/util.h index 498bdf00e4..a4e3aa71b8 100644 --- a/src/util.h +++ b/src/util.h @@ -37,7 +37,7 @@ #endif -#ifdef USE_PERFTOOLS +#ifdef USE_PERFTOOLS_DEBUG #include #include extern HeapLeakChecker* heap_checker; From c382439079755f1ca613881b1699ba77bfe9f246 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 28 Mar 2012 15:39:56 -0700 Subject: [PATCH 180/651] Switching log buffer size back to normal --- src/RemoteSerializer.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index e9fbe0aab8..017f260bdf 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -234,7 +234,7 @@ static const int PRINT_BUFFER_SIZE = 10 * 1024; static const int SOCKBUF_SIZE = 1024 * 1024; // Buffer size for remote-log data. -static const int LOG_BUFFER_SIZE = 512; +static const int LOG_BUFFER_SIZE = 50 * 1024; struct ping_args { uint32 seq; From 76af3cf825037353b2a8ae09e59ab0cf7333128f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 28 Mar 2012 15:52:13 -0700 Subject: [PATCH 181/651] Updating submodule(s). [nomail] --- aux/broctl | 2 +- aux/btest | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aux/broctl b/aux/broctl index 66e9e87bee..c86b7e990b 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 66e9e87beebce983fa0f479b0284d5690b0290d4 +Subproject commit c86b7e990b4d39cd48c0cb692077aa081b418149 diff --git a/aux/btest b/aux/btest index dc78a3ebf5..120c978a12 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit dc78a3ebf5cd8fbd1b3034990e36fa21a51d1a19 +Subproject commit 120c978a1236db4f48c6f38a3a99199d85bb904e From 97652bc144df90b63f3ab075e4e3b4e7932ab3ac Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 28 Mar 2012 16:15:52 -0700 Subject: [PATCH 182/651] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index 120c978a12..c8e8fe477b 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 120c978a1236db4f48c6f38a3a99199d85bb904e +Subproject commit c8e8fe477b5dec635e5ce00f3f764fad069c549c From 6a60f484f9faa3925cfc297e38f1f06561f41607 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 29 Mar 2012 09:03:33 -0700 Subject: [PATCH 183/651] make heart beat interval for threading configurable from scripting layer --- scripts/base/init-bare.bro | 8 ++++++++ src/const.bif | 1 + src/threading/Manager.cc | 6 +++++- src/threading/Manager.h | 2 +- 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 9f4e0355f0..77f90cae5f 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -1484,6 +1484,14 @@ export { }; } # end export +module Threading; + +export { + ## The heart beat interval used by the threading framework. + ## Changing this should usually not be neccessary and will break several tests. 
+ const heart_beat_interval = 1.0 &redef; +} + module GLOBAL; ## An NTP message. diff --git a/src/const.bif b/src/const.bif index bc960caeb6..aadb1e1ab7 100644 --- a/src/const.bif +++ b/src/const.bif @@ -12,3 +12,4 @@ const NFS3::return_data: bool; const NFS3::return_data_max: count; const NFS3::return_data_first_only: bool; +const Threading::heart_beat_interval: double; diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index d008d2e5e8..8546ca3948 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -1,5 +1,6 @@ #include "Manager.h" +#include "NetVar.h" using namespace threading; @@ -11,6 +12,9 @@ Manager::Manager() next_beat = 0; terminating = false; idle = false; + + heart_beat_interval = double(BifConst::Threading::heart_beat_interval); + DBG_LOG(DBG_THREADING, "Heart beat interval set to %f", heart_beat_interval); } Manager::~Manager() @@ -73,7 +77,7 @@ void Manager::GetFds(int* read, int* write, int* except) double Manager::NextTimestamp(double* network_time) { if ( ::network_time && ! next_beat ) - next_beat = ::network_time + HEART_BEAT_INTERVAL; + next_beat = ::network_time + heart_beat_interval; // fprintf(stderr, "N %.6f %.6f did_process=%d next_next=%.6f\n", ::network_time, timer_mgr->Time(), (int)did_process, next_beat); diff --git a/src/threading/Manager.h b/src/threading/Manager.h index 7d9ba766d4..29729f6a7a 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -120,7 +120,7 @@ protected: virtual const char* Tag() { return "threading::Manager"; } private: - static const int HEART_BEAT_INTERVAL = 1; + int heart_beat_interval; typedef std::list all_thread_list; all_thread_list all_threads; From 3a4d03560340a955c625ac1c89b8f6b4ba6e86d9 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 29 Mar 2012 14:29:33 -0500 Subject: [PATCH 184/651] Revert "Improve handling of IPv6 Routing Type 0 headers." This reverts commit 256cd592a7d4c0bdbf43c3f2e9c4e1cdb0fe995a. Conflicts: src/IP.cc src/Sessions.cc --- src/IP.cc | 18 -------------- src/IP.h | 21 ----------------- src/Sessions.cc | 16 ------------- src/event.bif | 8 ------- .../Baseline/core.ipv6_ext_headers/output | 2 +- .../btest/Baseline/core.ipv6_rh0/segleft.out | 2 -- .../btest/Baseline/core.ipv6_rh0/segleft0.out | 2 -- ...egleft.trace => ext_hdr_hbh_routing.trace} | Bin .../btest/Traces/ipv6-hbh-rh0-segleft0.trace | Bin 162 -> 0 bytes .../btest/bifs/routing0_data_to_addrs.test | 4 ++-- testing/btest/core/ipv6_ext_headers.test | 2 +- testing/btest/core/ipv6_rh0.test | 22 ------------------ 12 files changed, 4 insertions(+), 93 deletions(-) delete mode 100644 testing/btest/Baseline/core.ipv6_rh0/segleft.out delete mode 100644 testing/btest/Baseline/core.ipv6_rh0/segleft0.out rename testing/btest/Traces/{ipv6-hbh-rh0-segleft.trace => ext_hdr_hbh_routing.trace} (100%) delete mode 100644 testing/btest/Traces/ipv6-hbh-rh0-segleft0.trace delete mode 100644 testing/btest/core/ipv6_rh0.test diff --git a/src/IP.cc b/src/IP.cc index 7f616fbbb0..4148c58a33 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -305,24 +305,6 @@ void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) chain.push_back(p); - // RFC 5095 deprecates routing type 0 headers, so raise weirds for that. 
- if ( current_type == IPPROTO_ROUTING && - ((const struct ip6_rthdr*)hdrs)->ip6r_type == 0 ) - { - IPAddr src(((const struct ip6_hdr*)(chain[0]->Data()))->ip6_src); - - if ( ((const struct ip6_rthdr*)hdrs)->ip6r_segleft > 0 ) - { - const in6_addr* a = (const in6_addr*)(hdrs+len-16); - reporter->Weird(src, *a, "routing0_segleft"); - } - else - { - IPAddr dst(((const struct ip6_hdr*)(chain[0]->Data()))->ip6_dst); - reporter->Weird(src, dst, "routing0_header"); - } - } - hdrs += len; length += len; } while ( current_type != IPPROTO_FRAGMENT && diff --git a/src/IP.h b/src/IP.h index daa508db7f..cb5bcf77c7 100644 --- a/src/IP.h +++ b/src/IP.h @@ -171,20 +171,6 @@ public: { return IsFragment() ? (ntohs(GetFragHdr()->ip6f_offlg) & 0x0001) != 0 : 0; } - /** - * Returns whether the chain contains a routing type 0 extension header - * with nonzero segments left. - */ - bool RH0SegLeft() const - { - for ( size_t i = 0; i < chain.size(); ++i ) - if ( chain[i]->Type() == IPPROTO_ROUTING && - ((const struct ip6_rthdr*)chain[i]->Data())->ip6r_type == 0 && - ((const struct ip6_rthdr*)chain[i]->Data())->ip6r_segleft > 0 ) - return true; - return false; - } - /** * Returns a vector of ip6_ext_hdr RecordVals that includes script-layer * representation of all extension headers in the chain. @@ -357,13 +343,6 @@ public: size_t NumHeaders() const { return ip4 ? 1 : ip6_hdrs->Size(); } - /** - * Returns true if this is an IPv6 header containing a routing type 0 - * extension with nonzero segments left, else returns false. - */ - bool RH0SegLeft() const - { return ip4 ? false : ip6_hdrs->RH0SegLeft(); } - /** * Returns an ip_hdr or ip6_hdr_chain RecordVal. */ diff --git a/src/Sessions.cc b/src/Sessions.cc index 9ab7d1d1fa..84b57bdc62 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -481,22 +481,6 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, return; } - // Stop analyzing IPv6 packets that use routing type 0 headers with segments - // left since RH0 headers are deprecated by RFC 5095 and we'd have to make - // extra effort to get the destination in the connection/flow endpoint right. - if ( ip_hdr->RH0SegLeft() ) - { - dump_this_packet = 1; - if ( rh0_segleft ) - { - val_list* vl = new val_list(); - vl->append(ip_hdr->BuildPktHdrVal()); - mgr.QueueEvent(rh0_segleft, vl); - } - Remove(f); - return; - } - int proto = ip_hdr->NextProto(); if ( CheckHeaderTrunc(proto, len, caplen, hdr, pkt) ) diff --git a/src/event.bif b/src/event.bif index 20714c0931..113c003e37 100644 --- a/src/event.bif +++ b/src/event.bif @@ -478,14 +478,6 @@ event ipv6_ext_headers%(c: connection, p: pkt_hdr%); ## .. bro:see:: new_packet tcp_packet ipv6_ext_headers event esp_packet%(p: pkt_hdr%); -## Generated for any packets using an IPv6 Routing Type 0 extension header -## with non-zero segments left. -## -## p: Information from the header of the packet that triggered the event. -## -## .. bro:see:: new_packet tcp_packet ipv6_ext_headers -event rh0_segleft%(p: pkt_hdr%); - ## Generated for every packet that has non-empty transport-layer payload. This is a ## very low-level and expensive event that should be avoided when at all possible. 
## It's usually infeasible to handle when processing even medium volumes of diff --git a/testing/btest/Baseline/core.ipv6_ext_headers/output b/testing/btest/Baseline/core.ipv6_ext_headers/output index 58332ca900..a5a0caf7c6 100644 --- a/testing/btest/Baseline/core.ipv6_ext_headers/output +++ b/testing/btest/Baseline/core.ipv6_ext_headers/output @@ -1 +1 @@ -[ip=, ip6=[class=0, flow=0, len=68, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=], [id=43, hopopts=, dstopts=, routing=[nxt=6, len=4, rtype=0, segleft=0, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=]]], tcp=[sport=30000/tcp, dport=80/tcp, seq=0, ack=0, hl=20, dl=0, flags=2, win=8192], udp=, icmp=] +[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] diff --git a/testing/btest/Baseline/core.ipv6_rh0/segleft.out b/testing/btest/Baseline/core.ipv6_rh0/segleft.out deleted file mode 100644 index 3c722ee3b4..0000000000 --- a/testing/btest/Baseline/core.ipv6_rh0/segleft.out +++ /dev/null @@ -1,2 +0,0 @@ -flow_weird routing0_segleft from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:78:1:32::2 -rh0 w/ segments left from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:4f8:4:7:2e0:81ff:fe52:9a6b diff --git a/testing/btest/Baseline/core.ipv6_rh0/segleft0.out b/testing/btest/Baseline/core.ipv6_rh0/segleft0.out deleted file mode 100644 index ae57c7cc8d..0000000000 --- a/testing/btest/Baseline/core.ipv6_rh0/segleft0.out +++ /dev/null @@ -1,2 +0,0 @@ -flow_weird routing0_header from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:4f8:4:7:2e0:81ff:fe52:9a6b -new_connection: [orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=30000/tcp, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=80/tcp] diff --git a/testing/btest/Traces/ipv6-hbh-rh0-segleft.trace b/testing/btest/Traces/ext_hdr_hbh_routing.trace similarity index 100% rename from testing/btest/Traces/ipv6-hbh-rh0-segleft.trace rename to testing/btest/Traces/ext_hdr_hbh_routing.trace diff --git a/testing/btest/Traces/ipv6-hbh-rh0-segleft0.trace b/testing/btest/Traces/ipv6-hbh-rh0-segleft0.trace deleted file mode 100644 index 35f5b3afe633cc81fc2444b10fa278a29d81783f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 162 zcmca|c+)~A1{MYwaA0F#U<7gw`4#(HHgGdk0ofq@9}FO>+U_QR7%mJB3XCj2fSTEv z9yI>{7xe!>Dt}hCHUlHrXf~(?3XBXDK;w-d<}fg#@tH~u7y_Vj3;|3E4EkLR3;-CU BAFlua diff --git a/testing/btest/bifs/routing0_data_to_addrs.test b/testing/btest/bifs/routing0_data_to_addrs.test index de10dd80e0..4bf15cae87 100644 --- a/testing/btest/bifs/routing0_data_to_addrs.test +++ b/testing/btest/bifs/routing0_data_to_addrs.test @@ -1,7 +1,7 @@ -# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-rh0-segleft.trace %INPUT >output +# @TEST-EXEC: bro -C -b -r $TRACES/ext_hdr_hbh_routing.trace %INPUT >output # @TEST-EXEC: btest-diff output -event rh0_segleft(p: pkt_hdr) +event ipv6_ext_headers(c: connection, p: pkt_hdr) { for ( h in p$ip6$exts ) if ( p$ip6$exts[h]$id == IPPROTO_ROUTING ) diff --git 
a/testing/btest/core/ipv6_ext_headers.test b/testing/btest/core/ipv6_ext_headers.test index 0cf3f2f3fb..170a67bc72 100644 --- a/testing/btest/core/ipv6_ext_headers.test +++ b/testing/btest/core/ipv6_ext_headers.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-rh0-segleft0.trace %INPUT >output +# @TEST-EXEC: bro -C -b -r $TRACES/ext_hdr_hbh_routing.trace %INPUT >output # @TEST-EXEC: btest-diff output # Just check that the event is raised correctly for a packet containing diff --git a/testing/btest/core/ipv6_rh0.test b/testing/btest/core/ipv6_rh0.test deleted file mode 100644 index 18c23ed3b7..0000000000 --- a/testing/btest/core/ipv6_rh0.test +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-rh0-segleft0.trace %INPUT >segleft0.out -# @TEST-EXEC: btest-diff segleft0.out -# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-rh0-segleft.trace %INPUT >segleft.out -# @TEST-EXEC: btest-diff segleft.out - -# This will be raised only by the packet with RH0 and segments left. -event rh0_segleft(p: pkt_hdr) - { - print fmt("rh0 w/ segments left from %s to %s", p$ip6$src, p$ip6$dst); - } - -# This will be raised only by the packet with RH0 and no segments left. -event new_connection(c: connection) - { - print fmt("new_connection: %s", c$id); - } - -# This will be raised by any packet with RH0 regardless of segments left. -event flow_weird(name: string, src: addr, dst: addr) - { - print fmt("flow_weird %s from %s to %s", name, src, dst); - } From 7d7cadfb562f68f5cd0165818968a11b3d2108c3 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 29 Mar 2012 14:41:44 -0500 Subject: [PATCH 185/651] Revert TCP checksumming to cache common data, like it did before. --- src/TCP_Endpoint.cc | 20 ++++++++++---------- src/TCP_Endpoint.h | 1 + 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/TCP_Endpoint.cc b/src/TCP_Endpoint.cc index d6f5d1bb84..69c08870d9 100644 --- a/src/TCP_Endpoint.cc +++ b/src/TCP_Endpoint.cc @@ -31,6 +31,14 @@ TCP_Endpoint::TCP_Endpoint(TCP_Analyzer* arg_analyzer, int arg_is_orig) tcp_analyzer->Conn()->OrigAddr(); dst_addr = is_orig ? tcp_analyzer->Conn()->OrigAddr() : tcp_analyzer->Conn()->RespAddr(); + + checksum_base = ones_complement_checksum(src_addr, 0); + checksum_base = ones_complement_checksum(dst_addr, checksum_base); + // Note, for IPv6, strictly speaking this field is 32 bits + // rather than 16 bits. But because the upper bits are all zero, + // we get the same checksum either way. The same applies to + // later when we add in the data length in ValidChecksum(). + checksum_base += htons(IPPROTO_TCP); } TCP_Endpoint::~TCP_Endpoint() @@ -100,21 +108,13 @@ void TCP_Endpoint::SizeBufferedData(int& waiting_on_hole, int& waiting_on_ack) int TCP_Endpoint::ValidChecksum(const struct tcphdr* tp, int len) const { - uint32 sum; + uint32 sum = checksum_base; int tcp_len = tp->th_off * 4 + len; if ( len % 2 == 1 ) // Add in pad byte. - sum = htons(((const u_char*) tp)[tcp_len - 1] << 8); - else - sum = 0; + sum += htons(((const u_char*) tp)[tcp_len - 1] << 8); - sum = ones_complement_checksum(src_addr, sum); - sum = ones_complement_checksum(dst_addr, sum); - // Note, for IPv6, strictly speaking the protocol and length fields are - // 32 bits rather than 16 bits. But because the upper bits are all zero, - // we get the same checksum either way. 
- sum += htons(IPPROTO_TCP); sum += htons((unsigned short) tcp_len); // fill out pseudo header sum = ones_complement_checksum((void*) tp, tcp_len, sum); diff --git a/src/TCP_Endpoint.h b/src/TCP_Endpoint.h index 28a114adf3..52a757b256 100644 --- a/src/TCP_Endpoint.h +++ b/src/TCP_Endpoint.h @@ -127,6 +127,7 @@ public: TCP_Reassembler* contents_processor; TCP_Analyzer* tcp_analyzer; BroFile* contents_file; + uint32 checksum_base; double start_time, last_time; IPAddr src_addr; // the other endpoint From ead30e423d0316e1e2e05ae5334f7771d1582f5c Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 30 Mar 2012 08:40:38 -0700 Subject: [PATCH 186/651] change type of heart_beat_interval to interval (makes much more sese) --- scripts/base/init-bare.bro | 2 +- src/const.bif | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 3a5b2023dd..4637580337 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -1489,7 +1489,7 @@ module Threading; export { ## The heart beat interval used by the threading framework. ## Changing this should usually not be neccessary and will break several tests. - const heart_beat_interval = 1.0 &redef; + const heart_beat_interval = 1.0 secs &redef; } module GLOBAL; diff --git a/src/const.bif b/src/const.bif index aadb1e1ab7..f9e5f61644 100644 --- a/src/const.bif +++ b/src/const.bif @@ -12,4 +12,4 @@ const NFS3::return_data: bool; const NFS3::return_data_max: count; const NFS3::return_data_first_only: bool; -const Threading::heart_beat_interval: double; +const Threading::heart_beat_interval: interval; From 355b85fcd7fc638ad54cf8b8d3614fe3c6ecc9e5 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 30 Mar 2012 09:08:08 -0700 Subject: [PATCH 187/651] most of the stuff we should need for benchmarking. next: search memory leaks, after 1.5million simulated inputs we are leaking about 1Gb of ram... --- scripts/base/frameworks/input/__load__.bro | 1 + .../base/frameworks/input/readers/benchmark.bro | 8 ++++++++ src/input.bif | 3 +++ src/input/ReaderBackend.cc | 7 ++++++- src/input/readers/Benchmark.cc | 15 ++++++++++++++- src/input/readers/Benchmark.h | 2 ++ 6 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 scripts/base/frameworks/input/readers/benchmark.bro diff --git a/scripts/base/frameworks/input/__load__.bro b/scripts/base/frameworks/input/__load__.bro index b41fe5e95f..0e7d8ffb73 100644 --- a/scripts/base/frameworks/input/__load__.bro +++ b/scripts/base/frameworks/input/__load__.bro @@ -1,4 +1,5 @@ @load ./main @load ./readers/ascii @load ./readers/raw +@load ./readers/benchmark diff --git a/scripts/base/frameworks/input/readers/benchmark.bro b/scripts/base/frameworks/input/readers/benchmark.bro new file mode 100644 index 0000000000..3293201cea --- /dev/null +++ b/scripts/base/frameworks/input/readers/benchmark.bro @@ -0,0 +1,8 @@ +##! Interface for the ascii input reader. 
+ +module InputBenchmark; + +export { + ## multiplication factor for each second + const factor = 1 &redef; +} diff --git a/src/input.bif b/src/input.bif index 1157b7b62b..e4ecf4d020 100644 --- a/src/input.bif +++ b/src/input.bif @@ -45,3 +45,6 @@ const unset_field: string; module InputRaw; const record_separator: string; + +module InputBenchmark; +const factor: count; diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index ce79ecfd39..f0b4f8e7e9 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -58,7 +58,12 @@ public: name(name), num_vals(num_vals), val(val) {} virtual bool Process() { - return input_mgr->SendEvent(name, num_vals, val); + bool success = input_mgr->SendEvent(name, num_vals, val); + + if ( !success ) + reporter->Error("SendEvent for event %s failed", name.c_str()); + + return true; // we do not want to die if sendEvent fails because the event did not return. } private: diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index 07ee7eb9bc..de77ba1afa 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -21,6 +21,7 @@ using threading::Field; Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) { + multiplication_factor = int(BifConst::InputBenchmark::factor); } Benchmark::~Benchmark() @@ -198,13 +199,25 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) { bool Benchmark::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); - + num_lines = num_lines*multiplication_factor; + switch ( mode ) { case MANUAL: // yay, we do nothing :) break; case REREAD: case STREAM: + if ( multiplication_factor != 1 ) { + // we have to document at what time we changed the factor to what value. + Value** v = new Value*[2]; + v[0] = new Value(TYPE_COUNT, true); + v[0]->val.uint_val = num_lines; + v[1] = new Value(TYPE_TIME, true); + v[1]->val.double_val = CurrTime(); + + SendEvent("lines_changed", 2, v); + } + Update(); // call update and not DoUpdate, because update actually checks disabled. break; default: diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index e8de4ac773..e0d3f124af 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -38,6 +38,8 @@ private: int mode; int num_lines; + int multiplication_factor; + string RandomString(const int len); }; From 3405cbdfbd5eb9c00dc4f782e9cc875da3c26d8f Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 26 Mar 2012 19:53:01 -0700 Subject: [PATCH 188/651] Introducing - the check if a thread queue might have data. Without locks. Who needs those anyways. --- src/threading/Manager.cc | 6 ++++++ src/threading/MsgThread.h | 6 ++++++ src/threading/Queue.h | 5 +++++ 3 files changed, 17 insertions(+) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index db86caa26f..6a539861be 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -82,6 +82,12 @@ double Manager::NextTimestamp(double* network_time) // If we had something to process last time (or out heartbeat // is due), we want to check for more asap. 
return timer_mgr->Time(); + + for ( msg_thread_list::iterator i = msg_threads.begin(); i != msg_threads.end(); i++ ) + { + if ( (*i)->MightHaveOut() ) + return timer_mgr->Time(); + } return -1.0; } diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 28c7690dfa..4220230a71 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -261,6 +261,12 @@ private: */ bool HasOut() { return queue_out.Ready(); } + /** + * Returns true if there might be at least one message pending for the main + * thread. + */ + bool MightHaveOut() { return queue_out.MaybeReady(); } + Queue queue_in; Queue queue_out; diff --git a/src/threading/Queue.h b/src/threading/Queue.h index a25f897d23..64d6e7cd93 100644 --- a/src/threading/Queue.h +++ b/src/threading/Queue.h @@ -53,6 +53,11 @@ public: */ bool Ready(); + /** + * Returns true if the next Get() operation might succeed. + */ + bool MaybeReady() { return ( ( read_ptr - write_ptr) != 0 ); } + /** * Returns the number of queued items not yet retrieved. */ From 579a10d060241f99275e72505230eb642ee2cc47 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 27 Mar 2012 09:18:01 -0700 Subject: [PATCH 189/651] make benchmark reader more configureable --- .../frameworks/input/readers/benchmark.bro | 8 ++++- src/input.bif | 4 ++- src/input/readers/Benchmark.cc | 30 +++++++++++++++---- src/input/readers/Benchmark.h | 5 +++- 4 files changed, 39 insertions(+), 8 deletions(-) diff --git a/scripts/base/frameworks/input/readers/benchmark.bro b/scripts/base/frameworks/input/readers/benchmark.bro index 3293201cea..c6a6e88fca 100644 --- a/scripts/base/frameworks/input/readers/benchmark.bro +++ b/scripts/base/frameworks/input/readers/benchmark.bro @@ -4,5 +4,11 @@ module InputBenchmark; export { ## multiplication factor for each second - const factor = 1 &redef; + const factor = 1.0 &redef; + + ## spread factor between lines + const spread = 0 &redef; + + ## spreading where usleep = 1000000 / autospread * num_lines + const autospread = 0.0 &redef; } diff --git a/src/input.bif b/src/input.bif index e4ecf4d020..059a7ec8bf 100644 --- a/src/input.bif +++ b/src/input.bif @@ -47,4 +47,6 @@ module InputRaw; const record_separator: string; module InputBenchmark; -const factor: count; +const factor: double; +const spread: count; +const autospread: double; diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index de77ba1afa..d8de8c2538 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -21,7 +21,11 @@ using threading::Field; Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) { - multiplication_factor = int(BifConst::InputBenchmark::factor); + multiplication_factor = double(BifConst::InputBenchmark::factor); + autospread = double(BifConst::InputBenchmark::autospread); + spread = int(BifConst::InputBenchmark::spread); + autospread_time = 0; + } Benchmark::~Benchmark() @@ -40,6 +44,9 @@ bool Benchmark::DoInit(string path, int arg_mode, int arg_num_fields, const Fiel num_fields = arg_num_fields; fields = arg_fields; num_lines = atoi(path.c_str()); + + if ( autospread != 0.0 ) + autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); @@ -85,15 +92,21 @@ bool Benchmark::DoUpdate() { if ( mode == STREAM ) { // do not do tracking, spread out elements over the second that we have... 
Put(field); - usleep(900000/num_lines); } else { SendEntry(field); } + + if ( spread != 0 ) + usleep(spread); + + if ( autospread_time != 0 ) { + usleep( autospread_time ); + } } - //if ( mode != STREAM ) { // well, does not really make sense in the streaming sense - but I like getting the event. + if ( mode != STREAM ) { EndCurrentSend(); - //} + } return true; } @@ -199,7 +212,7 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) { bool Benchmark::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); - num_lines = num_lines*multiplication_factor; + num_lines = (int) ( (double) num_lines*multiplication_factor); switch ( mode ) { case MANUAL: @@ -217,8 +230,15 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) SendEvent("lines_changed", 2, v); } + + if ( autospread != 0.0 ) { + autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); + // because executing this in every loop is apparently too expensive. + } Update(); // call update and not DoUpdate, because update actually checks disabled. + + SendEvent("HeartbeatDone", 0, 0); break; default: assert(false); diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index e0d3f124af..f0bd0c752d 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -38,7 +38,10 @@ private: int mode; int num_lines; - int multiplication_factor; + double multiplication_factor; + int spread; + double autospread; + int autospread_time; string RandomString(const int len); From ed5374b6d7b4d4601d8e8ee11f547692ebd3fffc Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 28 Mar 2012 09:35:45 -0700 Subject: [PATCH 190/651] and another option for the benchmark reader (constant addition of lines) --- scripts/base/frameworks/input/readers/benchmark.bro | 3 +++ src/input.bif | 1 + src/input/readers/Benchmark.cc | 13 +++++++++++++ src/input/readers/Benchmark.h | 1 + 4 files changed, 18 insertions(+) diff --git a/scripts/base/frameworks/input/readers/benchmark.bro b/scripts/base/frameworks/input/readers/benchmark.bro index c6a6e88fca..0f3553b117 100644 --- a/scripts/base/frameworks/input/readers/benchmark.bro +++ b/scripts/base/frameworks/input/readers/benchmark.bro @@ -11,4 +11,7 @@ export { ## spreading where usleep = 1000000 / autospread * num_lines const autospread = 0.0 &redef; + + ## addition factor for each heartbeat + const addfactor = 0 &redef; } diff --git a/src/input.bif b/src/input.bif index 059a7ec8bf..798759ab66 100644 --- a/src/input.bif +++ b/src/input.bif @@ -50,3 +50,4 @@ module InputBenchmark; const factor: double; const spread: count; const autospread: double; +const addfactor: count; diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index d8de8c2538..a17c8a7ff6 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -24,6 +24,7 @@ Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) multiplication_factor = double(BifConst::InputBenchmark::factor); autospread = double(BifConst::InputBenchmark::autospread); spread = int(BifConst::InputBenchmark::spread); + add = int(BifConst::InputBenchmark::addfactor); autospread_time = 0; } @@ -213,6 +214,7 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); num_lines = (int) ( (double) num_lines*multiplication_factor); + num_lines += add; switch ( mode ) { case MANUAL: @@ -230,6 +232,17 @@ bool 
Benchmark::DoHeartbeat(double network_time, double current_time) SendEvent("lines_changed", 2, v); } + + if ( add != 0 ) { + // we have to document at what time we changed the factor to what value. + Value** v = new Value*[2]; + v[0] = new Value(TYPE_COUNT, true); + v[0]->val.uint_val = num_lines; + v[1] = new Value(TYPE_TIME, true); + v[1]->val.double_val = CurrTime(); + + SendEvent("lines_changed", 2, v); + } if ( autospread != 0.0 ) { autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index f0bd0c752d..182adcd1af 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -42,6 +42,7 @@ private: int spread; double autospread; int autospread_time; + int add; string RandomString(const int len); From 719540414f6bcf80bd1efee9d6dcfce0b2ba7e7e Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 28 Mar 2012 09:41:00 -0700 Subject: [PATCH 191/651] repair general stupidity --- src/input/readers/Benchmark.cc | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index a17c8a7ff6..a914f1a2e8 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -222,7 +222,7 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) break; case REREAD: case STREAM: - if ( multiplication_factor != 1 ) { + if ( multiplication_factor != 1 || add != 0 ) { // we have to document at what time we changed the factor to what value. Value** v = new Value*[2]; v[0] = new Value(TYPE_COUNT, true); @@ -233,17 +233,6 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) SendEvent("lines_changed", 2, v); } - if ( add != 0 ) { - // we have to document at what time we changed the factor to what value. - Value** v = new Value*[2]; - v[0] = new Value(TYPE_COUNT, true); - v[0]->val.uint_val = num_lines; - v[1] = new Value(TYPE_TIME, true); - v[1]->val.double_val = CurrTime(); - - SendEvent("lines_changed", 2, v); - } - if ( autospread != 0.0 ) { autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); // because executing this in every loop is apparently too expensive. 
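For orientation before the next patch: everything the benchmark reader does is steered by the InputBenchmark constants plus the stream's source string, which DoInit() parses as the starting number of lines per heartbeat. A minimal driving script might look roughly like the sketch below; the enum name Input::READER_BENCHMARK, the exact Input::add_event() fields and the event signature are assumptions based on the input framework of this era rather than something these patches show.

redef InputBenchmark::factor = 1.1;      # grow the volume by 10% per heartbeat
redef InputBenchmark::addfactor = 1000;  # and add a constant 1000 lines on top

type BenchLine: record {
	s: string;
	c: count;
};

event bench_line(desc: Input::EventDescription, tpe: Input::Event, s: string, c: count)
	{
	# Consume one synthetic entry here.
	}

event bro_init()
	{
	Input::add_event([$source="10000",                  # starting lines per heartbeat
	                  $reader=Input::READER_BENCHMARK,  # assumed enum name
	                  $mode=Input::STREAM,
	                  $name="bench",
	                  $fields=BenchLine,
	                  $want_record=F,                   # pass fields individually
	                  $ev=bench_line]);
	}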
From b47620e501944acddde0277fd2e9c7ab4de6cfec Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 30 Mar 2012 09:18:44 -0700 Subject: [PATCH 192/651] add a couple more configuration options --- .../frameworks/input/readers/benchmark.bro | 6 +++++ src/input.bif | 2 ++ src/input/readers/Benchmark.cc | 23 +++++++++++++++---- src/input/readers/Benchmark.h | 3 +++ 4 files changed, 30 insertions(+), 4 deletions(-) diff --git a/scripts/base/frameworks/input/readers/benchmark.bro b/scripts/base/frameworks/input/readers/benchmark.bro index 0f3553b117..b5adc70861 100644 --- a/scripts/base/frameworks/input/readers/benchmark.bro +++ b/scripts/base/frameworks/input/readers/benchmark.bro @@ -14,4 +14,10 @@ export { ## addition factor for each heartbeat const addfactor = 0 &redef; + + ## stop spreading at x lines per heartbeat + const stopspreadat = 0 &redef; + + ## 1 -> enable timed spreading + const timedspread = 0 &redef; } diff --git a/src/input.bif b/src/input.bif index 798759ab66..63cbb2796d 100644 --- a/src/input.bif +++ b/src/input.bif @@ -51,3 +51,5 @@ const factor: double; const spread: count; const autospread: double; const addfactor: count; +const stopspreadat: count; +const timedspread: count; diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index a914f1a2e8..391fdd7435 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -26,6 +26,8 @@ Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) spread = int(BifConst::InputBenchmark::spread); add = int(BifConst::InputBenchmark::addfactor); autospread_time = 0; + stopspreadat = int(BifConst::InputBenchmark::stopspreadat); + timedspread = int(BifConst::InputBenchmark::timedspread); } @@ -54,6 +56,7 @@ bool Benchmark::DoInit(string path, int arg_mode, int arg_num_fields, const Fiel return false; } + heartbeatstarttime = CurrTime(); DoUpdate(); return true; @@ -97,12 +100,23 @@ bool Benchmark::DoUpdate() { SendEntry(field); } - if ( spread != 0 ) - usleep(spread); + if ( stopspreadat == 0 || num_lines < stopspreadat ) { + if ( spread != 0 ) + usleep(spread); - if ( autospread_time != 0 ) { - usleep( autospread_time ); + if ( autospread_time != 0 ) + usleep( autospread_time ); } + + if ( timedspread == 1 ) { + double diff; + do { + diff = CurrTime() - heartbeatstarttime; + //printf("%d %f\n", i, diff); + } while ( diff < i/(num_lines + (num_lines * 0.15) ) ); + //} while ( diff < 0.8); + } + } if ( mode != STREAM ) { @@ -215,6 +229,7 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) ReaderBackend::DoHeartbeat(network_time, current_time); num_lines = (int) ( (double) num_lines*multiplication_factor); num_lines += add; + heartbeatstarttime = CurrTime(); switch ( mode ) { case MANUAL: diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index 182adcd1af..e5dca66889 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -43,6 +43,9 @@ private: double autospread; int autospread_time; int add; + int stopspreadat; + double heartbeatstarttime; + int timedspread; string RandomString(const int len); From 1170a877693f01d03963227c5d3a2f1aeeec53e1 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 28 Mar 2012 15:37:32 -0700 Subject: [PATCH 193/651] make benchmark reader hartbeat inverval aware fix small memleak on tablereader destruction make timespread better configureable --- .../frameworks/input/readers/benchmark.bro | 2 +- src/input.bif | 2 +- src/input/Manager.cc | 11 ++++++---- 
src/input/readers/Benchmark.cc | 21 +++++++++++++++---- src/input/readers/Benchmark.h | 2 +- src/threading/Manager.h | 8 ++++++- 6 files changed, 34 insertions(+), 12 deletions(-) diff --git a/scripts/base/frameworks/input/readers/benchmark.bro b/scripts/base/frameworks/input/readers/benchmark.bro index b5adc70861..fe44914271 100644 --- a/scripts/base/frameworks/input/readers/benchmark.bro +++ b/scripts/base/frameworks/input/readers/benchmark.bro @@ -19,5 +19,5 @@ export { const stopspreadat = 0 &redef; ## 1 -> enable timed spreading - const timedspread = 0 &redef; + const timedspread = 0.0 &redef; } diff --git a/src/input.bif b/src/input.bif index 63cbb2796d..0749ac0287 100644 --- a/src/input.bif +++ b/src/input.bif @@ -52,4 +52,4 @@ const spread: count; const autospread: double; const addfactor: count; const stopspreadat: count; -const timedspread: count; +const timedspread: double; diff --git a/src/input/Manager.cc b/src/input/Manager.cc index f8ad493e11..218a9209ee 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -133,11 +133,15 @@ Manager::TableFilter::~TableFilter() { if ( rtype ) // can be 0 for sets Unref(rtype); - if ( currDict != 0 ) + if ( currDict != 0 ) { + currDict->Clear(); delete currDict; + } - if ( lastDict != 0 ) + if ( lastDict != 0 ) { + lastDict->Clear();; delete lastDict; + } } struct ReaderDefinition { @@ -898,6 +902,7 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { } //i->tab->Assign(idxval, valval); + assert(idxval); HashKey* k = filter->tab->ComputeHash(idxval); if ( !k ) { reporter->InternalError("could not hash"); @@ -1067,8 +1072,6 @@ void Manager::Put(ReaderFrontend* reader, Value* *vals) { } int Manager::SendEventFilterEvent(Filter* i, EnumVal* type, const Value* const *vals) { - bool updated = false; - assert(i); assert(i->filter_type == EVENT_FILTER); diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index 391fdd7435..118b57f616 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -13,6 +13,8 @@ #include #include +#include "../../threading/Manager.h" + using namespace input::reader; using threading::Value; using threading::Field; @@ -27,7 +29,7 @@ Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) add = int(BifConst::InputBenchmark::addfactor); autospread_time = 0; stopspreadat = int(BifConst::InputBenchmark::stopspreadat); - timedspread = int(BifConst::InputBenchmark::timedspread); + timedspread = double(BifConst::InputBenchmark::timedspread); } @@ -87,7 +89,8 @@ double Benchmark::CurrTime() { // read the entire file and send appropriate thingies back to InputMgr bool Benchmark::DoUpdate() { - for ( int i = 0; i < num_lines; i++ ) { + int linestosend = num_lines * threading::Manager::HEART_BEAT_INTERVAL; + for ( int i = 0; i < linestosend; i++ ) { Value** field = new Value*[num_fields]; for (unsigned int j = 0; j < num_fields; j++ ) { field[j] = EntryToVal(fields[j]->type, fields[j]->subtype); @@ -108,12 +111,13 @@ bool Benchmark::DoUpdate() { usleep( autospread_time ); } - if ( timedspread == 1 ) { + if ( timedspread != 0.0 ) { double diff; do { diff = CurrTime() - heartbeatstarttime; //printf("%d %f\n", i, diff); - } while ( diff < i/(num_lines + (num_lines * 0.15) ) ); + //} while ( diff < i/threading::Manager::HEART_BEAT_INTERVAL*(num_lines + (num_lines * timedspread) ) ); + } while ( diff/threading::Manager::HEART_BEAT_INTERVAL < i/(linestosend + (linestosend * timedspread) ) ); //} while ( diff < 0.8); } @@ -226,6 +230,15 @@ 
threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) { bool Benchmark::DoHeartbeat(double network_time, double current_time) { + /* + * This does not work the way I envisioned it, because the queueing is the problem. + printf("%f\n", CurrTime() - current_time); + if ( CurrTime() - current_time > 0.25 ) { + // event has hung for a time. refuse. + SendEvent("EndBenchmark", 0, 0); + return true; + } */ + ReaderBackend::DoHeartbeat(network_time, current_time); num_lines = (int) ( (double) num_lines*multiplication_factor); num_lines += add; diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index e5dca66889..ca248586da 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -45,7 +45,7 @@ private: int add; int stopspreadat; double heartbeatstarttime; - int timedspread; + double timedspread; string RandomString(const int len); diff --git a/src/threading/Manager.h b/src/threading/Manager.h index 7d9ba766d4..d5d78b288a 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -9,6 +9,10 @@ #include "BasicThread.h" #include "MsgThread.h" +namespace input { namespace reader { + class Benchmark; +}} + namespace threading { /** @@ -80,6 +84,7 @@ public: protected: friend class BasicThread; friend class MsgThread; + friend class input::reader::Benchmark; // needs heartbeat /** * Registers a new basic thread with the manager. This is @@ -118,9 +123,10 @@ protected: * Part of the IOSource interface. */ virtual const char* Tag() { return "threading::Manager"; } + + static const int HEART_BEAT_INTERVAL = 10; private: - static const int HEART_BEAT_INTERVAL = 1; typedef std::list all_thread_list; all_thread_list all_threads; From 7a71a74994a1271384e6fdc5b6c54e82e1a6761c Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 28 Mar 2012 16:31:11 -0700 Subject: [PATCH 194/651] fix largest leak in manager. --- src/input/Manager.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 218a9209ee..ed59900608 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1663,10 +1663,11 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { position += CopyValue(data, position, val); } - hash_t key = HashKey::HashBytes(data, length); + HashKey *key = new HashKey(data, length); + delete data; assert(position == length); - return new HashKey(data, length, key, true); + return key; } // convert threading value to Bro value From b7bbda724404273e38865b02b8e6c4dc767cdc51 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 28 Mar 2012 23:18:40 -0700 Subject: [PATCH 195/651] fix a couple more leaks. But - still leaking quite a lot with tables. --- src/input/Manager.cc | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index ed59900608..009fdb0bbb 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -25,9 +25,15 @@ using threading::Field; struct InputHash { hash_t valhash; - HashKey* idxkey; // does not need ref or whatever - if it is present here, it is also still present in the TableVal. 
+ HashKey* idxkey; + ~InputHash(); }; +InputHash::~InputHash() { + if ( idxkey ) + delete idxkey; +} + declare(PDict, InputHash); class Manager::Filter { @@ -821,6 +827,7 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { // ok, exact duplicate filter->lastDict->Remove(idxhash); filter->currDict->Insert(idxhash, h); + delete idxhash; return filter->num_val_fields + filter->num_idx_fields; } else { assert( filter->num_val_fields > 0 ); @@ -855,7 +862,6 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { //Val* predidx = ListValToRecordVal(idxval->AsListVal(), filter->itype, &startpos); predidx = ValueToRecordVal(vals, filter->itype, &startpos); //ValueToRecordVal(vals, filter->itype, &startpos); - Ref(valval); if ( updated ) { ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); @@ -865,7 +871,7 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { bool result; if ( filter->num_val_fields > 0 ) { // we have values - result = CallPred(filter->pred, 3, ev, predidx->Ref(), valval); + result = CallPred(filter->pred, 3, ev, predidx->Ref(), valval->Ref()); } else { // no values result = CallPred(filter->pred, 2, ev, predidx->Ref()); @@ -876,10 +882,12 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { if ( !updated ) { // throw away. Hence - we quit. And remove the entry from the current dictionary... delete(filter->currDict->RemoveEntry(idxhash)); + delete idxhash; return filter->num_val_fields + filter->num_idx_fields; } else { // keep old one filter->currDict->Insert(idxhash, h); + delete idxhash; return filter->num_val_fields + filter->num_idx_fields; } } @@ -916,8 +924,10 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { if ( filter->event && updated ) Ref(oldval); // otherwise it is no longer accessible after the assignment filter->tab->Assign(idxval, k, valval); + Unref(idxval); // asssign does not consume idxval. filter->currDict->Insert(idxhash, ih); + delete idxhash; if ( filter->event ) { EnumVal* ev; @@ -931,12 +941,11 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, oldval); } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - Ref(valval); if ( filter->num_val_fields == 0 ) { Ref(filter->description); SendEvent(filter->event, 3, filter->description->Ref(), ev, predidx); } else { - SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, valval); + SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, valval->Ref()); } } } From 8e526a7f835f514cf864c6bda172485e35b0ac60 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 29 Mar 2012 01:09:11 -0700 Subject: [PATCH 196/651] fix memory leak for tables... nearly completely. There is still a tiny where I have not yet found where the delete could be missing. For big table imports the memory footprint is significant nevertheless -- with tables of > 200000 entries, memory consumption can apparently reach in excess of 1.5Gb - and on a first glance this seems legitimate. (The reason for this is probably that we use several hash tables to keep the performance impact small). 
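That footprint is inherent to the change-detection scheme: besides the Bro table itself, each table stream keeps the index hashes of the previous full read (lastDict) and of the read currently in progress (currDict), so the two can be diffed and removals detected. Stripped of Bro's PDict/HashKey machinery, the pattern is roughly the following self-contained sketch, with std::unordered_map standing in for the real dictionaries:

#include <cstdint>
#include <string>
#include <unordered_map>

// Stand-in for InputHash: remembers the hash of a row's value columns.
struct RowHash { uint64_t valhash; };

// Keyed by a hash/serialization of the row's index columns.
typedef std::unordered_map<std::string, RowHash> Dict;

// Called for every row delivered during a full re-read.
void ProcessRow(Dict& lastDict, Dict& currDict, const std::string& idx, uint64_t valhash)
	{
	auto it = lastDict.find(idx);

	if ( it != lastDict.end() && it->second.valhash == valhash )
		{
		// Exact duplicate of the previous read: move it over, no table update, no event.
		currDict[idx] = it->second;
		lastDict.erase(it);
		return;
		}

	if ( it != lastDict.end() )
		lastDict.erase(it); // row changed; re-inserted below with its new hash

	// New or changed row: update the Bro table here (omitted) and remember the hash.
	RowHash h;
	h.valhash = valhash;
	currDict[idx] = h;
	}

// Called once the whole source has been read.
void EndOfRead(Dict& lastDict, Dict& currDict)
	{
	// Whatever is still left in lastDict was not seen this time -> delete it from the table (omitted).
	lastDict.swap(currDict);
	currDict.clear();
	}

Each live row therefore keeps one extra hash entry between reads, on top of the TableVal and its own hash table, which is where the footprint for very large imports comes from.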
--- src/input/Manager.cc | 45 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 009fdb0bbb..a1a3410f5e 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -34,6 +34,11 @@ InputHash::~InputHash() { delete idxkey; } +static void input_hash_delete_func(void* val) { + InputHash* h = (InputHash*) val; + delete h; +} + declare(PDict, InputHash); class Manager::Filter { @@ -170,6 +175,14 @@ Manager::Manager() { } +Manager::~Manager() { + for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) { + delete s->second; + delete s->first; + } + +} + ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) { ReaderDefinition* ir = input_readers; @@ -527,7 +540,9 @@ bool Manager::CreateTableStream(RecordVal* fval) { filter->itype = idx->AsRecordType(); filter->event = event ? event_registry->Lookup(event->GetID()->Name()) : 0; filter->currDict = new PDict(InputHash); + filter->currDict->SetDeleteFunc(input_hash_delete_func); filter->lastDict = new PDict(InputHash); + filter->lastDict->SetDeleteFunc(input_hash_delete_func); filter->want_record = ( want_record->InternalInt() == 1 ); Unref(want_record); // ref'd by lookupwithdefault @@ -820,20 +835,20 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { } } - InputHash *h = filter->lastDict->Lookup(idxhash); + InputHash *h = filter->lastDict->Lookup(idxhash); if ( h != 0 ) { // seen before if ( filter->num_val_fields == 0 || h->valhash == valhash ) { - // ok, exact duplicate + // ok, exact duplicate, move entry to new dicrionary and do nothing else. filter->lastDict->Remove(idxhash); filter->currDict->Insert(idxhash, h); delete idxhash; return filter->num_val_fields + filter->num_idx_fields; } else { assert( filter->num_val_fields > 0 ); - // updated + // entry was updated in some way filter->lastDict->Remove(idxhash); - delete(h); + // keep h for predicates updated = true; } @@ -881,8 +896,10 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { Unref(predidx); if ( !updated ) { // throw away. Hence - we quit. And remove the entry from the current dictionary... - delete(filter->currDict->RemoveEntry(idxhash)); + // (but why should it be in there? assert this). + assert ( filter->currDict->RemoveEntry(idxhash) == 0 ); delete idxhash; + delete h; return filter->num_val_fields + filter->num_idx_fields; } else { // keep old one @@ -893,6 +910,12 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { } } + + // now we don't need h anymore - if we are here, the entry is updated and a new h is created. 
+ if ( h ) { + delete h; + h = 0; + } Val* idxval; @@ -1014,6 +1037,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { Unref(predidx); Unref(ev); filter->currDict->Insert(lastDictIdxKey, filter->lastDict->RemoveEntry(lastDictIdxKey)); + delete lastDictIdxKey; continue; } } @@ -1030,8 +1054,9 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { if ( ev ) Unref(ev); - filter->tab->Delete(ih->idxkey); - filter->lastDict->Remove(lastDictIdxKey); // deletex in next line + Unref(filter->tab->Delete(ih->idxkey)); + filter->lastDict->Remove(lastDictIdxKey); // delete in next line + delete lastDictIdxKey; delete(ih); } @@ -1040,6 +1065,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { filter->lastDict = filter->currDict; filter->currDict = new PDict(InputHash); + filter->currDict->SetDeleteFunc(input_hash_delete_func); #ifdef DEBUG DBG_LOG(DBG_INPUT, "EndCurrentSend complete for stream %s, queueing update_finished event", @@ -1284,9 +1310,12 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { // only if filter = true -> no filtering if ( filterresult ) { - success = ( filter->tab->Delete(idxval) != 0 ); + Val* retptr = filter->tab->Delete(idxval); + success = ( retptr != 0 ); if ( !success ) { reporter->Error("Internal error while deleting values from input table"); + } else { + Unref(retptr); } } } else if ( i->filter_type == EVENT_FILTER ) { From 6e7faafdb7bbf46306a186a033a85e3b34dd364d Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 30 Mar 2012 12:40:31 -0500 Subject: [PATCH 197/651] Fix compile errors due to now-explicit IPAddr ctors and global IPFamily enum. --- src/IP.cc | 6 +++--- src/bro.bif | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/IP.cc b/src/IP.cc index 7f616fbbb0..bb60d17f15 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -74,8 +74,8 @@ RecordVal* IPv6_Hdr::BuildRecordVal(VectorVal* chain) const rv->Assign(2, new Val(ntohs(ip6->ip6_plen), TYPE_COUNT)); rv->Assign(3, new Val(ip6->ip6_nxt, TYPE_COUNT)); rv->Assign(4, new Val(ip6->ip6_hlim, TYPE_COUNT)); - rv->Assign(5, new AddrVal(ip6->ip6_src)); - rv->Assign(6, new AddrVal(ip6->ip6_dst)); + rv->Assign(5, new AddrVal(IPAddr(ip6->ip6_src))); + rv->Assign(6, new AddrVal(IPAddr(ip6->ip6_dst))); if ( ! chain ) chain = new VectorVal(new VectorType( hdrType(ip6_ext_hdr_type, "ip6_ext_hdr")->Ref())); @@ -314,7 +314,7 @@ void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) if ( ((const struct ip6_rthdr*)hdrs)->ip6r_segleft > 0 ) { const in6_addr* a = (const in6_addr*)(hdrs+len-16); - reporter->Weird(src, *a, "routing0_segleft"); + reporter->Weird(src, IPAddr(*a), "routing0_segleft"); } else { diff --git a/src/bro.bif b/src/bro.bif index 6766a89142..fa6766a7bf 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -2070,7 +2070,7 @@ function routing0_data_to_addrs%(s: string%): addr_vec while ( len > 0 ) { - IPAddr a(IPAddr::IPv6, (const uint32*) bytes, IPAddr::Network); + IPAddr a(IPv6, (const uint32*) bytes, IPAddr::Network); rval->Assign(rval->Size(), new AddrVal(a), 0); bytes += 16; len -= 16; From 384fc730d472a69d921b18fa57a6db559a8faedd Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 1 Apr 2012 17:13:51 -0700 Subject: [PATCH 198/651] fix heart_beat_interval -- initialization in constructor does not work anymore (probably due to change in init ordering?) 
--- src/threading/Manager.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index ec7ab34d14..6c14fd65ca 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -60,6 +60,12 @@ void Manager::KillThreads() void Manager::AddThread(BasicThread* thread) { + if ( heart_beat_interval == 0 ) { + // sometimes initialization does not seem to work from constructor + heart_beat_interval = double(BifConst::Threading::heart_beat_interval); + DBG_LOG(DBG_THREADING, "Heart beat interval set to %f", heart_beat_interval); + } + DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name().c_str()); all_threads.push_back(thread); idle = false; From 25affe2c826c8ba93069685d6b64f68612c971e3 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 3 Apr 2012 00:52:41 +0200 Subject: [PATCH 199/651] fix missing get call for heart beat in benchmark reader. --- src/input/readers/Benchmark.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index f0cebd2dc1..9bba7d7831 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -30,6 +30,7 @@ Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) autospread_time = 0; stopspreadat = int(BifConst::InputBenchmark::stopspreadat); timedspread = double(BifConst::InputBenchmark::timedspread); + heart_beat_interval = double(BifConst::Threading::heart_beat_interval); } From 99e3c584942724946f6c54eb80213f4b84d88559 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 3 Apr 2012 22:12:44 -0700 Subject: [PATCH 200/651] Fixing threads' DoFinish() method. It wasn't called reliably. Now, it's always called before the thread is destroyed (assuming processing has went normally so far). --- src/threading/MsgThread.cc | 47 ++++++++++++++++++++++++++++++++------ src/threading/MsgThread.h | 10 ++++++++ 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index ddcd3df1dd..0b91f8790a 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -10,13 +10,21 @@ namespace threading { ////// Messages. -// Signals child thread to terminate. This is actually a no-op; its only -// purpose is unblock the current read operation so that the child's Run() -// methods can check the termination status. -class TerminateMessage : public InputMessage +// Signals child thread to shutdown operation. +class FinishMessage : public InputMessage { public: - TerminateMessage(MsgThread* thread) : InputMessage("Terminate", thread) { } + FinishMessage(MsgThread* thread) : InputMessage("Finish", thread) { } + + virtual bool Process() { return Object()->DoFinish(); } +}; + +// A dummy message that's only purpose is unblock the current read operation +// so that the child's Run() methods can check the termination status. +class UnblockMessage : public InputMessage +{ +public: + UnblockMessage(MsgThread* thread) : InputMessage("Unblock", thread) { } virtual bool Process() { return true; } }; @@ -130,13 +138,30 @@ bool ReporterMessage::Process() MsgThread::MsgThread() : BasicThread() { cnt_sent_in = cnt_sent_out = 0; + finished = false; thread_mgr->AddMsgThread(this); } void MsgThread::OnStop() { - // This is to unblock the current queue read operation. - SendIn(new TerminateMessage(this), true); + // Signal thread to terminate and wait until it has acknowledged. + SendIn(new FinishMessage(this), true); + + int cnt = 0; + while ( ! 
finished ) + { + if ( ++cnt > 1000 ) // Insurance against broken threads ... + { + abort(); + reporter->Warning("thread %s didn't finish in time", Name().c_str()); + break; + } + + usleep(1000); + } + + // One more message to make sure the current queue read operation unblocks. + SendIn(new UnblockMessage(this), true); } void MsgThread::Heartbeat() @@ -157,6 +182,14 @@ bool MsgThread::DoHeartbeat(double network_time, double current_time) return true; } +bool MsgThread::DoFinish() + { + // This is thread-safe "enough", we're the only one ever writing + // there. + finished = true; + return true; + } + void MsgThread::Info(const char* msg) { SendOut(new ReporterMessage(ReporterMessage::INFO, this, msg)); diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 5ac1c0f780..16e6a92772 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -171,6 +171,8 @@ public: protected: friend class Manager; friend class HeartbeatMessage; + friend class FinishMessage; + friend class FinishedMessage; /** * Pops a message sent by the child from the child-to-main queue. @@ -215,6 +217,12 @@ protected: */ virtual bool DoHeartbeat(double network_time, double current_time); + /** Triggered for execution in the child thread just before shutting threads down. + * The child thread shoudl finish its operations and then *must* + * call this class' implementation. + */ + virtual bool DoFinish(); + private: /** * Pops a message sent by the main thread from the main-to-chold @@ -270,6 +278,8 @@ private: uint64_t cnt_sent_in; // Counts message sent to child. uint64_t cnt_sent_out; // Counts message sent by child. + + bool finished; // Set to true by Finished message. }; /** From 952b6b293a6068ea9892efd61890047206bd60ae Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 3 Apr 2012 22:14:56 -0700 Subject: [PATCH 201/651] Merging in DataSeries support from topic/gilbert/logging. I copied the code over manually, no merging, because (1) it needed to be adapted to the new threading API, and (2) there's more stuff in the branch that I haven't ported yet. The DS output generally seems to work, but it has seen no further testing yet. Not unit tests yet either. 
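Once built with DataSeries support, the writer is selected and tuned entirely from script-land through the LogDataSeries constants introduced below. A usage sketch could look like this; Log::default_writer is the stock logging-framework knob and is assumed to be available here, as it is not touched by this patch.

redef Log::default_writer = Log::WRITER_DATASERIES;  # route all log streams to DataSeries

redef LogDataSeries::ds_compression = "gz";    # smaller files than the default lzf, but slower
redef LogDataSeries::ds_extent_size = 131072;  # larger extents compress better, at the cost of write lag
redef LogDataSeries::ds_dump_schema = F;       # don't write the XML schema file next to each log
redef LogDataSeries::ds_num_threads = 2;       # compression threads per log stream
redef LogDataSeries::ds_use_integer = T;       # store timestamps as scaled integers instead of doubles

Per-stream selection through a Log::Filter's writer field should work the same way as setting the global default.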
--- CMakeLists.txt | 16 + cmake | 2 +- config.h.in | 3 + configure | 9 + scripts/base/frameworks/logging/__load__.bro | 1 + .../frameworks/logging/writers/dataseries.bro | 62 +++ src/CMakeLists.txt | 1 + src/logging.bif | 8 + src/logging/Manager.cc | 58 ++- src/logging/Manager.h | 8 +- src/logging/WriterBackend.cc | 11 - src/logging/WriterBackend.h | 33 +- src/logging/WriterFrontend.cc | 7 +- src/logging/writers/Ascii.cc | 5 +- src/logging/writers/DataSeries.cc | 476 ++++++++++++++++++ src/logging/writers/DataSeries.h | 69 +++ src/main.cc | 21 + src/types.bif | 1 + 18 files changed, 726 insertions(+), 65 deletions(-) create mode 100644 scripts/base/frameworks/logging/writers/dataseries.bro create mode 100644 src/logging/writers/DataSeries.cc create mode 100644 src/logging/writers/DataSeries.h diff --git a/CMakeLists.txt b/CMakeLists.txt index febc2d6ec1..04b28d2c32 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -107,6 +107,21 @@ if (GOOGLEPERFTOOLS_FOUND) endif () endif () +set(USE_DATASERIES false) +find_package(Lintel) +find_package(DataSeries) +find_package(LibXML2) + +if (LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND) + set(USE_DATASERIES true) + include_directories(BEFORE ${Lintel_INCLUDE_DIR}) + include_directories(BEFORE ${DataSeries_INCLUDE_DIR}) + include_directories(BEFORE ${LibXML2_INCLUDE_DIR}) + list(APPEND OPTLIBS ${Lintel_LIBRARIES}) + list(APPEND OPTLIBS ${DataSeries_LIBRARIES}) + list(APPEND OPTLIBS ${LibXML2_LIBRARIES}) +endif() + set(brodeps ${BinPAC_LIBRARY} ${PCAP_LIBRARY} @@ -193,6 +208,7 @@ message( "\nGeoIP: ${USE_GEOIP}" "\nGoogle perftools: ${USE_PERFTOOLS}" "\n debugging: ${USE_PERFTOOLS_DEBUG}" + "\nDataSeries: ${USE_DATASERIES}" "\n" "\n================================================================\n" ) diff --git a/cmake b/cmake index 550ab2c8d9..60b2873937 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 550ab2c8d95b1d3e18e40a903152650e6c7a3c45 +Subproject commit 60b28739379da75f26c5c2a312b7886f5209a1cc diff --git a/config.h.in b/config.h.in index e744cb7dbd..558337d1bc 100644 --- a/config.h.in +++ b/config.h.in @@ -111,6 +111,9 @@ /* Use Google's perftools */ #cmakedefine USE_PERFTOOLS +/* Use the DataSeries writer. */ +#cmakedefine USE_DATASERIES + /* Version number of package */ #define VERSION "@VERSION@" diff --git a/configure b/configure index 05aa12815b..fe7db3b06d 100755 --- a/configure +++ b/configure @@ -54,6 +54,8 @@ Usage: $0 [OPTION]... [VAR=VALUE]... 
--with-ruby-lib=PATH path to ruby library --with-ruby-inc=PATH path to ruby headers --with-swig=PATH path to SWIG executable + --with-dataseries=PATH path to DataSeries and Lintel libraries + --with-xml2=PATH path to libxml2 installation (for DataSeries) Packaging Options (for developers): --binary-package toggle special logic for binary packaging @@ -203,6 +205,13 @@ while [ $# -ne 0 ]; do --with-swig=*) append_cache_entry SWIG_EXECUTABLE PATH $optarg ;; + --with-dataseries=*) + append_cache_entry DataSeries_ROOT_DIR PATH $optarg + append_cache_entry Lintel_ROOT_DIR PATH $optarg + ;; + --with-xml2=*) + append_cache_entry LibXML2_ROOT_DIR PATH $optarg + ;; --binary-package) append_cache_entry BINARY_PACKAGING_MODE BOOL true ;; diff --git a/scripts/base/frameworks/logging/__load__.bro b/scripts/base/frameworks/logging/__load__.bro index 42b2d7c564..17e03e2ef7 100644 --- a/scripts/base/frameworks/logging/__load__.bro +++ b/scripts/base/frameworks/logging/__load__.bro @@ -1,3 +1,4 @@ @load ./main @load ./postprocessors @load ./writers/ascii +@load ./writers/dataseries diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro new file mode 100644 index 0000000000..c8ba922d2a --- /dev/null +++ b/scripts/base/frameworks/logging/writers/dataseries.bro @@ -0,0 +1,62 @@ +##! Interface for the dataseries log writer. + +module LogDataSeries; + +export { + ## Compression to use with the DS output file. Options are: + ## + ## 'none' -- No compression. + ## 'lzf' -- LZF compression. Very quick, but leads to larger output files. + ## 'lzo' -- LZO compression. Very fast decompression times. + ## 'gz' -- GZIP compression. Slower than LZF, but also produces smaller output. + ## 'bz2' -- BZIP2 compression. Slower than GZIP, but also produces smaller output. + const ds_compression = "lzf" &redef; + + ## The extent buffer size. + ## Larger values here lead to better compression and more efficient writes, but + ## also increases the lag between the time events are received and the time they + ## are actually written to disk. + const ds_extent_size = 65536 &redef; + + ## Should we dump the XML schema we use for this ds file to disk? + ## If yes, the XML schema shares the name of the logfile, but has + ## an XML ending. + const ds_dump_schema = T &redef; + + ## How many threads should DataSeries spawn to perform compression? + ## Note that this dictates the number of threads per log stream. If + ## you're using a lot of streams, you may want to keep this number + ## relatively small. + ## + ## Default value is 1, which will spawn one thread / core / stream. + ## + ## MAX is 128, MIN is 1. + const ds_num_threads = 1 &redef; + + ## Should time be stored as an integer or a double? + ## Storing time as a double leads to possible precision issues and + ## could (significantly) increase the size of the resulting DS log. + ## That said, timestamps stored in double form are more consistent + ## with the rest of Bro and are more easily readable / understandable + ## when working with the raw DataSeries format. + ## + ## Double timestamps are used by default. + const ds_use_integer = F &redef; +} + +# Default function to postprocess a rotated DataSeries log file. It moves the +# rotated file to a new name that includes a timestamp with the opening time, and +# then runs the writer's default postprocessor command on it. 
+function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool + { + # Move file to name including both opening and closing time. + local dst = fmt("%s.%s.ds", info$path, + strftime(Log::default_rotation_date_format, info$open)); + + system(fmt("/bin/mv %s %s", info$fname, dst)); + + # Run default postprocessor. + return Log::run_rotation_postprocessor_cmd(info, dst); + } + +redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ce1b25dd42..ad40fc377c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -419,6 +419,7 @@ set(bro_SRCS logging/WriterBackend.cc logging/WriterFrontend.cc logging/writers/Ascii.cc + logging/writers/DataSeries.cc logging/writers/None.cc ${dns_SRCS} diff --git a/src/logging.bif b/src/logging.bif index c8960b4e38..6e66de8772 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -72,3 +72,11 @@ const set_separator: string; const empty_field: string; const unset_field: string; +# Options for the DataSeries writer. + +module LogDataSeries; + +const ds_compression: string; +const ds_extent_size: count; +const ds_dump_schema: bool; +const ds_num_threads: count; diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 74220ecde4..04b4ef4b86 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -16,9 +16,11 @@ #include "writers/Ascii.h" #include "writers/None.h" +#ifdef USE_DATASERIES +#include "writers/DataSeries.h" +#endif + using namespace logging; -using threading::Value; -using threading::Field; // Structure describing a log writer type. struct WriterDefinition { @@ -32,6 +34,9 @@ struct WriterDefinition { WriterDefinition log_writers[] = { { BifEnum::Log::WRITER_NONE, "None", 0, writer::None::Instantiate }, { BifEnum::Log::WRITER_ASCII, "Ascii", 0, writer::Ascii::Instantiate }, +#ifdef USE_DATASERIES + { BifEnum::Log::WRITER_DATASERIES, "DataSeries", 0, writer::DataSeries::Instantiate }, +#endif // End marker, don't touch. { BifEnum::Log::WRITER_DEFAULT, "None", 0, (WriterBackend* (*)(WriterFrontend* frontend))0 } @@ -51,7 +56,7 @@ struct Manager::Filter { Func* postprocessor; int num_fields; - Field** fields; + threading::Field** fields; // Vector indexed by field number. Each element is a list of record // indices defining a path leading to the value across potential @@ -127,6 +132,17 @@ Manager::~Manager() delete *s; } +list Manager::SupportedFormats() + { + list formats; + + for ( WriterDefinition* ld = log_writers; ld->type != BifEnum::Log::WRITER_DEFAULT; ++ld ) + formats.push_back(ld->name); + + return formats; + } + + WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) { WriterDefinition* ld = log_writers; @@ -135,7 +151,7 @@ WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) { if ( ld->type == BifEnum::Log::WRITER_DEFAULT ) { - reporter->Error("unknow writer when creating writer"); + reporter->Error("unknown writer type requested"); return 0; } @@ -159,10 +175,8 @@ WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) // function. 
ld->factory = 0; - DBG_LOG(DBG_LOGGING, "failed to init writer class %s", - ld->name); - - return false; + reporter->Error("initialization of writer %s failed", ld->name); + return 0; } } @@ -449,7 +463,7 @@ bool Manager::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, filter->indices.push_back(new_indices); - filter->fields = (Field**) + filter->fields = (threading::Field**) realloc(filter->fields, sizeof(Field) * ++filter->num_fields); @@ -459,7 +473,7 @@ bool Manager::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, return false; } - Field* field = new Field(); + threading::Field* field = new threading::Field(); field->name = new_path; field->type = t->Tag(); if ( field->type == TYPE_TABLE ) @@ -572,7 +586,7 @@ bool Manager::AddFilter(EnumVal* id, RecordVal* fval) for ( int i = 0; i < filter->num_fields; i++ ) { - Field* field = filter->fields[i]; + threading::Field* field = filter->fields[i]; DBG_LOG(DBG_LOGGING, " field %10s: %s", field->name.c_str(), type_name(field->type)); } @@ -744,10 +758,10 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) // Copy the fields for WriterFrontend::Init() as it // will take ownership. - Field** arg_fields = new Field*[filter->num_fields]; + threading::Field** arg_fields = new threading::Field*[filter->num_fields]; for ( int j = 0; j < filter->num_fields; ++j ) - arg_fields[j] = new Field(*filter->fields[j]); + arg_fields[j] = new threading::Field(*filter->fields[j]); writer = CreateWriter(stream->id, filter->writer, path, filter->num_fields, @@ -898,10 +912,10 @@ threading::Value* Manager::ValToLogVal(Val* val, BroType* ty) return lval; } -Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, +threading::Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, RecordVal* columns) { - Value** vals = new Value*[filter->num_fields]; + threading::Value** vals = new threading::Value*[filter->num_fields]; for ( int i = 0; i < filter->num_fields; ++i ) { @@ -920,7 +934,7 @@ Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, if ( ! val ) { // Value, or any of its parents, is not set. - vals[i] = new Value(filter->fields[i]->type, false); + vals[i] = new threading::Value(filter->fields[i]->type, false); break; } } @@ -933,7 +947,7 @@ Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, } WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, - int num_fields, const Field* const* fields, bool local, bool remote) + int num_fields, const threading::Field* const* fields, bool local, bool remote) { Stream* stream = FindStream(id); @@ -997,7 +1011,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, return writer_obj; } -void Manager::DeleteVals(int num_fields, Value** vals) +void Manager::DeleteVals(int num_fields, threading::Value** vals) { // Note this code is duplicated in WriterBackend::DeleteVals(). for ( int i = 0; i < num_fields; i++ ) @@ -1007,7 +1021,7 @@ void Manager::DeleteVals(int num_fields, Value** vals) } bool Manager::Write(EnumVal* id, EnumVal* writer, string path, int num_fields, - Value** vals) + threading::Value** vals) { Stream* stream = FindStream(id); @@ -1116,8 +1130,10 @@ void Manager::Terminate() { for ( vector::iterator s = streams.begin(); s != streams.end(); ++s ) { - if ( *s ) - Flush((*s)->id); + if ( ! 
*s ) + continue; + + Flush((*s)->id); } } diff --git a/src/logging/Manager.h b/src/logging/Manager.h index bf097c5e1a..5af3e55b4a 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -15,7 +15,6 @@ class RotationTimer; namespace logging { - class WriterBackend; class WriterFrontend; class RotationFinishedMessage; @@ -56,7 +55,7 @@ public: * logging.bif, which just forwards here. */ bool EnableStream(EnumVal* id); - + /** * Disables a log stream. * @@ -145,6 +144,11 @@ public: */ void Terminate(); + /** + * Returns a list of supported output formats. + */ + static list SupportedFormats(); + protected: friend class WriterFrontend; friend class RotationFinishedMessage; diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 7c71c09604..28b623988c 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -222,17 +222,6 @@ bool WriterBackend::Flush() return true; } -bool WriterBackend::Finish() - { - if ( ! DoFlush() ) - { - DisableFrontend(); - return false; - } - - return true; - } - bool WriterBackend::DoHeartbeat(double network_time, double current_time) { MsgThread::DoHeartbeat(network_time, current_time); diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index efb3b5d95e..cf58430e9a 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -101,15 +101,6 @@ public: */ bool Rotate(string rotated_path, double open, double close, bool terminating); - /** - * Finishes writing to this logger in a regularl fashion. Must not be - * called if an error has been indicated earlier. After calling this, - * no further writing must be performed. - * - * @return False if an error occured. - */ - bool Finish(); - /** * Disables the frontend that has instantiated this backend. Once * disabled,the frontend will not send any further message over. @@ -175,6 +166,8 @@ public: string Render(const threading::Value::subnet_t& subnet) const; protected: + friend class FinishMessage; + /** * Writer-specific intialization method. * @@ -272,26 +265,18 @@ protected: bool terminating) = 0; /** - * Writer-specific method implementing log output finalization at - * termination. Not called when any of the other methods has - * previously signaled an error, i.e., executing this method signals - * a regular shutdown of the writer. - * - * A writer implementation must override this method but it can just - * ignore calls if flushing doesn't align with its semantics. - * - * If the method returns false, it will be assumed that a fatal error - * has occured that prevents the writer from further operation; it - * will then be disabled and eventually deleted. When returning - * false, an implementation should also call Error() to indicate what - * happened. + * Writer-specific method called just before the threading system is + * going to shutdown. + * + * This method can be overridden but one must call + * WriterBackend::DoFinish(). */ - virtual bool DoFinish() = 0; + virtual bool DoFinish() { return MsgThread::DoFinish(); } /** * Triggered by regular heartbeat messages from the main thread. * - * This method can be overridden but once must call + * This method can be overridden but one must call * WriterBackend::DoHeartbeat(). 
*/ virtual bool DoHeartbeat(double network_time, double current_time); diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 26e8eaf22e..c6a90c1fa5 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -90,7 +90,7 @@ public: FinishMessage(WriterBackend* backend) : threading::InputMessage("Finish", backend) {} - virtual bool Process() { return Object()->Finish(); } + virtual bool Process() { return Object()->DoFinish(); } }; } @@ -117,8 +117,9 @@ WriterFrontend::WriterFrontend(EnumVal* arg_stream, EnumVal* arg_writer, bool ar if ( local ) { backend = log_mgr->CreateBackend(this, writer->AsEnum()); - assert(backend); - backend->Start(); + + if ( backend ) + backend->Start(); } else diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 0759e60a82..2f25ac418f 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -69,8 +69,7 @@ bool Ascii::WriteHeaderField(const string& key, const string& val) return (fwrite(str.c_str(), str.length(), 1, file) == 1); } -bool Ascii::DoInit(string path, int num_fields, - const Field* const * fields) +bool Ascii::DoInit(string path, int num_fields, const Field* const * fields) { if ( output_to_stdout ) path = "/dev/stdout"; @@ -146,7 +145,7 @@ bool Ascii::DoFlush() bool Ascii::DoFinish() { - return true; + return WriterBackend::DoFinish(); } bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc new file mode 100644 index 0000000000..27c4cd6009 --- /dev/null +++ b/src/logging/writers/DataSeries.cc @@ -0,0 +1,476 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include +#include +#include + +#include + +#include "NetVar.h" +#include "threading/SerialTypes.h" + +#include "DataSeries.h" + +using namespace logging; +using namespace writer; + +// NOTE: Naming conventions are a little bit scattershot at the moment. +// Within the scope of this file, a function name prefixed by '_' denotes a +// static function. + +// ************************ LOCAL PROTOTYPES ********************************* + +struct SchemaValue; + +/** + * Turns a log value into a std::string. Uses an ostringstream to do the + * heavy lifting, but still need to switch on the type to know which value + * in the union to give to the string string for processing. + * + * @param val The value we wish to convert to a string + * @return the string value of val + */ +static std::string _LogValueToString(threading::Value* val); + +/** + * Takes a field type and converts it to a relevant DataSeries type. + * + * @param field We extract the type from this and convert it into a relevant DS type. + * @return String representation of type that DataSeries can understand. + */ +static string _GetDSFieldType(const threading::Field* field); + +/** + * Takes a field type and converts it to a readable string. + * + * @param field We extract the type from this and convert it into a readable string. + * @return String representation of the field's type + */ +static string _GetBroTypeString(const threading::Field *field); + +/** + * Takes a list of types, a list of names, and a title, and uses it to construct a valid DataSeries XML schema + * thing, which is then returned as a std::string + * + * @param opts std::vector of strings containing a list of options to be appended to each field (e.g. "pack_relative=yes") + * @param sTitle Name of this schema. 
Ideally, these schemas would be aggregated and re-used. + */ +static string _BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle); + +/** + * Are there any options we should put into the XML schema? + * + * @param field We extract the type from this and return any options that make sense for that type. + * @return Options that can be added directly to the XML (e.g. "pack_relative=\"yes\"") + */ +static std::string _GetDSOptionsForType(const threading::Field *field); + +/** + * Internal helper structure; populate a vector of these which is passed to the XML generator for its use. + */ +struct SchemaValue +{ + string ds_type; + string bro_type; + string field_name; + string field_options; + + SchemaValue(const threading::Field *field) + { + ds_type = _GetDSFieldType(field); + field_name = string(field->name); + field_options = _GetDSOptionsForType(field); + bro_type = _GetBroTypeString(field); + } +}; + +// ************************ LOCAL IMPL ********************************* + +std::string DataSeries::LogValueToString(threading::Value *val) +{ + const int strsz = 1024; + char strbuf[strsz]; + + // In some cases, no value is attached. If this is the case, return an empty string. + if(!val->present) + return ""; + + std::ostringstream ostr; + switch(val->type) + { + case TYPE_BOOL: + return (val->val.int_val ? "true" : "false"); + + case TYPE_INT: + ostr << val->val.int_val; + return ostr.str(); + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + ostr << val->val.uint_val; + return ostr.str(); + + case TYPE_SUBNET: + ostr << Render(val->val.subnet_val); + return ostr.str(); + + case TYPE_ADDR: + ostr << Render(val->val.addr_val); + return ostr.str(); + + // Note: These two cases are relatively special. We need to convert these values into their integer equivalents + // to maximize precision. At the moment, there won't be a noticeable effect (Bro uses the double format everywhere + // internally, so we've already lost the precision we'd gain here), but timestamps may eventually switch to this + // representation within Bro. + // + // in the near-term, this *should* lead to better pack_relative (and thus smaller output files). + case TYPE_TIME: + case TYPE_INTERVAL: + ostr << (unsigned long)(DataSeries::TIME_SCALE * val->val.double_val); + return ostr.str(); + + case TYPE_DOUBLE: + ostr << val->val.double_val; + return ostr.str(); + + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + { + int size = val->val.string_val->size(); + string tmpString = ""; + if(size) + tmpString = string(val->val.string_val->data(), val->val.string_val->size()); + else + tmpString = string(""); + return tmpString; + } + case TYPE_TABLE: + { + if ( ! val->val.set_val.size ) + { + return ""; + } + + string tmpString = ""; + for ( int j = 0; j < val->val.set_val.size; j++ ) + { + if ( j > 0 ) + tmpString += ":"; //TODO: Specify set separator char in configuration. + + tmpString += LogValueToString(val->val.set_val.vals[j]); + } + return tmpString; + } + case TYPE_VECTOR: + { + if ( ! val->val.vector_val.size ) + { + return ""; + } + + string tmpString = ""; + for ( int j = 0; j < val->val.vector_val.size; j++ ) + { + if ( j > 0 ) + tmpString += ":"; //TODO: Specify set separator char in configuration. 
+ + tmpString += LogValueToString(val->val.vector_val.vals[j]); + } + + return tmpString; + } + default: + return "???"; + } +} + +static string _GetDSFieldType(const threading::Field *field) +{ + switch(field->type) + { + case TYPE_BOOL: + return "bool"; + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + case TYPE_INT: + case TYPE_TIME: + case TYPE_INTERVAL: + return "int64"; + + case TYPE_DOUBLE: + return "double"; + + case TYPE_SUBNET: + case TYPE_ADDR: + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_TABLE: + case TYPE_VECTOR: + default: + return "variable32"; + + } +} + +static string _GetBroTypeString(const threading::Field *field) +{ + switch(field->type) + { + case TYPE_BOOL: + return "bool"; + case TYPE_COUNT: + return "count"; + case TYPE_COUNTER: + return "counter"; + case TYPE_PORT: + return "port"; + case TYPE_INT: + return "int"; + case TYPE_TIME: + return "time"; + case TYPE_INTERVAL: + return "interval"; + case TYPE_DOUBLE: + return "double"; + case TYPE_SUBNET: + return "subnet"; + case TYPE_ADDR: + return "addr"; + case TYPE_ENUM: + return "enum"; + case TYPE_STRING: + return "string"; + case TYPE_FILE: + return "file"; + case TYPE_TABLE: + return "table"; + case TYPE_VECTOR: + return "vector"; + default: + return "???"; + } +} + +static string _BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle) +{ + if("" == sTitle) + { + sTitle = "GenericBroStream"; + } + string xmlschema; + xmlschema = "\n"; + for(size_t i = 0; i < vals.size(); ++i) + { + xmlschema += "\t\n"; + } + xmlschema += "\n"; + for(size_t i = 0; i < vals.size(); ++i) + { + xmlschema += "\n"; + } + return xmlschema; +} + +static std::string _GetDSOptionsForType(const threading::Field *field) +{ + switch(field->type) + { + case TYPE_TIME: + case TYPE_INTERVAL: + return "pack_relative=\"" + std::string(field->name) + "\""; + case TYPE_SUBNET: + case TYPE_ADDR: + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_TABLE: + case TYPE_VECTOR: + return "pack_unique=\"yes\""; + default: + return ""; + } +} + +// ************************ CLASS IMPL ********************************* + +DataSeries::DataSeries(WriterFrontend* frontend) : WriterBackend(frontend) +{ + ds_compression = string((const char *)BifConst::LogDataSeries::ds_compression->Bytes(), BifConst::LogDataSeries::ds_compression->Len()); + ds_dump_schema = BifConst::LogDataSeries::ds_dump_schema; + ds_extent_size = BifConst::LogDataSeries::ds_extent_size; + ds_num_threads = BifConst::LogDataSeries::ds_num_threads; +} + +DataSeries::~DataSeries() +{ +} + +bool DataSeries::DoInit(string path, int num_fields, const threading::Field* const * fields) + { + // We first construct an XML schema thing (and, if ds_dump_schema is + // set, dump it to path + ".ds.xml"). Assuming that goes well, we + // use that schema to build our output logfile and prepare it to be + // written to. + + // Note: compressor count must be set *BEFORE* DataSeriesSink is instantiated. + if(ds_num_threads < THREAD_MIN && ds_num_threads != 0) + { + fprintf(stderr, "%d is too few threads! Using %d instead\n", (int)ds_num_threads, (int)THREAD_MIN); + ds_num_threads = THREAD_MIN; + } + if(ds_num_threads > THREAD_MAX) + { + fprintf(stderr, "%d is too many threads! 
Dropping back to %d\n", (int)ds_num_threads, (int)THREAD_MAX); + ds_num_threads = THREAD_MAX; + } + + if(ds_num_threads > 0) + { + DataSeriesSink::setCompressorCount(ds_num_threads); + } + vector schema_list; + for ( int i = 0; i < num_fields; i++ ) + { + const threading::Field* field = fields[i]; + SchemaValue val(field); + schema_list.push_back(val); + } + string schema = _BuildDSSchemaFromFieldTypes(schema_list, path); + if(ds_dump_schema) + { + FILE * pFile; + pFile = fopen ( string(path + ".ds.xml").c_str() , "wb" ); + if(NULL == pFile) + { + perror("Could not dump schema"); + } + fwrite (schema.c_str(), 1 , schema.length() , pFile ); + fclose (pFile); + } + + int compress_type = Extent::compress_all; + + if(ds_compression == "lzf") + { + compress_type = Extent::compress_lzf; + } + else if(ds_compression == "lzo") + { + compress_type = Extent::compress_lzo; + } + else if(ds_compression == "gz") + { + compress_type = Extent::compress_gz; + } + else if(ds_compression == "bz2") + { + compress_type = Extent::compress_bz2; + } + else if(ds_compression == "none") + { + compress_type = Extent::compress_none; + } + else if(ds_compression == "any") + { + compress_type = Extent::compress_all; + } + else + { + fprintf(stderr, "%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'gz', 'bz2', 'none', 'any'\n", ds_compression.c_str()); + fprintf(stderr, "Defaulting to 'any'\n"); + } + + log_type = const_cast(log_types.registerType(schema)); + + log_series.setType(*log_type); + log_file = new DataSeriesSink(path + ".ds", compress_type); + log_file->writeExtentLibrary(log_types); + + for(size_t i = 0; i < schema_list.size(); ++i) + extents.insert(std::make_pair(schema_list[i].field_name, GeneralField::create(log_series, schema_list[i].field_name))); + + if(ds_extent_size < ROW_MIN) + { + fprintf(stderr, "%d is not a valid value for 'rows'. Using min of %d instead.\n", (int)ds_extent_size, (int)ROW_MIN); + ds_extent_size = ROW_MIN; + } + else if(ds_extent_size > ROW_MAX) + { + fprintf(stderr, "%d is not a valid value for 'rows'. Using max of %d instead.\n", (int)ds_extent_size, (int)ROW_MAX); + ds_extent_size = ROW_MAX; + } + log_output = new OutputModule(*log_file, log_series, log_type, ds_extent_size); + + return true; + + } + +bool DataSeries::DoFlush() +{ + // Flushing is handled by DataSeries automatically, so this function doesn't do anything. + return true; +} + +bool DataSeries::DoFinish() +{ + for(ExtentIterator iter = extents.begin(); + iter != extents.end(); ++iter) + { + delete iter->second; + } + extents.clear(); + // Don't delete the file before you delete the output, or bad things happen. + delete log_output; + delete log_file; + + return WriterBackend::DoFinish(); +} + +bool DataSeries::DoWrite(int num_fields, const threading::Field* const * fields, + threading::Value** vals) +{ + log_output->newRecord(); + for(size_t i = 0; i < (size_t)num_fields; ++i) + { + ExtentIterator iter = extents.find(fields[i]->name); + assert(iter != extents.end()); + if( iter != extents.end() ) + { + GeneralField *cField = iter->second; + if(vals[i]->present) + cField->set(LogValueToString(vals[i])); + } + } + + return true; +} + +bool DataSeries::DoRotate(string rotated_path, double open, double close, bool terminating) +{ + // Note that if DS files are rotated too often, the aggregate log size will be (much) larger. + + DoFinish(); + + string dsname = Path() + ".ds"; + string nname = rotated_path + ".ds"; + rename(dsname.c_str(), nname.c_str()); + + if ( ! 
FinishedRotation(nname, dsname, open, close, terminating) ) + { + Error(Fmt("error rotating %s to %s", dsname.c_str(), nname.c_str())); + return false; + } + + return DoInit(Path(), NumFields(), Fields()); +} + +bool DataSeries::DoSetBuf(bool enabled) +{ + // DataSeries is *always* buffered to some degree. This option is ignored. + return true; +} diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h new file mode 100644 index 0000000000..5331975937 --- /dev/null +++ b/src/logging/writers/DataSeries.h @@ -0,0 +1,69 @@ +// See the file "COPYING" in the main distribution directory for copyright. +// +// A binary log writer producing DataSeries output. See doc/data-series.rst +// for more information. + +#ifndef LOGGING_WRITER_DATA_SERIES_H +#define LOGGING_WRITER_DATA_SERIES_H + +#include "../WriterBackend.h" + +#include +#include +#include +#include + +namespace logging { namespace writer { + +class DataSeries : public WriterBackend { +public: + DataSeries(WriterFrontend* frontend); + ~DataSeries(); + + static WriterBackend* Instantiate(WriterFrontend* frontend) + { return new DataSeries(frontend); } + +protected: + virtual bool DoInit(string path, int num_fields, + const threading::Field* const * fields); + + virtual bool DoWrite(int num_fields, const threading::Field* const* fields, + threading::Value** vals); + virtual bool DoSetBuf(bool enabled); + virtual bool DoRotate(string rotated_path, double open, + double close, bool terminating); + virtual bool DoFlush(); + virtual bool DoFinish(); + +private: + static const size_t ROW_MIN = 2048; // Minimum extent size. + static const size_t ROW_MAX = (1024 * 1024 * 100); // Maximum extent size. + static const size_t THREAD_MIN = 1; // Minimum number of compression threads that DataSeries may spawn. + static const size_t THREAD_MAX = 128; // Maximum number of compression threads that DataSeries may spawn. + static const size_t TIME_SCALE = 1000000; // Fixed-point multiplier for time values when converted to integers. + + std::string LogValueToString(threading::Value *val); + + typedef std::map ExtentMap; + typedef ExtentMap::iterator ExtentIterator; + + // Internal DataSeries structures we need to keep track of. + DataSeriesSink* log_file; + ExtentTypeLibrary log_types; + ExtentType *log_type; + ExtentSeries log_series; + OutputModule* log_output; + ExtentMap extents; + + // Options set from the script-level. + uint64 ds_extent_size; + uint64 ds_num_threads; + string ds_compression; + bool ds_dump_schema; +}; + +} +} + +#endif + diff --git a/src/main.cc b/src/main.cc index ff33a3859d..f604d379ac 100644 --- a/src/main.cc +++ b/src/main.cc @@ -201,6 +201,27 @@ void usage() fprintf(stderr, " $BRO_LOG_SUFFIX | ASCII log file extension (.%s)\n", logging::writer::Ascii::LogExt().c_str()); fprintf(stderr, " $BRO_PROFILER_FILE | Output file for script execution statistics (not set)\n"); + fprintf(stderr, "\n"); + fprintf(stderr, " Supported log formats: "); + + bool first = true; + list fmts = logging::Manager::SupportedFormats(); + + for ( list::const_iterator i = fmts.begin(); i != fmts.end(); ++i ) + { + if ( *i == "None" ) + // Skip, it's uninteresting. + continue; + + if ( ! 
first ) + fprintf(stderr, ","); + + fprintf(stderr, "%s", (*i).c_str()); + first = false; + } + + fprintf(stderr, "\n"); + exit(1); } diff --git a/src/types.bif b/src/types.bif index 4657584a90..fe2e6ff861 100644 --- a/src/types.bif +++ b/src/types.bif @@ -162,6 +162,7 @@ enum Writer %{ WRITER_DEFAULT, WRITER_NONE, WRITER_ASCII, + WRITER_DATASERIES, %} enum ID %{ From c381da9ccfd956be506b17267e92077a0dc1868c Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 3 Apr 2012 22:18:35 -0700 Subject: [PATCH 202/651] This could be fixing the memory problems finally. Keeping fingers crossed ... --- src/threading/Manager.cc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 43eb0313f4..7481e83192 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -73,14 +73,11 @@ void Manager::GetFds(int* read, int* write, int* except) double Manager::NextTimestamp(double* network_time) { - if ( ::network_time && ! next_beat ) - next_beat = ::network_time + HEART_BEAT_INTERVAL; - // fprintf(stderr, "N %.6f %.6f did_process=%d next_next=%.6f\n", ::network_time, timer_mgr->Time(), (int)did_process, next_beat); - if ( did_process || ::network_time > next_beat ) + if ( did_process || ::network_time > next_beat || ! next_beat ) // If we had something to process last time (or out heartbeat - // is due), we want to check for more asap. + // is due or not set yet), we want to check for more asap. return timer_mgr->Time(); return -1.0; @@ -88,7 +85,13 @@ double Manager::NextTimestamp(double* network_time) void Manager::Process() { - bool do_beat = (next_beat && network_time > next_beat); + bool do_beat = false; + + if ( network_time && (network_time > next_beat || ! next_beat) ) + { + do_beat = true; + next_beat = ::network_time + HEART_BEAT_INTERVAL; + } did_process = false; @@ -97,10 +100,7 @@ void Manager::Process() MsgThread* t = *i; if ( do_beat ) - { t->Heartbeat(); - next_beat = 0; - } while ( t->HasOut() ) { From fe61d02a9f9d32620a9783f1c61a0fe56880235f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 4 Apr 2012 10:42:50 -0700 Subject: [PATCH 203/651] Two more tweaks to reliably terminate when reading from trace. --- src/threading/Manager.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 7481e83192..abdbf849b5 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -43,6 +43,7 @@ void Manager::Terminate() msg_threads.clear(); idle = true; + closed = true; terminating = false; } @@ -75,7 +76,7 @@ double Manager::NextTimestamp(double* network_time) { // fprintf(stderr, "N %.6f %.6f did_process=%d next_next=%.6f\n", ::network_time, timer_mgr->Time(), (int)did_process, next_beat); - if ( did_process || ::network_time > next_beat || ! next_beat ) + if ( ::network_time && (did_process || ::network_time > next_beat || ! next_beat) ) // If we had something to process last time (or out heartbeat // is due or not set yet), we want to check for more asap. return timer_mgr->Time(); From d8d83f590bb9836205f71a596b2868ffb6d486f4 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 4 Apr 2012 15:27:43 -0500 Subject: [PATCH 204/651] Fix handling of IPv6 atomic fragments. The FragReassembler expire_timer was left uninitialized until after the first fragment is added, but since the atomic fragment is also the last, the reassembler thought expire_timer needed to be deleted. 
This fix just initializes expire_timer before the first fragment is added. --- src/Frag.cc | 4 ++-- .../btest/Baseline/core.ipv6-atomic-frag/output | 4 ++++ testing/btest/Traces/ipv6-http-atomic-frag.trace | Bin 0 -> 4040 bytes testing/btest/core/ipv6-atomic-frag.test | 7 +++++++ 4 files changed, 13 insertions(+), 2 deletions(-) create mode 100644 testing/btest/Baseline/core.ipv6-atomic-frag/output create mode 100644 testing/btest/Traces/ipv6-http-atomic-frag.trace create mode 100644 testing/btest/core/ipv6-atomic-frag.test diff --git a/src/Frag.cc b/src/Frag.cc index 9bd16a71c9..04298e14ad 100644 --- a/src/Frag.cc +++ b/src/Frag.cc @@ -52,8 +52,6 @@ FragReassembler::FragReassembler(NetSessions* arg_s, frag_size = 0; // flag meaning "not known" next_proto = ip->NextProto(); - AddFragment(t, ip, pkt); - if ( frag_timeout != 0.0 ) { expire_timer = new FragTimer(this, t + frag_timeout); @@ -61,6 +59,8 @@ FragReassembler::FragReassembler(NetSessions* arg_s, } else expire_timer = 0; + + AddFragment(t, ip, pkt); } FragReassembler::~FragReassembler() diff --git a/testing/btest/Baseline/core.ipv6-atomic-frag/output b/testing/btest/Baseline/core.ipv6-atomic-frag/output new file mode 100644 index 0000000000..4a628a4bdc --- /dev/null +++ b/testing/btest/Baseline/core.ipv6-atomic-frag/output @@ -0,0 +1,4 @@ +[orig_h=2001:db8:1::2, orig_p=36951/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] +[orig_h=2001:db8:1::2, orig_p=59694/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] +[orig_h=2001:db8:1::2, orig_p=27393/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] +[orig_h=2001:db8:1::2, orig_p=45805/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] diff --git a/testing/btest/Traces/ipv6-http-atomic-frag.trace b/testing/btest/Traces/ipv6-http-atomic-frag.trace new file mode 100644 index 0000000000000000000000000000000000000000..d5d9db276cc668c51dc6289a25628db8fd3786cd GIT binary patch literal 4040 zcmchZUr19?9LIn6y3M&%wA>nKvZ4pYoSBrN!_>$iOmAu~(A3Fhn{Gz9C|_dzH3&kX z2#Ub$Ee&B$MT~+B!=3_PLL{>%B{S`%ddPmia}?HXjjemv;NZEtXTQ(ye1E_5^-RAE zJ`h0z8=ucw00AFbhy3;Dx{{$8=QJS0e7m^&5w0|7uFZDPHJFmsal*)v9+>24lu)t9)FYV!SiXM(&{xl*elohi=M$ErW$}k0* zCwb&ngC0gOD2)h_y>Ui1n*uEh05-SZ=dCcL*xW9M6aS{z{2ss8RZ;D5ZnqWjIle98 zuW{CS+S$+6xr^5q;e=N37r2rIBrV*ig#(kN5uu(1ao3ZpZMJ}ZH^AXTO_righto>H zGy?(eqC~2Q?_=U;qWHaB{F^m;bz8Fy8^=eDQ@hLwm#!v&&?{vkVTlulkf4e>y{Jw* zYfT@(Nz`3?N*h(3T`?S7>S}-mD@dM;{b-*6ba35WoM1$PQsY^${;b)v38U-Cc=Bhs z@lgJ+Z>gNW4oRt0-Ti1sMA$K*jd~Q~R)`buD6G!@aTEx+4}nUH%>+wD!O-DMfvU57 zD~lLesSkxkPOuM=7P+rf(!$BKKwGHF;ac2C(y80{Mr<4>HBRY$PROfF0KrAJPONp5|r9PkMO<2-GI^O z$rf7BEa^T%eG|$|S2zE8BsEfXyG=a*7uD%eZ6R)jI00>8=a-1K@D_nei_HXUM8Q_5 z*nI0Vtpe^)SxfbUysN6U!*f^uqIG%A%2!#?r7$aNo zJm!QcB&ed!7^<_%I;9Yx4Ug6MCr#92RS<(MkP!d`mq5y%?0!#kx-I;`UL?YEBq+6o z;L_Lp<7F6qj%?xKBDV#KUYi&Fwh(vzb4V(u>IS#){Dd dl@^-`R*r%#Q?Z|2;ak~uLiueWEOLTv;TKT4;Youtput +# @TEST-EXEC: btest-diff output + +event new_connection(c: connection) + { + print c$id; + } From fb0614b5c64a544e44ba30441f498c8a36b62406 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 4 Apr 2012 17:46:13 -0700 Subject: [PATCH 205/651] Adding notice_policy.log canonification for external tests. These was still producing false positives. 
--- testing/scripts/diff-canon-notice-policy | 10 ++++++++++ testing/scripts/diff-canonifier-external | 2 ++ 2 files changed, 12 insertions(+) create mode 100755 testing/scripts/diff-canon-notice-policy diff --git a/testing/scripts/diff-canon-notice-policy b/testing/scripts/diff-canon-notice-policy new file mode 100755 index 0000000000..f05abaa103 --- /dev/null +++ b/testing/scripts/diff-canon-notice-policy @@ -0,0 +1,10 @@ +#! /usr/bin/awk -f +# +# A diff canonifier that removes the priorities in notice_policy.log. + +/^#/ && $2 == "notice_policy" { filter = 1; } + +filter == 1 && /^[^#]/ { sub("^[0-9]*", "X"); } + +{ print; } + diff --git a/testing/scripts/diff-canonifier-external b/testing/scripts/diff-canonifier-external index 6796614362..e788a4a1bb 100755 --- a/testing/scripts/diff-canonifier-external +++ b/testing/scripts/diff-canonifier-external @@ -6,4 +6,6 @@ | `dirname $0`/diff-remove-uids \ | `dirname $0`/diff-remove-mime-types \ | `dirname $0`/diff-remove-x509-names \ + | `dirname $0`/diff-canon-notice-policy \ | `dirname $0`/diff-sort + From 4e49b98bbabcd7acdeeb4fcfa14c4961fdc0b565 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 4 Apr 2012 17:57:38 -0700 Subject: [PATCH 206/651] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index c8e8fe477b..8da6c55697 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit c8e8fe477b5dec635e5ce00f3f764fad069c549c +Subproject commit 8da6c55697ff580600cfff474f4ccba2a592f911 From c372eaf7b59b30638c3a34e53114259c638c691e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 4 Apr 2012 18:36:40 -0700 Subject: [PATCH 207/651] Updating submodule(s). [nomail] --- cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake b/cmake index 550ab2c8d9..5ddec45563 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 550ab2c8d95b1d3e18e40a903152650e6c7a3c45 +Subproject commit 5ddec4556338339fc4d1da27bce766a827990543 From d0b68771ef78227315724cdf99b296ed723200bb Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 4 Apr 2012 18:37:10 -0700 Subject: [PATCH 208/651] Updating submodule(s). [nomail] --- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aux/binpac b/aux/binpac index dd1a3a95f0..56ae73ab99 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit dd1a3a95f07082efcd5274b21104a038d523d132 +Subproject commit 56ae73ab995dda665d8918d1a6b3af39b15991e3 diff --git a/aux/bro-aux b/aux/bro-aux index a59b35bdad..12d32194c1 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit a59b35bdada8f70fb1a59bf7bb2976534c86d378 +Subproject commit 12d32194c19d2dce06818588a2aeccf234de1889 diff --git a/aux/broccoli b/aux/broccoli index a4046c2f79..60898666ba 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit a4046c2f79b6ab0ac19ae8be94b79c6ce578bea7 +Subproject commit 60898666ba1df1913c08ad5045b1e56f974060cc diff --git a/aux/broctl b/aux/broctl index c86b7e990b..4d1a0692a7 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit c86b7e990b4d39cd48c0cb692077aa081b418149 +Subproject commit 4d1a0692a7d7b5229230856a4041f70fd3a6eaa5 From 017622427bc9ac791860e31d7c64b20d820373c1 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 4 Apr 2012 18:42:15 -0700 Subject: [PATCH 209/651] Fixing perftools-debug support. 
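The change below only renames the macro emitted into the generated config.h, but that is the whole point: a #cmakedefine line silently produces nothing when its spelling does not line up with both the CMake option and the macro the C++ sources test, so the perftools debugging code was quietly compiling out. The sketch that follows is illustrative only, not Bro's actual startup code; the HeapLeakChecker usage and the header path are assumptions about a typical gperftools install (older releases ship the header under google/ rather than gperftools/).

// Minimal sketch: code that only exists when the generated config.h really
// defines USE_PERFTOOLS_DEBUG. If the #cmakedefine spelling in config.h.in
// is wrong, everything inside these guards vanishes without any warning.
#include <cstdio>
#include "config.h"

#ifdef USE_PERFTOOLS_DEBUG
#include <gperftools/heap-checker.h>
#endif

int main()
	{
#ifdef USE_PERFTOOLS_DEBUG
	// Track heap allocations for the lifetime of this scope.
	HeapLeakChecker heap_checker("illustrative-run");
#endif

	// ... normal processing would happen here ...

#ifdef USE_PERFTOOLS_DEBUG
	if ( ! heap_checker.NoLeaks() )
		fprintf(stderr, "gperftools reported leaked memory\n");
#endif

	return 0;
	}
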
--- config.h.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.h.in b/config.h.in index e744cb7dbd..0047344c51 100644 --- a/config.h.in +++ b/config.h.in @@ -109,7 +109,7 @@ #cmakedefine HAVE_GEOIP_CITY_EDITION_REV0_V6 /* Use Google's perftools */ -#cmakedefine USE_PERFTOOLS +#cmakedefine USE_PERFTOOLS_DEBUG /* Version number of package */ #define VERSION "@VERSION@" From c92dc7e6afd9906a25f2c503ad1aacff05c2f08f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 4 Apr 2012 19:27:43 -0700 Subject: [PATCH 210/651] Reverting SocketComm change tuning I/O behaviour. Not sure that's right. --- src/RemoteSerializer.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 110a25e66f..bf195e9d3a 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -3399,8 +3399,10 @@ void SocketComm::Run() small_timeout.tv_usec = io->CanWrite() || io->CanRead() ? 1 : 10; +#if 0 if ( ! io->CanWrite() ) usleep(10); +#endif int a = select(max_fd + 1, &fd_read, &fd_write, &fd_except, &small_timeout); From 11b15cc2904325421173d956ec60dda4c4b7b89d Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 5 Apr 2012 10:50:35 -0500 Subject: [PATCH 211/651] Refactor IP_Hdr routing header handling, add MobileIPv6 Home Address handling. Packets that use the Home Address Destination option use that option's address as the connection's originator. --- src/IP.cc | 102 +++++++++++++++--- src/IP.h | 100 +++++++++++------ src/PacketFilter.cc | 2 +- src/PacketSort.cc | 2 +- src/Serializer.cc | 2 +- src/Sessions.cc | 6 +- src/UDP.cc | 2 +- src/bro.bif | 4 +- testing/btest/Baseline/core.checksums/bad.out | 2 + .../core.mobile-ipv6-home-addr/output | 2 + .../chksums/ip6-hoa-tcp-bad-chksum.pcap | Bin 0 -> 138 bytes .../chksums/ip6-hoa-tcp-good-chksum.pcap | Bin 0 -> 138 bytes .../chksums/ip6-hoa-udp-bad-chksum.pcap | Bin 0 -> 130 bytes .../chksums/ip6-hoa-udp-good-chksum.pcap | Bin 0 -> 130 bytes testing/btest/Traces/ipv6-mobile-hoa.trace | Bin 0 -> 130 bytes testing/btest/core/checksums.test | 4 + testing/btest/core/mobile-ipv6-home-addr.test | 10 ++ 17 files changed, 185 insertions(+), 53 deletions(-) create mode 100644 testing/btest/Baseline/core.mobile-ipv6-home-addr/output create mode 100644 testing/btest/Traces/chksums/ip6-hoa-tcp-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-hoa-tcp-good-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-hoa-udp-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-hoa-udp-good-chksum.pcap create mode 100644 testing/btest/Traces/ipv6-mobile-hoa.trace create mode 100644 testing/btest/core/mobile-ipv6-home-addr.test diff --git a/src/IP.cc b/src/IP.cc index 620b294d40..cce7af152f 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -305,19 +305,12 @@ void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) chain.push_back(p); - // Check for routing type 0 header. - if ( current_type == IPPROTO_ROUTING && - ((const struct ip6_rthdr*)hdrs)->ip6r_type == 0 ) - { - if ( ((const struct ip6_rthdr*)hdrs)->ip6r_segleft > 0 ) - // Remember the index for later so we can determine the final - // destination of the packet. - route0_hdr_idx = chain.size() - 1; + // Check for routing headers and remember final destination address. 
+ if ( current_type == IPPROTO_ROUTING ) + ProcessRoutingHeader((const struct ip6_rthdr*) hdrs, len); - // RFC 5095 deprecates routing type 0 headers, so raise weirds - IPAddr src(((const struct ip6_hdr*)(chain[0]->Data()))->ip6_src); - reporter->Weird(src, FinalDst(), "routing0_hdr"); - } + if ( current_type == IPPROTO_DSTOPTS ) + ProcessDstOpts((const struct ip6_dest*) hdrs, len); hdrs += len; length += len; @@ -326,6 +319,91 @@ void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) isIPv6ExtHeader(next_type) ); } +void IPv6_Hdr_Chain::ProcessRoutingHeader(const struct ip6_rthdr* r, uint16 len) + { + if ( finalDst ) + { + // RFC 2460 section 4.1 says Routing should occur at most once + reporter->Weird(SrcAddr(), DstAddr(), "multiple_routing_headers"); + return; + } + + // Last 16 bytes of header (for all known types) is the address we want + const in6_addr* addr = (const in6_addr*)(((const u_char*)r) + len - 16); + + switch ( r->ip6r_type ) { + case 0: // Defined by RFC 2460, deprecated by RFC 5095 + { + if ( r->ip6r_segleft > 0 && r->ip6r_len >= 2 ) + { + if ( r->ip6r_len % 2 == 0 ) + finalDst = new IPAddr(*addr); + else + reporter->Weird(SrcAddr(), DstAddr(), "odd_routing0_len"); + } + + // Always raise a weird since this type is deprecated + reporter->Weird(SrcAddr(), DstAddr(), "routing0_hdr"); + } + break; + + case 2: // Defined by Mobile IPv6 RFC 6275 + { + if ( r->ip6r_segleft > 0 ) + { + if ( r->ip6r_len == 2 ) + finalDst = new IPAddr(*addr); + else + reporter->Weird(SrcAddr(), DstAddr(), "bad_routing2_len"); + } + } + break; + + default: + reporter->Weird(fmt("unknown_routing_type_%d", r->ip6r_type)); + break; + } + } + +void IPv6_Hdr_Chain::ProcessDstOpts(const struct ip6_dest* d, uint16 len) + { + const u_char* data = (const u_char*) d; + len -= 2 * sizeof(uint8); + data += 2* sizeof(uint8); + + while ( len > 0 ) + { + const struct ip6_opt* opt = (const struct ip6_opt*) data; + switch ( opt->ip6o_type ) { + case 201: // Home Address Option, Mobile IPv6 RFC 6275 section 6.3 + { + if ( opt->ip6o_len == 16 ) + if ( homeAddr ) + reporter->Weird(SrcAddr(), DstAddr(), "multiple_home_addr_opts"); + else + homeAddr = new IPAddr(*((const in6_addr*)(data + 2))); + else + reporter->Weird(SrcAddr(), DstAddr(), "bad_home_addr_len"); + } + break; + + default: + break; + } + + if ( opt->ip6o_type == 0 ) + { + data += sizeof(uint8); + len -= sizeof(uint8); + } + else + { + data += 2 * sizeof(uint8) + opt->ip6o_len; + len -= 2 * sizeof(uint8) + opt->ip6o_len; + } + } + } + VectorVal* IPv6_Hdr_Chain::BuildVal() const { if ( ! ip6_ext_hdr_type ) diff --git a/src/IP.h b/src/IP.h index 7ed0968ef3..4ffb59151a 100644 --- a/src/IP.h +++ b/src/IP.h @@ -117,11 +117,15 @@ public: /** * Initializes the header chain from an IPv6 header structure. */ - IPv6_Hdr_Chain(const struct ip6_hdr* ip6) : route0_hdr_idx(0) + IPv6_Hdr_Chain(const struct ip6_hdr* ip6) : homeAddr(0), finalDst(0) { Init(ip6, false); } ~IPv6_Hdr_Chain() - { for ( size_t i = 0; i < chain.size(); ++i ) delete chain[i]; } + { + for ( size_t i = 0; i < chain.size(); ++i ) delete chain[i]; + delete homeAddr; + delete finalDst; + } /** * Returns the number of headers in the chain. @@ -173,22 +177,27 @@ public: (ntohs(GetFragHdr()->ip6f_offlg) & 0x0001) != 0 : 0; } /** - * Returns the final destination of the packet this chain belongs to. 
- * If the chain doesn't contain any routing type 0 header with non-zero - * segments left, this is the destination in the main IP header, else - * it's the last address in the routing header. (If there were to be - * more than one routing type 0 header with non-zero segments left, the - * last one would be the one referenced). + * If the chain contains a Destination Options header with a Home Address + * option as defined by Mobile IPv6 (RFC 6275), then return it, else + * return the source address in the main IPv6 header. */ - IPAddr FinalDst() const + IPAddr SrcAddr() const { - if ( route0_hdr_idx ) - { - const struct in6_addr* a = (const struct in6_addr*) - (chain[route0_hdr_idx]->Data() + - chain[route0_hdr_idx]->Length() - 16); - return IPAddr(*a); - } + if ( homeAddr ) + return IPAddr(*homeAddr); + else + return IPAddr(((const struct ip6_hdr*)(chain[0]->Data()))->ip6_src); + } + + /** + * If the chain contains a Routing header with non-zero segments left, + * then return the last address of the first such header, else return + * the destination address of the main IPv6 header. + */ + IPAddr DstAddr() const + { + if ( finalDst ) + return IPAddr(*finalDst); else return IPAddr(((const struct ip6_hdr*)(chain[0]->Data()))->ip6_dst); } @@ -208,11 +217,24 @@ protected: * Initializes the header chain from an IPv6 header structure, and replaces * the first next protocol pointer field that points to a fragment header. */ - IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next) : route0_hdr_idx(0) + IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next) + : homeAddr(0), finalDst(0) { Init(ip6, true, next); } void Init(const struct ip6_hdr* ip6, bool set_next, uint16 next = 0); + /** + * Process a routing header and allocate/remember the final destination + * address if it has segments left and is a valid routing header. + */ + void ProcessRoutingHeader(const struct ip6_rthdr* r, uint16 len); + + /** + * Inspect a Destination Option header's options for things we need to + * remember, such as the Home Address option from Mobile IPv6. + */ + void ProcessDstOpts(const struct ip6_dest* d, uint16 len); + vector chain; /** @@ -221,11 +243,15 @@ protected: uint16 length; /** - * Index of routing type 0 header with non-zero segments left in the header - * chain or zero if none exists (it's fine since the main IP header must - * always be at index zero). + * Home Address of the packet's source as defined by Mobile IPv6 (RFC 6275). */ - uint8 route0_hdr_idx; + IPAddr* homeAddr; + + /** + * The final destination address in chain's first Routing header that has + * non-zero segments left. + */ + IPAddr* finalDst; }; class IP_Hdr { @@ -278,20 +304,11 @@ public: const struct ip6_hdr* IP6_Hdr() const { return ip6; } - IPAddr SrcAddr() const - { return ip4 ? IPAddr(ip4->ip_src) : IPAddr(ip6->ip6_src); } - /** - * Returns the final destination address of the header's packet, which - * for IPv6 packets without a routing type 0 extension header and IPv4 - * packets is the destination address in the IP header. For IPv6 packets - * with a routing type 0 extension header and a non-zero number of - * segments left, the final destination is the last address in the routing - * header. If the segments left of a routing type 0 header were zero, - * then the final destination is in the IP header itself. + * Returns the source address held in the IP header. */ - IPAddr FinalDstAddr() const - { return ip4 ? IPAddr(ip4->ip_dst) : ip6_hdrs->FinalDst(); } + IPAddr IPheaderSrcAddr() const + { return ip4 ? 
IPAddr(ip4->ip_src) : IPAddr(ip6->ip6_src); } /** * Returns the destination address held in the IP header. @@ -299,6 +316,23 @@ public: IPAddr IPHeaderDstAddr() const { return ip4 ? IPAddr(ip4->ip_dst) : IPAddr(ip6->ip6_dst); } + /** + * For IPv4 or IPv6 headers that don't contain a Home Address option + * (Mobile IPv6, RFC 6275), return source address held in the IP header. + * For IPv6 headers that contain a Home Address option, return that address. + */ + IPAddr SrcAddr() const + { return ip4 ? IPAddr(ip4->ip_src) : ip6_hdrs->SrcAddr(); } + + /** + * For IPv4 or IPv6 headers that don't contain a Routing header with + * non-zero segments left, return destination address held in the IP header. + * For IPv6 headers with a Routing header that has non-zero segments left, + * return the last address in the first such Routing header. + */ + IPAddr DstAddr() const + { return ip4 ? IPAddr(ip4->ip_dst) : ip6_hdrs->DstAddr(); } + /** * Returns a pointer to the payload of the IP packet, usually an * upper-layer protocol. diff --git a/src/PacketFilter.cc b/src/PacketFilter.cc index 412bf14587..4fb3b1c8f7 100644 --- a/src/PacketFilter.cc +++ b/src/PacketFilter.cc @@ -58,7 +58,7 @@ bool PacketFilter::Match(const IP_Hdr* ip, int len, int caplen) if ( f ) return MatchFilter(*f, *ip, len, caplen); - f = (Filter*) dst_filter.Lookup(ip->FinalDstAddr(), 128); + f = (Filter*) dst_filter.Lookup(ip->DstAddr(), 128); if ( f ) return MatchFilter(*f, *ip, len, caplen); diff --git a/src/PacketSort.cc b/src/PacketSort.cc index 3fb0e9ccbf..04c525c4d1 100644 --- a/src/PacketSort.cc +++ b/src/PacketSort.cc @@ -45,7 +45,7 @@ PacketSortElement::PacketSortElement(PktSrc* arg_src, (pkt + tcp_offset); id.src_addr = ip_hdr->SrcAddr(); - id.dst_addr = ip_hdr->FinalDstAddr(); + id.dst_addr = ip_hdr->DstAddr(); id.src_port = tp->th_sport; id.dst_port = tp->th_dport; id.is_one_way = 0; diff --git a/src/Serializer.cc b/src/Serializer.cc index 6aa554cc2b..06bbf73f48 100644 --- a/src/Serializer.cc +++ b/src/Serializer.cc @@ -1105,7 +1105,7 @@ void Packet::Describe(ODesc* d) const const IP_Hdr ip = IP(); d->Add(ip.SrcAddr()); d->Add("->"); - d->Add(ip.FinalDstAddr()); + d->Add(ip.DstAddr()); } bool Packet::Serialize(SerialInfo* info) const diff --git a/src/Sessions.cc b/src/Sessions.cc index 4b5f201db5..84b57bdc62 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -493,7 +493,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, ConnID id; id.src_addr = ip_hdr->SrcAddr(); - id.dst_addr = ip_hdr->FinalDstAddr(); + id.dst_addr = ip_hdr->DstAddr(); Dictionary* d = 0; switch ( proto ) { @@ -667,7 +667,7 @@ FragReassembler* NetSessions::NextFragment(double t, const IP_Hdr* ip, ListVal* key = new ListVal(TYPE_ANY); key->Append(new AddrVal(ip->SrcAddr())); - key->Append(new AddrVal(ip->FinalDstAddr())); + key->Append(new AddrVal(ip->DstAddr())); key->Append(new Val(frag_id, TYPE_COUNT)); HashKey* h = ch->ComputeHash(key, 1); @@ -1177,7 +1177,7 @@ void NetSessions::Weird(const char* name, void NetSessions::Weird(const char* name, const IP_Hdr* ip) { - reporter->Weird(ip->SrcAddr(), ip->FinalDstAddr(), name); + reporter->Weird(ip->SrcAddr(), ip->DstAddr(), name); } unsigned int NetSessions::ConnectionMemoryUsage() diff --git a/src/UDP.cc b/src/UDP.cc index fc559bf59d..d85cb39edd 100644 --- a/src/UDP.cc +++ b/src/UDP.cc @@ -217,7 +217,7 @@ bool UDP_Analyzer::ValidateChecksum(const IP_Hdr* ip, const udphdr* up, int len) sum = 0; sum = ones_complement_checksum(ip->SrcAddr(), sum); - sum = 
ones_complement_checksum(ip->FinalDstAddr(), sum); + sum = ones_complement_checksum(ip->DstAddr(), sum); // Note, for IPv6, strictly speaking the protocol and length fields are // 32 bits rather than 16 bits. But because the upper bits are all zero, // we get the same checksum either way. diff --git a/src/bro.bif b/src/bro.bif index 9fbf66699e..49a57274af 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -4693,6 +4693,8 @@ function pcap_error%(%): string ## Installs a filter to drop packets from a given IP source address with ## a certain probability if none of a given set of TCP flags are set. +## Note that for IPv6 packets with a Destination options header that has +## the Home Address option, this filters out against that home address. ## ## ip: The IP address to drop. ## @@ -4795,7 +4797,7 @@ function uninstall_src_net_filter%(snet: subnet%) : bool ## Installs a filter to drop packets destined to a given IP address with ## a certain probability if none of a given set of TCP flags are set. -## Note that for IPv6 packets with a routing type 0 header and non-zero +## Note that for IPv6 packets with a routing type header and non-zero ## segments left, this filters out against the final destination of the ## packet according to the routing extension header. ## diff --git a/testing/btest/Baseline/core.checksums/bad.out b/testing/btest/Baseline/core.checksums/bad.out index cd3c799277..ef83d966a3 100644 --- a/testing/btest/Baseline/core.checksums/bad.out +++ b/testing/btest/Baseline/core.checksums/bad.out @@ -5,5 +5,7 @@ 1332785210.013051 weird: bad_TCP_checksum 1332782580.798420 weird: routing0_hdr 1332782580.798420 weird: bad_UDP_checksum +1333640536.489921 weird: bad_TCP_checksum +1333640468.146461 weird: bad_UDP_checksum 1332785250.469132 weird: bad_TCP_checksum 1332781342.923813 weird: bad_UDP_checksum diff --git a/testing/btest/Baseline/core.mobile-ipv6-home-addr/output b/testing/btest/Baseline/core.mobile-ipv6-home-addr/output new file mode 100644 index 0000000000..f28997ff0b --- /dev/null +++ b/testing/btest/Baseline/core.mobile-ipv6-home-addr/output @@ -0,0 +1,2 @@ +[orig_h=2001:78:1:32::1, orig_p=30000/udp, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=13000/udp] +[ip=, ip6=[class=0, flow=0, len=36, nxt=60, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=60, hopopts=, dstopts=[nxt=17, len=2, options=[[otype=1, len=2, data=\0\0], [otype=201, len=16, data= ^A\0x\0^A\02\0\0\0\0\0\0\0^A]]], routing=, fragment=, ah=, esp=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] diff --git a/testing/btest/Traces/chksums/ip6-hoa-tcp-bad-chksum.pcap b/testing/btest/Traces/chksums/ip6-hoa-tcp-bad-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..3aa4bd21fa5a72df37231879d3364577bf4b6c0c GIT binary patch literal 138 zcmca|c+)~A1{MYwaA0F#U<7g^_SX6ztYl|M0oE#rh literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-hoa-tcp-good-chksum.pcap b/testing/btest/Traces/chksums/ip6-hoa-tcp-good-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..a6fc9cb0177e3539d6154b9932faa1476fb427f5 GIT binary patch literal 138 zcmca|c+)~A1{MYwaA0F#U<7jP_SX78)#71D0+U_QR7%Dan3XCj2fSTEv u9yI>{7xe!>Dt}hCAQK}K1H(xH1xAJnps_{}{S1ty21X|sco-NXfB*n3fFHmB literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-hoa-udp-good-chksum.pcap b/testing/btest/Traces/chksums/ip6-hoa-udp-good-chksum.pcap new file mode 100644 index 
0000000000000000000000000000000000000000..f3e9d632c3665fbdbfe4a737fe839051d60bb615 GIT binary patch literal 130 zcmca|c+)~A1{MYwaA0F#U<7guch&kwr*bev0ofq@9}FO>+U_QR7%Dan3XCj2fSTEv u9yI>{7xe!>Dt}hCAQK}K1H(xH1xAJnps_{}{S1ty21X|sc%1J=0097LV+U_QR7%Dan3XCj2fSTEv u9yI>{7xe!>Dt}hCAQK}K1H(xH1xAJnps_{}{S1ty21X|sc%1J=0097LV>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-tcp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-udp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-tcp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-tcp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-udp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: btest-diff bad.out diff --git a/testing/btest/core/mobile-ipv6-home-addr.test b/testing/btest/core/mobile-ipv6-home-addr.test new file mode 100644 index 0000000000..f113016568 --- /dev/null +++ b/testing/btest/core/mobile-ipv6-home-addr.test @@ -0,0 +1,10 @@ +# @TEST-EXEC: bro -b -r $TRACES/ipv6-mobile-hoa.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +# Just check that the orig of the connection is the Home Address, but the +# source in the header is the actual source address. +event new_packet(c: connection, p: pkt_hdr) + { + print c$id; + print p; + } From 8e2ce1d350510ca7ab52f0d5a0e4654780eb1125 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 5 Apr 2012 11:41:14 -0500 Subject: [PATCH 212/651] Fix CMake from warning about unused ENABLE_PERFTOOLS_DEBUG variable. The variable was never "used" in the case google perftools was not found, so CMake warned about it being unused since it was manually specified as a cache variable in the configure script. There might be a better way to organize that, but this is a quick/easy hack. --- CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index febc2d6ec1..d27fa2d40b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -107,6 +107,11 @@ if (GOOGLEPERFTOOLS_FOUND) endif () endif () +if (ENABLE_PERFTOOLS_DEBUG) + # Just a no op to prevent CMake from complaining about manually-specified + # ENABLE_PERFTOOLS_DEBUG not being used if google perftools weren't found +endif () + set(brodeps ${BinPAC_LIBRARY} ${PCAP_LIBRARY} From c63d23f3af359731cbd79c1519415d832a95f994 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 5 Apr 2012 15:30:40 -0700 Subject: [PATCH 213/651] Preventing Bro processes that do neither local logging nor request remote logs from spawning threads. This applies to the proxy, which was still opening all the log files with then idle threads. 
--- src/RemoteSerializer.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index bf195e9d3a..61be8a9e8f 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -2503,6 +2503,9 @@ bool RemoteSerializer::SendLogCreateWriter(PeerID peer_id, EnumVal* id, EnumVal* if ( peer->phase != Peer::HANDSHAKE && peer->phase != Peer::RUNNING ) return false; + if ( ! peer->logs_requested ) + return false; + BinarySerializationFormat fmt; fmt.StartWrite(); @@ -2625,6 +2628,9 @@ error: bool RemoteSerializer::FlushLogBuffer(Peer* p) { + if ( ! p->logs_requested ) + return false; + last_flush = network_time; if ( p->state == Peer::CLOSING ) From fcd8f9b77e6117d6d540e9543921682a2596e563 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 6 Apr 2012 13:24:27 -0500 Subject: [PATCH 214/651] Fix table expiry for values assigned in bro_init() when reading live. Value assigned in bro_init() to a table with &create_expire weren't expiring when reading traffic from an interface. It worked when reading a pcap file, but I added a test case to show it still working. --- doc/scripts/builtins.rst | 7 +- src/Val.cc | 9 +- .../Baseline/language.expire_func/output | 378 ++++++++++++++++++ testing/btest/language/expire_func.test | 23 ++ 4 files changed, 413 insertions(+), 4 deletions(-) create mode 100644 testing/btest/Baseline/language.expire_func/output create mode 100644 testing/btest/language/expire_func.test diff --git a/doc/scripts/builtins.rst b/doc/scripts/builtins.rst index 30b344ca6b..c151601ab5 100644 --- a/doc/scripts/builtins.rst +++ b/doc/scripts/builtins.rst @@ -550,7 +550,12 @@ scripting language supports the following built-in attributes. .. bro:attr:: &expire_func - Called right before a container element expires. + Called right before a container element expires. The function's + first parameter is of the same type of the container and the second + parameter the same type of the container's index. The return + value is a :bro:type:`interval` indicating the amount of additional + time to wait before expiring the container element at the given + index. .. bro:attr:: &read_expire diff --git a/src/Val.cc b/src/Val.cc index 83bbc59b9d..f3977789f2 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -2136,10 +2136,13 @@ void TableVal::DoExpire(double t) (v = tbl->NextEntry(k, expire_cookie)); ++i ) { if ( v->ExpireAccessTime() == 0 ) + { // This happens when we insert val while network_time - // hasn't been initialized yet (e.g. in bro_init()). - // We correct the timestamp now. - v->SetExpireAccess(network_time); + // hasn't been initialized yet (e.g. in bro_init()), and + // also when bro_start_network_time hasn't been initialized + // (e.g. before first packet). The expire_access_time is + // correct, so we just need to wait. 
+ } else if ( v->ExpireAccessTime() + expire_time < t ) { diff --git a/testing/btest/Baseline/language.expire_func/output b/testing/btest/Baseline/language.expire_func/output new file mode 100644 index 0000000000..91cd2bad16 --- /dev/null +++ b/testing/btest/Baseline/language.expire_func/output @@ -0,0 +1,378 @@ +{ +[orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp], +i, +here, +am +} +{ +[orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp], +i, +[orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], +here, +am +} +{ +[orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp], +i, +[orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], +here, +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +am +} +{ +[orig_h=172.16.238.131, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], +[orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp], +i, +[orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], +here, +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +am +} +{ +[orig_h=172.16.238.131, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], +[orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp], +i, +[orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], +here, +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +[orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], +am +} +{ +[orig_h=172.16.238.131, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], +[orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp], +i, +[orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], +here, +[orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp], +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +[orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], +am +} +{ +[orig_h=172.16.238.131, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], +[orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp], +i, +[orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], +here, +[orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp], +[orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp], +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +[orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], +am +} +{ +[orig_h=172.16.238.131, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], +[orig_h=172.16.238.1, orig_p=17500/udp, resp_h=172.16.238.255, resp_p=17500/udp], +[orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp], +i, +[orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], +here, +[orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp], +[orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp], +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +[orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], +am +} +expired [orig_h=172.16.238.131, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp] +expired [orig_h=172.16.238.1, orig_p=17500/udp, 
resp_h=172.16.238.255, resp_p=17500/udp] +expired [orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp] +expired i +expired [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired here +expired [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp] +expired [orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp] +expired [orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp] +expired [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp] +expired am +{ +[orig_h=172.16.238.1, orig_p=49659/tcp, resp_h=172.16.238.131, resp_p=21/tcp] +} +{ +[orig_h=172.16.238.1, orig_p=49659/tcp, resp_h=172.16.238.131, resp_p=21/tcp], +[orig_h=172.16.238.131, orig_p=45126/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +expired [orig_h=172.16.238.1, orig_p=49659/tcp, resp_h=172.16.238.131, resp_p=21/tcp] +expired [orig_h=172.16.238.131, orig_p=45126/udp, resp_h=172.16.238.2, resp_p=53/udp] +{ +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=57272/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], 
+[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=57272/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33818/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=57272/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33818/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45140/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=57272/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55368/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33818/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45140/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=57272/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55368/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33818/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45140/udp, resp_h=172.16.238.2, resp_p=53/udp], 
+[orig_h=172.16.238.131, orig_p=53102/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=57272/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55368/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=59573/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33818/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45140/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=53102/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=57272/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55368/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=59573/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33818/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=52952/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45140/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=53102/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=48621/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=57272/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp], +[orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=55368/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=59573/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33818/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=52952/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45140/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=53102/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp], 
+[orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +expired [orig_h=172.16.238.131, orig_p=48621/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=37846/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=57272/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=55515/tcp, resp_h=74.125.225.81, resp_p=80/tcp] +expired [orig_h=172.16.238.131, orig_p=44555/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=55368/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=59573/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=33818/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=33109/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=52952/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=45140/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=53102/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=51970/udp, resp_h=172.16.238.2, resp_p=53/udp] +expired [orig_h=172.16.238.131, orig_p=54304/udp, resp_h=172.16.238.2, resp_p=53/udp] +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, resp_h=141.142.192.39, resp_p=22/tcp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56214/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, resp_h=141.142.192.39, resp_p=22/tcp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56214/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=38118/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, resp_h=141.142.192.39, resp_p=22/tcp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56214/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=38118/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=37934/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, resp_h=141.142.192.39, resp_p=22/tcp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56214/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=38118/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=37934/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, 
resp_h=141.142.192.39, resp_p=22/tcp], +[orig_h=172.16.238.131, orig_p=36682/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56214/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=38118/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=46552/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=37934/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, resp_h=141.142.192.39, resp_p=22/tcp], +[orig_h=172.16.238.131, orig_p=36682/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=58367/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56214/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=38118/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=46552/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=37934/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, resp_h=141.142.192.39, resp_p=22/tcp], +[orig_h=172.16.238.131, orig_p=36682/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=58367/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56214/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=38118/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=46552/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=42269/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=37934/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, resp_h=141.142.192.39, resp_p=22/tcp], +[orig_h=172.16.238.131, orig_p=36682/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=58367/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56214/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=38118/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56485/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=46552/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=42269/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=37934/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, resp_h=141.142.192.39, resp_p=22/tcp], +[orig_h=172.16.238.131, orig_p=36682/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=58367/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56214/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=38118/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56485/udp, 
resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=46552/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=42269/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=37934/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=39723/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, resp_h=141.142.192.39, resp_p=22/tcp], +[orig_h=172.16.238.131, orig_p=36682/udp, resp_h=172.16.238.2, resp_p=53/udp] +} +{ +[orig_h=172.16.238.131, orig_p=54935/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=58367/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56214/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=123/udp, resp_h=69.50.219.51, resp_p=123/udp], +[orig_h=172.16.238.131, orig_p=38118/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=56485/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=46552/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=42269/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=33624/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=37934/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=39723/udp, resp_h=172.16.238.2, resp_p=53/udp], +[orig_h=172.16.238.131, orig_p=45908/tcp, resp_h=141.142.192.39, resp_p=22/tcp], +[orig_h=172.16.238.131, orig_p=36682/udp, resp_h=172.16.238.2, resp_p=53/udp] +} diff --git a/testing/btest/language/expire_func.test b/testing/btest/language/expire_func.test new file mode 100644 index 0000000000..653a4d9a86 --- /dev/null +++ b/testing/btest/language/expire_func.test @@ -0,0 +1,23 @@ +# @TEST-EXEC: bro -C -r $TRACES/var-services-std-ports.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +function inform_me(s: set[string], idx: string): interval + { + print fmt("expired %s", idx); + return 0secs; + } + +global s: set[string] &create_expire=1secs &expire_func=inform_me; + +event bro_init() + { + add s["i"]; + add s["am"]; + add s["here"]; + } + +event new_connection(c: connection) + { + add s[fmt("%s", c$id)]; + print s; + } From 91330f1e1c2edcf3ba610190ab03e235e36debde Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 9 Apr 2012 14:39:00 -0500 Subject: [PATCH 215/651] Add support for mobile IPv6 Mobility Header (RFC 6275). - Accessible at script-layer through 'mobile_ipv6_message' event. - All Mobile IPv6 analysis now enabled through --enable-mobile-ipv6 configure-time option, otherwise the mobility header, routing type 2, and Home Address Destination option are ignored. 
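As a rough usage sketch (not part of this patch): a site script could handle the new event as below. The handler and its field accesses assume the pkt_hdr/ip6_ext_hdr/ip6_mobility_hdr record layout added by this change, and the event name and signature come from the new event.bif entry.

event mobile_ipv6_message(p: pkt_hdr)
	{
	# Only IPv6 packets can carry a Mobility Header.
	if ( ! p?$ip6 )
		return;

	# Walk the extension header chain; the mobility field is only set on
	# the entry that actually holds the Mobility Header.
	for ( i in p$ip6$exts )
		{
		local e = p$ip6$exts[i];

		if ( e?$mobility )
			# msg$id (the MH Type) selects which &optional message
			# record (brr, hoti, coti, hot, cot, bu, back, be) is set.
			print fmt("MH type %d from %s", e$mobility$mh_type, p$ip6$src);
		}
	}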
--- config.h.in | 3 + configure | 5 + scripts/base/init-bare.bro | 156 ++++++++++++ src/IP.cc | 155 ++++++++++- src/IP.h | 62 ++++- src/Sessions.cc | 29 +++ src/event.bif | 7 + src/net_util.cc | 26 ++ src/net_util.h | 25 ++ testing/btest/Baseline/core.checksums/bad.out | 2 - .../Baseline/core.disable-mobile-ipv6/output | 1 + testing/btest/Baseline/core.ipv6_esp/output | 240 +++++++++--------- .../Baseline/core.ipv6_ext_headers/output | 2 +- .../core.mobile-ipv6-home-addr/output | 2 +- .../Baseline/core.mobile-ipv6-routing/output | 2 + .../Baseline/core.mobility-checksums/bad.out | 3 + .../Baseline/core.mobility-checksums/good.out | 0 .../btest/Baseline/core.mobility_msg/output | 16 ++ .../Traces/chksums/mip6-bad-mh-chksum.pcap | Bin 0 -> 118 bytes .../Traces/chksums/mip6-good-mh-chksum.pcap | Bin 0 -> 118 bytes .../{ => mobile-ipv6}/ipv6-mobile-hoa.trace | Bin .../mobile-ipv6/ipv6-mobile-routing.trace | Bin 0 -> 130 bytes .../btest/Traces/mobile-ipv6/mip6_back.trace | Bin 0 -> 110 bytes .../btest/Traces/mobile-ipv6/mip6_be.trace | Bin 0 -> 118 bytes .../btest/Traces/mobile-ipv6/mip6_brr.trace | Bin 0 -> 102 bytes .../btest/Traces/mobile-ipv6/mip6_bu.trace | Bin 0 -> 110 bytes .../btest/Traces/mobile-ipv6/mip6_cot.trace | Bin 0 -> 118 bytes .../btest/Traces/mobile-ipv6/mip6_coti.trace | Bin 0 -> 110 bytes .../btest/Traces/mobile-ipv6/mip6_hot.trace | Bin 0 -> 118 bytes .../btest/Traces/mobile-ipv6/mip6_hoti.trace | Bin 0 -> 110 bytes testing/btest/core/checksums.test | 4 - testing/btest/core/disable-mobile-ipv6.test | 12 + testing/btest/core/ipv6_esp.test | 3 +- testing/btest/core/mobile-ipv6-home-addr.test | 3 +- testing/btest/core/mobile-ipv6-routing.test | 11 + testing/btest/core/mobility-checksums.test | 9 + testing/btest/core/mobility_msg.test | 44 ++++ 37 files changed, 688 insertions(+), 134 deletions(-) create mode 100644 testing/btest/Baseline/core.disable-mobile-ipv6/output create mode 100644 testing/btest/Baseline/core.mobile-ipv6-routing/output create mode 100644 testing/btest/Baseline/core.mobility-checksums/bad.out create mode 100644 testing/btest/Baseline/core.mobility-checksums/good.out create mode 100644 testing/btest/Baseline/core.mobility_msg/output create mode 100644 testing/btest/Traces/chksums/mip6-bad-mh-chksum.pcap create mode 100644 testing/btest/Traces/chksums/mip6-good-mh-chksum.pcap rename testing/btest/Traces/{ => mobile-ipv6}/ipv6-mobile-hoa.trace (100%) create mode 100644 testing/btest/Traces/mobile-ipv6/ipv6-mobile-routing.trace create mode 100644 testing/btest/Traces/mobile-ipv6/mip6_back.trace create mode 100644 testing/btest/Traces/mobile-ipv6/mip6_be.trace create mode 100644 testing/btest/Traces/mobile-ipv6/mip6_brr.trace create mode 100644 testing/btest/Traces/mobile-ipv6/mip6_bu.trace create mode 100644 testing/btest/Traces/mobile-ipv6/mip6_cot.trace create mode 100644 testing/btest/Traces/mobile-ipv6/mip6_coti.trace create mode 100644 testing/btest/Traces/mobile-ipv6/mip6_hot.trace create mode 100644 testing/btest/Traces/mobile-ipv6/mip6_hoti.trace create mode 100644 testing/btest/core/disable-mobile-ipv6.test create mode 100644 testing/btest/core/mobile-ipv6-routing.test create mode 100644 testing/btest/core/mobility-checksums.test create mode 100644 testing/btest/core/mobility_msg.test diff --git a/config.h.in b/config.h.in index 0047344c51..6c64fb61bc 100644 --- a/config.h.in +++ b/config.h.in @@ -111,6 +111,9 @@ /* Use Google's perftools */ #cmakedefine USE_PERFTOOLS_DEBUG +/* Analyze Mobile IPv6 traffic */ +#cmakedefine ENABLE_MOBILE_IPV6 + /* 
Version number of package */ #define VERSION "@VERSION@" diff --git a/configure b/configure index 05aa12815b..3c1cca8c9d 100755 --- a/configure +++ b/configure @@ -27,6 +27,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]... Optional Features: --enable-debug compile in debugging mode + --enable-mobile-ipv6 analyze mobile IPv6 features defined by RFC 6275 --enable-perftools-debug use Google's perftools for debugging --disable-broccoli don't build or install the Broccoli library --disable-broctl don't install Broctl @@ -98,6 +99,7 @@ append_cache_entry INSTALL_AUX_TOOLS BOOL true append_cache_entry INSTALL_BROCCOLI BOOL true append_cache_entry INSTALL_BROCTL BOOL true append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING +append_cache_entry ENABLE_MOBILE_IPV6 BOOL false # parse arguments while [ $# -ne 0 ]; do @@ -132,6 +134,9 @@ while [ $# -ne 0 ]; do --enable-debug) append_cache_entry ENABLE_DEBUG BOOL true ;; + --enable-mobile-ipv6) + append_cache_entry ENABLE_MOBILE_IPV6 BOOL true + ;; --enable-perftools-debug) append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true ;; diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index b2237d7af8..a439a0dcb0 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -957,6 +957,7 @@ const IPPROTO_ESP = 50; ##< IPv6 encapsulating security payload header. const IPPROTO_AH = 51; ##< IPv6 authentication header. const IPPROTO_NONE = 59; ##< IPv6 no next header. const IPPROTO_DSTOPTS = 60; ##< IPv6 destination options header. +const IPPROTO_MOBILITY = 135; ##< IPv6 mobility header. ## Values extracted from an IPv6 extension header's (e.g. hop-by-hop or ## destination option headers) option field. @@ -1059,6 +1060,159 @@ type ip6_esp: record { seq: count; }; +## Values extracted from an IPv6 Mobility Binding Refresh Request message. +## +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +type ip6_mobility_brr: record { + ## Reserved. + rsv: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Home Test Init message. +## +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +type ip6_mobility_hoti: record { + ## Reserved. + rsv: count; + ## Home Init Cookie. + cookie: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Care-of Test Init message. +## +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +type ip6_mobility_coti: record { + ## Reserved. + rsv: count; + ## Care-of Init Cookie. + cookie: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Home Test message. +## +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +type ip6_mobility_hot: record { + ## Home Nonce Index. + nonce_idx: count; + ## Home Init Cookie. + cookie: count; + ## Home Keygen Token. + token: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Care-of Test message. +## +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +type ip6_mobility_cot: record { + ## Care-of Nonce Index. + nonce_idx: count; + ## Care-of Init Cookie. + cookie: count; + ## Care-of Keygen Token. + token: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Binding Update message. +## +## .. 
bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +type ip6_mobility_bu: record { + ## Sequence number. + seq: count; + ## Acknowledge bit. + a: bool; + ## Home Registration bit. + h: bool; + ## Link-Local Address Compatibility bit. + l: bool; + ## Key Management Mobility Capability bit. + k: bool; + ## Lifetime. + life: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Binding Acknowledgement message. +## +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +type ip6_mobility_back: record { + ## Status. + status: count; + ## Key Management Mobility Capability. + k: bool; + ## Sequence number. + seq: count; + ## Lifetime. + life: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Binding Error message. +## +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +type ip6_mobility_be: record { + ## Status. + status: count; + ## Home Address. + hoa: addr; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility header's message data. +## +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain +type ip6_mobility_msg: record { + ## The type of message from the header's MH Type field. + id: count; + ## Binding Refresh Request. + brr: ip6_mobility_brr &optional; + ## Home Test Init. + hoti: ip6_mobility_hoti &optional; + ## Care-of Test Init. + coti: ip6_mobility_coti &optional; + ## Home Test. + hot: ip6_mobility_hot &optional; + ## Care-of Test. + cot: ip6_mobility_cot &optional; + ## Binding Update. + bu: ip6_mobility_bu &optional; + ## Binding Acknowledgement. + back: ip6_mobility_back &optional; + ## Binding Error. + be: ip6_mobility_be &optional; +}; + +## Values extracted from an IPv6 Mobility header. +## +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain +type ip6_mobility_hdr: record { + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :bro:id:`IPPROTO_ICMP`. + nxt: count; + ## Length of header in 8-octet units, excluding first unit. + len: count; + ## Mobility header type used to identify header's the message. + mh_type: count; + ## Reserved field. + rsv: count; + ## Mobility header checksum. + chksum: count; + ## Mobility header message + msg: ip6_mobility_msg; +}; + ## A general container for a more specific IPv6 extension header. ## ## .. bro:see:: pkt_hdr ip4_hdr ip6_hopopts ip6_dstopts ip6_routing ip6_fragment @@ -1079,6 +1233,8 @@ type ip6_ext_hdr: record { ah: ip6_ah &optional; ## Encapsulating security payload header. esp: ip6_esp &optional; + ## Mobility header. + mobility: ip6_mobility_hdr &optional; }; ## Values extracted from an IPv6 header. 
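Illustration only, not part of the patch: given the record definitions added above, a script can tell which mobility message was parsed by testing the &optional fields of ip6_mobility_msg; only the field matching the MH Type is populated. A hypothetical helper:

function describe_mobility(mh: ip6_mobility_hdr): string
	{
	local m = mh$msg;

	if ( m?$bu )
		return fmt("Binding Update seq=%d lifetime=%d", m$bu$seq, m$bu$life);

	if ( m?$back )
		return fmt("Binding Ack status=%d seq=%d", m$back$status, m$back$seq);

	# The other message types (brr, hoti, coti, hot, cot, be) would be
	# handled the same way; fall back to the raw MH Type.
	return fmt("mobility message type %d", m$id);
	}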
diff --git a/src/IP.cc b/src/IP.cc index 10de8696f9..27c4b83114 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -15,6 +15,16 @@ static RecordType* ip6_routing_type = 0; static RecordType* ip6_fragment_type = 0; static RecordType* ip6_ah_type = 0; static RecordType* ip6_esp_type = 0; +static RecordType* ip6_mob_type = 0; +static RecordType* ip6_mob_msg_type = 0; +static RecordType* ip6_mob_brr_type = 0; +static RecordType* ip6_mob_hoti_type = 0; +static RecordType* ip6_mob_coti_type = 0; +static RecordType* ip6_mob_hot_type = 0; +static RecordType* ip6_mob_cot_type = 0; +static RecordType* ip6_mob_bu_type = 0; +static RecordType* ip6_mob_back_type = 0; +static RecordType* ip6_mob_be_type = 0; static inline RecordType* hdrType(RecordType*& type, const char* name) { @@ -24,7 +34,7 @@ static inline RecordType* hdrType(RecordType*& type, const char* name) return type; } -static VectorVal* BuildOptionsVal(const u_char* data, uint16 len) +static VectorVal* BuildOptionsVal(const u_char* data, int len) { VectorVal* vv = new VectorVal(new VectorType( hdrType(ip6_option_type, "ip6_option")->Ref())); @@ -154,6 +164,130 @@ RecordVal* IPv6_Hdr::BuildRecordVal(VectorVal* chain) const } break; +#ifdef ENABLE_MOBILE_IPV6 + case IPPROTO_MOBILITY: + { + rv = new RecordVal(hdrType(ip6_mob_type, "ip6_mobility_hdr")); + const struct ip6_mobility* mob = (const struct ip6_mobility*) data; + rv->Assign(0, new Val(mob->ip6mob_payload, TYPE_COUNT)); + rv->Assign(1, new Val(mob->ip6mob_len, TYPE_COUNT)); + rv->Assign(2, new Val(mob->ip6mob_type, TYPE_COUNT)); + rv->Assign(3, new Val(mob->ip6mob_rsv, TYPE_COUNT)); + rv->Assign(4, new Val(ntohs(mob->ip6mob_chksum), TYPE_COUNT)); + + RecordVal* msg = new RecordVal(hdrType(ip6_mob_msg_type, "ip6_mobility_msg")); + msg->Assign(0, new Val(mob->ip6mob_type, TYPE_COUNT)); + + uint16 off = sizeof(ip6_mobility); + const u_char* msg_data = data + off; + + switch ( mob->ip6mob_type ) { + case 0: + { + RecordVal* m = new RecordVal(hdrType(ip6_mob_brr_type, "ip6_mobility_brr")); + m->Assign(0, new Val(ntohs(*((uint16*)msg_data)), TYPE_COUNT)); + off += sizeof(uint16); + m->Assign(1, BuildOptionsVal(data + off, Length() - off)); + msg->Assign(1, m); + } + break; + + case 1: + { + RecordVal* m = new RecordVal(hdrType(ip6_mob_brr_type, "ip6_mobility_hoti")); + m->Assign(0, new Val(ntohs(*((uint16*)msg_data)), TYPE_COUNT)); + m->Assign(1, new Val(ntohll(*((uint64*)(msg_data + sizeof(uint16)))), TYPE_COUNT)); + off += sizeof(uint16) + sizeof(uint64); + m->Assign(2, BuildOptionsVal(data + off, Length() - off)); + msg->Assign(2, m); + break; + } + + case 2: + { + RecordVal* m = new RecordVal(hdrType(ip6_mob_brr_type, "ip6_mobility_coti")); + m->Assign(0, new Val(ntohs(*((uint16*)msg_data)), TYPE_COUNT)); + m->Assign(1, new Val(ntohll(*((uint64*)(msg_data + sizeof(uint16)))), TYPE_COUNT)); + off += sizeof(uint16) + sizeof(uint64); + m->Assign(2, BuildOptionsVal(data + off, Length() - off)); + msg->Assign(3, m); + break; + } + + case 3: + { + RecordVal* m = new RecordVal(hdrType(ip6_mob_brr_type, "ip6_mobility_hot")); + m->Assign(0, new Val(ntohs(*((uint16*)msg_data)), TYPE_COUNT)); + m->Assign(1, new Val(ntohll(*((uint64*)(msg_data + sizeof(uint16)))), TYPE_COUNT)); + m->Assign(2, new Val(ntohll(*((uint64*)(msg_data + sizeof(uint16) + sizeof(uint64)))), TYPE_COUNT)); + off += sizeof(uint16) + 2 * sizeof(uint64); + m->Assign(3, BuildOptionsVal(data + off, Length() - off)); + msg->Assign(4, m); + break; + } + + case 4: + { + RecordVal* m = new RecordVal(hdrType(ip6_mob_brr_type, 
"ip6_mobility_cot")); + m->Assign(0, new Val(ntohs(*((uint16*)msg_data)), TYPE_COUNT)); + m->Assign(1, new Val(ntohll(*((uint64*)(msg_data + sizeof(uint16)))), TYPE_COUNT)); + m->Assign(2, new Val(ntohll(*((uint64*)(msg_data + sizeof(uint16) + sizeof(uint64)))), TYPE_COUNT)); + off += sizeof(uint16) + 2 * sizeof(uint64); + m->Assign(3, BuildOptionsVal(data + off, Length() - off)); + msg->Assign(5, m); + break; + } + + case 5: + { + RecordVal* m = new RecordVal(hdrType(ip6_mob_brr_type, "ip6_mobility_bu")); + m->Assign(0, new Val(ntohs(*((uint16*)msg_data)), TYPE_COUNT)); + m->Assign(1, new Val(ntohs(*((uint16*)(msg_data + sizeof(uint16)))) & 0x8000, TYPE_BOOL)); + m->Assign(2, new Val(ntohs(*((uint16*)(msg_data + sizeof(uint16)))) & 0x4000, TYPE_BOOL)); + m->Assign(3, new Val(ntohs(*((uint16*)(msg_data + sizeof(uint16)))) & 0x2000, TYPE_BOOL)); + m->Assign(4, new Val(ntohs(*((uint16*)(msg_data + sizeof(uint16)))) & 0x1000, TYPE_BOOL)); + m->Assign(5, new Val(ntohs(*((uint16*)(msg_data + 2*sizeof(uint16)))), TYPE_COUNT)); + off += 3 * sizeof(uint16); + m->Assign(6, BuildOptionsVal(data + off, Length() - off)); + msg->Assign(6, m); + break; + } + + case 6: + { + RecordVal* m = new RecordVal(hdrType(ip6_mob_brr_type, "ip6_mobility_back")); + m->Assign(0, new Val(*((uint8*)msg_data), TYPE_COUNT)); + m->Assign(1, new Val(*((uint8*)(msg_data + sizeof(uint8))) & 0x80, TYPE_BOOL)); + m->Assign(2, new Val(ntohs(*((uint16*)(msg_data + sizeof(uint16)))), TYPE_COUNT)); + m->Assign(3, new Val(ntohs(*((uint16*)(msg_data + 2*sizeof(uint16)))), TYPE_COUNT)); + off += 3 * sizeof(uint16); + m->Assign(4, BuildOptionsVal(data + off, Length() - off)); + msg->Assign(7, m); + break; + } + + case 7: + { + RecordVal* m = new RecordVal(hdrType(ip6_mob_brr_type, "ip6_mobility_be")); + m->Assign(0, new Val(*((uint8*)msg_data), TYPE_COUNT)); + const in6_addr* hoa = (const in6_addr*)(msg_data + sizeof(uint16)); + m->Assign(1, new AddrVal(IPAddr(*hoa))); + off += sizeof(uint16) + sizeof(in6_addr); + m->Assign(2, BuildOptionsVal(data + off, Length() - off)); + msg->Assign(8, m); + break; + } + + default: + reporter->Weird(fmt("unknown_mobility_type_%d", mob->ip6mob_type)); + break; + } + + rv->Assign(5, msg); + } + break; +#endif //ENABLE_MOBILE_IPV6 + default: break; } @@ -276,6 +410,9 @@ static inline bool isIPv6ExtHeader(uint8 type) case IPPROTO_FRAGMENT: case IPPROTO_AH: case IPPROTO_ESP: +#ifdef ENABLE_MOBILE_IPV6 + case IPPROTO_MOBILITY: +#endif return true; default: return false; @@ -309,13 +446,19 @@ void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) if ( current_type == IPPROTO_ROUTING ) ProcessRoutingHeader((const struct ip6_rthdr*) hdrs, len); +#ifdef ENABLE_MOBILE_IPV6 + // Only Mobile IPv6 has a destination option we care about right now if ( current_type == IPPROTO_DSTOPTS ) ProcessDstOpts((const struct ip6_dest*) hdrs, len); +#endif hdrs += len; length += len; } while ( current_type != IPPROTO_FRAGMENT && current_type != IPPROTO_ESP && +#ifdef ENABLE_MOBILE_IPV6 + current_type != IPPROTO_MOBILITY && +#endif isIPv6ExtHeader(next_type) ); } @@ -347,6 +490,7 @@ void IPv6_Hdr_Chain::ProcessRoutingHeader(const struct ip6_rthdr* r, uint16 len) } break; +#ifdef ENABLE_MOBILE_IPV6 case 2: // Defined by Mobile IPv6 RFC 6275 { if ( r->ip6r_segleft > 0 ) @@ -358,6 +502,7 @@ void IPv6_Hdr_Chain::ProcessRoutingHeader(const struct ip6_rthdr* r, uint16 len) } } break; +#endif default: reporter->Weird(fmt("unknown_routing_type_%d", r->ip6r_type)); @@ -365,6 +510,7 @@ void 
IPv6_Hdr_Chain::ProcessRoutingHeader(const struct ip6_rthdr* r, uint16 len) } } +#ifdef ENABLE_MOBILE_IPV6 void IPv6_Hdr_Chain::ProcessDstOpts(const struct ip6_dest* d, uint16 len) { const u_char* data = (const u_char*) d; @@ -403,6 +549,7 @@ void IPv6_Hdr_Chain::ProcessDstOpts(const struct ip6_dest* d, uint16 len) } } } +#endif VectorVal* IPv6_Hdr_Chain::BuildVal() const { @@ -415,6 +562,7 @@ VectorVal* IPv6_Hdr_Chain::BuildVal() const ip6_fragment_type = internal_type("ip6_fragment")->AsRecordType(); ip6_ah_type = internal_type("ip6_ah")->AsRecordType(); ip6_esp_type = internal_type("ip6_esp")->AsRecordType(); + ip6_mob_type = internal_type("ip6_mobility_hdr")->AsRecordType(); } VectorVal* rval = new VectorVal(new VectorType(ip6_ext_hdr_type->Ref())); @@ -445,6 +593,11 @@ VectorVal* IPv6_Hdr_Chain::BuildVal() const case IPPROTO_ESP: ext_hdr->Assign(6, v); break; +#ifdef ENABLE_MOBILE_IPV6 + case IPPROTO_MOBILITY: + ext_hdr->Assign(7, v); + break; +#endif default: reporter->InternalError("IPv6_Hdr_Chain bad header %d", type); break; diff --git a/src/IP.h b/src/IP.h index 4ffb59151a..8a6ade470d 100644 --- a/src/IP.h +++ b/src/IP.h @@ -14,6 +14,22 @@ #include #include +#ifdef ENABLE_MOBILE_IPV6 + +#ifndef IPPROTO_MOBILITY +#define IPPROTO_MOBILITY 135 +#endif + +struct ip6_mobility { + uint8 ip6mob_payload; + uint8 ip6mob_len; + uint8 ip6mob_type; + uint8 ip6mob_rsv; + uint16 ip6mob_chksum; +}; + +#endif //ENABLE_MOBILE_IPV6 + /** * Base class for IPv6 header/extensions. */ @@ -38,6 +54,9 @@ public: case IPPROTO_ROUTING: case IPPROTO_FRAGMENT: case IPPROTO_AH: +#ifdef ENABLE_MOBILE_IPV6 + case IPPROTO_MOBILITY: +#endif ((ip6_ext*)data)->ip6e_nxt = next_type; break; case IPPROTO_ESP: @@ -62,6 +81,9 @@ public: case IPPROTO_ROUTING: case IPPROTO_FRAGMENT: case IPPROTO_AH: +#ifdef ENABLE_MOBILE_IPV6 + case IPPROTO_MOBILITY: +#endif return ((ip6_ext*)data)->ip6e_nxt; case IPPROTO_ESP: default: @@ -80,6 +102,9 @@ public: case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: case IPPROTO_ROUTING: +#ifdef ENABLE_MOBILE_IPV6 + case IPPROTO_MOBILITY: +#endif return 8 + 8 * ((ip6_ext*)data)->ip6e_len; case IPPROTO_FRAGMENT: return 8; @@ -117,13 +142,19 @@ public: /** * Initializes the header chain from an IPv6 header structure. */ - IPv6_Hdr_Chain(const struct ip6_hdr* ip6) : homeAddr(0), finalDst(0) + IPv6_Hdr_Chain(const struct ip6_hdr* ip6) : +#ifdef ENABLE_MOBILE_IPV6 + homeAddr(0), +#endif + finalDst(0) { Init(ip6, false); } ~IPv6_Hdr_Chain() { for ( size_t i = 0; i < chain.size(); ++i ) delete chain[i]; +#ifdef ENABLE_MOBILE_IPV6 delete homeAddr; +#endif delete finalDst; } @@ -183,9 +214,11 @@ public: */ IPAddr SrcAddr() const { +#ifdef ENABLE_MOBILE_IPV6 if ( homeAddr ) return IPAddr(*homeAddr); else +#endif return IPAddr(((const struct ip6_hdr*)(chain[0]->Data()))->ip6_src); } @@ -217,8 +250,11 @@ protected: * Initializes the header chain from an IPv6 header structure, and replaces * the first next protocol pointer field that points to a fragment header. 
*/ - IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next) - : homeAddr(0), finalDst(0) + IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next) : +#ifdef ENABLE_MOBILE_IPV6 + homeAddr(0), +#endif + finalDst(0) { Init(ip6, true, next); } void Init(const struct ip6_hdr* ip6, bool set_next, uint16 next = 0); @@ -229,11 +265,13 @@ protected: */ void ProcessRoutingHeader(const struct ip6_rthdr* r, uint16 len); +#ifdef ENABLE_MOBILE_IPV6 /** * Inspect a Destination Option header's options for things we need to * remember, such as the Home Address option from Mobile IPv6. */ void ProcessDstOpts(const struct ip6_dest* d, uint16 len); +#endif vector chain; @@ -242,10 +280,12 @@ protected: */ uint16 length; +#ifdef ENABLE_MOBILE_IPV6 /** * Home Address of the packet's source as defined by Mobile IPv6 (RFC 6275). */ IPAddr* homeAddr; +#endif /** * The final destination address in chain's first Routing header that has @@ -345,6 +385,22 @@ public: return ((const u_char*) ip6) + ip6_hdrs->TotalLength(); } +#ifdef ENABLE_MOBILE_IPV6 + /** + * Returns a pointer to the mobility header of the IP packet, if present, + * else a null pointer. + */ + const ip6_mobility* MobilityHeader() const + { + if ( ip4 ) + return 0; + else if ( (*ip6_hdrs)[ip6_hdrs->Size()-1]->Type() != IPPROTO_MOBILITY ) + return 0; + else + return (const ip6_mobility*)(*ip6_hdrs)[ip6_hdrs->Size()-1]->Data(); + } +#endif + /** * Returns the length of the IP packet's payload (length of packet minus * header length or, for IPv6, also minus length of all extension headers). diff --git a/src/Sessions.cc b/src/Sessions.cc index 84b57bdc62..eb70e687db 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -481,6 +481,35 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, return; } +#ifdef ENABLE_MOBILE_IPV6 + // We stop building the chain when seeing IPPROTO_MOBILITY so it's always + // last if present + if ( ip_hdr->LastHeader() == IPPROTO_MOBILITY ) + { + dump_this_packet = 1; + + if ( ! ignore_checksums && mobility_header_checksum(ip_hdr) != 0xffff ) + { + Weird("bad_MH_checksum", hdr, pkt); + Remove(f); + return; + } + + if ( mobile_ipv6_message ) + { + val_list* vl = new val_list(); + vl->append(ip_hdr->BuildPktHdrVal()); + mgr.QueueEvent(mobile_ipv6_message, vl); + } + + if ( ip_hdr->NextProto() != IPPROTO_NONE ) + Weird("mobility_piggyback", hdr, pkt); + + Remove(f); + return; + } +#endif + int proto = ip_hdr->NextProto(); if ( CheckHeaderTrunc(proto, len, caplen, hdr, pkt) ) diff --git a/src/event.bif b/src/event.bif index 113c003e37..0d90a6e88d 100644 --- a/src/event.bif +++ b/src/event.bif @@ -478,6 +478,13 @@ event ipv6_ext_headers%(c: connection, p: pkt_hdr%); ## .. bro:see:: new_packet tcp_packet ipv6_ext_headers event esp_packet%(p: pkt_hdr%); +## Generated for any packet using a Mobile IPv6 Mobility Header. +## +## p: Information from the header of the packet that triggered the event. +## +## .. bro:see:: new_packet tcp_packet ipv6_ext_headers +event mobile_ipv6_message%(p: pkt_hdr%); + ## Generated for every packet that has non-empty transport-layer payload. This is a ## very low-level and expensive event that should be avoided when at all possible. 
## It's usually infeasible to handle when processing even medium volumes of diff --git a/src/net_util.cc b/src/net_util.cc index 578f5f44ad..ecdafdf247 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -12,6 +12,7 @@ #include "Reporter.h" #include "net_util.h" #include "IPAddr.h" +#include "IP.h" // - adapted from tcpdump // Returns the ones-complement checksum of a chunk of b short-aligned bytes. @@ -53,6 +54,31 @@ int icmp_checksum(const struct icmp* icmpp, int len) return sum; } +#ifdef ENABLE_MOBILE_IPV6 +int mobility_header_checksum(const IP_Hdr* ip) + { + const ip6_mobility* mh = ip->MobilityHeader(); + + if ( ! mh ) return 0; + + uint32 sum = 0; + uint8 mh_len = 8 + 8 * mh->ip6mob_len; + + if ( mh_len % 2 == 1 ) + reporter->Weird(ip->SrcAddr(), ip->DstAddr(), "odd_mobility_hdr_len"); + + sum = ones_complement_checksum(ip->SrcAddr(), sum); + sum = ones_complement_checksum(ip->DstAddr(), sum); + // Note, for IPv6, strictly speaking the protocol and length fields are + // 32 bits rather than 16 bits. But because the upper bits are all zero, + // we get the same checksum either way. + sum += htons(IPPROTO_MOBILITY); + sum += htons(mh_len); + sum = ones_complement_checksum(mh, mh_len, sum); + + return sum; + } +#endif #define CLASS_A 0x00000000 #define CLASS_B 0x80000000 diff --git a/src/net_util.h b/src/net_util.h index 4d215b3743..3f8eb01e2a 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -73,6 +73,11 @@ extern int ones_complement_checksum(const IPAddr& a, uint32 sum); extern int icmp_checksum(const struct icmp* icmpp, int len); +#ifdef ENABLE_MOBILE_IPV6 +class IP_Hdr; +extern int mobility_header_checksum(const IP_Hdr* ip); +#endif + // Returns 'A', 'B', 'C' or 'D' extern char addr_to_class(uint32 addr); @@ -93,6 +98,8 @@ extern uint32 extract_uint32(const u_char* data); inline double ntohd(double d) { return d; } inline double htond(double d) { return d; } +inline uint64 ntohll(uint64 i) { return i; } +inline uint64 htonll(uint64 i) { return i; } #else @@ -118,6 +125,24 @@ inline double ntohd(double d) inline double htond(double d) { return ntohd(d); } +inline uint64 ntohll(uint64 i) + { + u_char c; + union { + uint64 i; + u_char c[8]; + } x; + + x.i = i; + c = x.c[0]; x.c[0] = x.c[7]; x.c[7] = c; + c = x.c[1]; x.c[1] = x.c[6]; x.c[6] = c; + c = x.c[2]; x.c[2] = x.c[5]; x.c[5] = c; + c = x.c[3]; x.c[3] = x.c[4]; x.c[4] = c; + return x.i; + } + +inline uint64 htonll(uint64 i) { return ntohll(i); } + #endif #endif diff --git a/testing/btest/Baseline/core.checksums/bad.out b/testing/btest/Baseline/core.checksums/bad.out index ef83d966a3..cd3c799277 100644 --- a/testing/btest/Baseline/core.checksums/bad.out +++ b/testing/btest/Baseline/core.checksums/bad.out @@ -5,7 +5,5 @@ 1332785210.013051 weird: bad_TCP_checksum 1332782580.798420 weird: routing0_hdr 1332782580.798420 weird: bad_UDP_checksum -1333640536.489921 weird: bad_TCP_checksum -1333640468.146461 weird: bad_UDP_checksum 1332785250.469132 weird: bad_TCP_checksum 1332781342.923813 weird: bad_UDP_checksum diff --git a/testing/btest/Baseline/core.disable-mobile-ipv6/output b/testing/btest/Baseline/core.disable-mobile-ipv6/output new file mode 100644 index 0000000000..b156353f74 --- /dev/null +++ b/testing/btest/Baseline/core.disable-mobile-ipv6/output @@ -0,0 +1 @@ +1333663011.602839 weird: unknown_protocol_135 diff --git a/testing/btest/Baseline/core.ipv6_esp/output b/testing/btest/Baseline/core.ipv6_esp/output index 97a8434e7b..02fb7e154f 100644 --- a/testing/btest/Baseline/core.ipv6_esp/output +++ 
b/testing/btest/Baseline/core.ipv6_esp/output @@ -1,120 +1,120 @@ -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9]]]], tcp=, 
udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, 
hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, 
exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, 
fragment=, ah=, esp=[spi=13, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=6]]]], tcp=, 
udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=10]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=1]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=2]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=3]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=4]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=5]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, 
nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=6]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=7]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=8]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=9]]]], tcp=, udp=, icmp=] -[ip=, ip6=[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=10]]]], tcp=, udp=, icmp=] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6], mobility=]]] +[class=0, flow=0, len=100, 
nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, 
fragment=, ah=, esp=[spi=13, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9], mobility=]]] +[class=0, flow=0, 
len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, 
hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=1], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=2], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=3], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=4], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=5], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=6], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=7], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=8], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=9], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=2], 
mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=10], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=1], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=2], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=3], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=4], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=5], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=6], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=7], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=8], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=9], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=10], mobility=]]] diff --git a/testing/btest/Baseline/core.ipv6_ext_headers/output b/testing/btest/Baseline/core.ipv6_ext_headers/output index c6ebddc7e1..b4cd249371 100644 --- a/testing/btest/Baseline/core.ipv6_ext_headers/output +++ b/testing/btest/Baseline/core.ipv6_ext_headers/output @@ -1,3 +1,3 @@ weird routing0_hdr from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:78:1:32::2 [orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=53/udp, resp_h=2001:78:1:32::2, resp_p=53/udp] -[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, 
dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] +[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=, mobility=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] diff --git a/testing/btest/Baseline/core.mobile-ipv6-home-addr/output b/testing/btest/Baseline/core.mobile-ipv6-home-addr/output index f28997ff0b..88cbe0cb16 100644 --- a/testing/btest/Baseline/core.mobile-ipv6-home-addr/output +++ b/testing/btest/Baseline/core.mobile-ipv6-home-addr/output @@ -1,2 +1,2 @@ [orig_h=2001:78:1:32::1, orig_p=30000/udp, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=13000/udp] -[ip=, ip6=[class=0, flow=0, len=36, nxt=60, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=60, hopopts=, dstopts=[nxt=17, len=2, options=[[otype=1, len=2, data=\0\0], [otype=201, len=16, data= ^A\0x\0^A\02\0\0\0\0\0\0\0^A]]], routing=, fragment=, ah=, esp=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] +[ip=, ip6=[class=0, flow=0, len=36, nxt=60, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=60, hopopts=, dstopts=[nxt=17, len=2, options=[[otype=1, len=2, data=\0\0], [otype=201, len=16, data= ^A\0x\0^A\02\0\0\0\0\0\0\0^A]]], routing=, fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] diff --git a/testing/btest/Baseline/core.mobile-ipv6-routing/output b/testing/btest/Baseline/core.mobile-ipv6-routing/output new file mode 100644 index 0000000000..04292caaa7 --- /dev/null +++ b/testing/btest/Baseline/core.mobile-ipv6-routing/output @@ -0,0 +1,2 @@ +[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=30000/udp, resp_h=2001:78:1:32::1, resp_p=13000/udp] +[ip=, ip6=[class=0, flow=0, len=36, nxt=43, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=43, hopopts=, dstopts=, routing=[nxt=17, len=2, rtype=2, segleft=1, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] diff --git a/testing/btest/Baseline/core.mobility-checksums/bad.out b/testing/btest/Baseline/core.mobility-checksums/bad.out new file mode 100644 index 0000000000..6ea9955402 --- /dev/null +++ b/testing/btest/Baseline/core.mobility-checksums/bad.out @@ -0,0 +1,3 @@ +1333988844.893456 weird: bad_MH_checksum +1333995733.276730 weird: bad_TCP_checksum +1333995701.656496 weird: bad_UDP_checksum diff --git a/testing/btest/Baseline/core.mobility-checksums/good.out b/testing/btest/Baseline/core.mobility-checksums/good.out new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/btest/Baseline/core.mobility_msg/output b/testing/btest/Baseline/core.mobility_msg/output new file mode 100644 index 0000000000..6f8d6a1699 --- /dev/null +++ 
b/testing/btest/Baseline/core.mobility_msg/output @@ -0,0 +1,16 @@ +Binding ACK: +[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=6, rsv=0, chksum=53722, msg=[id=6, brr=, hoti=, coti=, hot=, cot=, bu=, back=[status=0, k=T, seq=42, life=8, options=[[otype=1, len=2, data=\0\0]]], be=]]]]] +Binding Error: +[class=0, flow=0, len=24, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=7, rsv=0, chksum=45272, msg=[id=7, brr=, hoti=, coti=, hot=, cot=, bu=, back=, be=[status=1, hoa=2001:78:1:32::1, options=[]]]]]]] +Binding Refresh Request: +[class=0, flow=0, len=8, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=0, mh_type=0, rsv=0, chksum=55703, msg=[id=0, brr=[rsv=0, options=[]], hoti=, coti=, hot=, cot=, bu=, back=, be=]]]]] +Binding Update: +[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=5, rsv=0, chksum=868, msg=[id=5, brr=, hoti=, coti=, hot=, cot=, bu=[seq=37, a=T, h=T, l=F, k=T, life=3, options=[[otype=1, len=2, data=\0\0]]], back=, be=]]]]] +Care-of Test: +[class=0, flow=0, len=24, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=4, rsv=0, chksum=54378, msg=[id=4, brr=, hoti=, coti=, hot=, cot=[nonce_idx=13, cookie=15, token=255, options=[]], bu=, back=, be=]]]]] +Care-of Test Init: +[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=2, rsv=0, chksum=55181, msg=[id=2, brr=, hoti=, coti=[rsv=0, cookie=1, options=[]], hot=, cot=, bu=, back=, be=]]]]] +Home Test: +[class=0, flow=0, len=24, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=3, rsv=0, chksum=54634, msg=[id=3, brr=, hoti=, coti=, hot=[nonce_idx=13, cookie=15, token=255, options=[]], cot=, bu=, back=, be=]]]]] +Home Test Init: +[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=1, rsv=0, chksum=55437, msg=[id=1, brr=, hoti=[rsv=0, cookie=1, options=[]], coti=, hot=, cot=, bu=, back=, be=]]]]] diff --git a/testing/btest/Traces/chksums/mip6-bad-mh-chksum.pcap b/testing/btest/Traces/chksums/mip6-bad-mh-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..9a2437baef86be2a73e00c7b59d631b8c07c44fc GIT binary patch literal 118 zcmca|c+)~A1{MYwaA0F#U<7jB@HYDkEaGMG1F}K*KNvt%wcSksF(ld@6c|~405!8S iJ!t&@FX;b&RQ{}NYbJJ}af}QKj0_b(V~rsC85jZZY#hV@ literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/mip6-good-mh-chksum.pcap b/testing/btest/Traces/chksums/mip6-good-mh-chksum.pcap new file mode 
100644 index 0000000000000000000000000000000000000000..6183fd9cb167c878e7245107840dfa503c69ffde GIT binary patch literal 118 zcmca|c+)~A1{MYwaA0F#U<7i0iZ=VV%QG_g0ofq@9}FO>+U_QR7!vIc3XCj2fSTEv j9yI>{7xe!>Dt}hCH4{6-h8v6w3XBXDKx2&{`WYAj4hbGe literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/ipv6-mobile-hoa.trace b/testing/btest/Traces/mobile-ipv6/ipv6-mobile-hoa.trace similarity index 100% rename from testing/btest/Traces/ipv6-mobile-hoa.trace rename to testing/btest/Traces/mobile-ipv6/ipv6-mobile-hoa.trace diff --git a/testing/btest/Traces/mobile-ipv6/ipv6-mobile-routing.trace b/testing/btest/Traces/mobile-ipv6/ipv6-mobile-routing.trace new file mode 100644 index 0000000000000000000000000000000000000000..6289f268e3c45d7b7b5209a6464a5c3e956737eb GIT binary patch literal 130 zcmca|c+)~A1{MYwaA0F#U<7iGi8uRCInB-x1!RNpe=vZkYP*{NVyI|4C@`}80BUAu tdeHd)U(o;msQg*kf=o;x!x$7887hFr8iCkAz*uTvbb^8Bo@WFQ004*ZAsGMw literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/mobile-ipv6/mip6_back.trace b/testing/btest/Traces/mobile-ipv6/mip6_back.trace new file mode 100644 index 0000000000000000000000000000000000000000..9b97186979a5e95cca0c7b3c1cf47a04fd56aae3 GIT binary patch literal 110 zcmca|c+)~A1{MYwaA0F#U<7iMMeF>p8*wtY0ofq@9}FO>+U_QR7y|7M3XCj2fSTEv g9yI>{7xe!>Dt}hCH6t6t#aj#w3|b5vj7$s+0P(aP$N&HU literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/mobile-ipv6/mip6_be.trace b/testing/btest/Traces/mobile-ipv6/mip6_be.trace new file mode 100644 index 0000000000000000000000000000000000000000..19862ee4be183ec0eb9362222bb95e77553d216d GIT binary patch literal 118 zcmca|c+)~A1{MYwaA0F#U<7iCMC<%F3-L1e0ofq@9}FO>+U_QR7!vIc3XCj2fSTEv j9yI>{7xe!>Dt}hCH4{6-h8v6w3XBXDKx2&{`WYAj>8~B< literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/mobile-ipv6/mip6_brr.trace b/testing/btest/Traces/mobile-ipv6/mip6_brr.trace new file mode 100644 index 0000000000000000000000000000000000000000..4020ae8b14d02d46325748bd1d3a275deb00b070 GIT binary patch literal 102 zcmca|c+)~A1{MYwaA0F#U<7hRSnB*Gi+LFAfNT){4+aobZFdtu432gO1xA)1K+Wt- X4;ugf3;O>bl|L)n8mR2%bOr_h&yO4* literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/mobile-ipv6/mip6_bu.trace b/testing/btest/Traces/mobile-ipv6/mip6_bu.trace new file mode 100644 index 0000000000000000000000000000000000000000..1c8c61e09de7a2d9dd8d944170500e941eb1e4d0 GIT binary patch literal 110 zcmca|c+)~A1{MYwaA0F#U<7hr2-o>9*viP@24sWqe=vZkYP*{NVhFT5C@`}80BUAu gdeHd)U(o;msQg*k){LwS%qa}27Z@0r8JQRu06XFx&;S4c literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/mobile-ipv6/mip6_cot.trace b/testing/btest/Traces/mobile-ipv6/mip6_cot.trace new file mode 100644 index 0000000000000000000000000000000000000000..2d8d215a41435f598a3a5241a214cba96e79f142 GIT binary patch literal 118 zcmca|c+)~A1{MYwaA0F#U<7jR3f1{5UE^Tz1F}K*KNvt%wcSksF(ld@6c|~405!8S fJ!t&@FX;b&RQ{}NYbF+kD_IP@5Pb~%Q2IXr5+fdc literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/mobile-ipv6/mip6_coti.trace b/testing/btest/Traces/mobile-ipv6/mip6_coti.trace new file mode 100644 index 0000000000000000000000000000000000000000..2a5790cc7c50c182fc5f96d6acfe3b1237634303 GIT binary patch literal 110 zcmca|c+)~A1{MYwaA0F#U<7jh2-f-U_{hcJ24sWqe=vZkYP*{NVhFT5C@`}80BUAu bdeHd)U(o;msQg*k){IOH*LxwlfFvUTV-Oxc literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/mobile-ipv6/mip6_hot.trace b/testing/btest/Traces/mobile-ipv6/mip6_hot.trace new file mode 100644 index 0000000000000000000000000000000000000000..0b54c9797d427e460e83256e23ee36c00dd15a4c GIT binary patch literal 118 zcmca|c+)~A1{MYwaA0F#U<7g&3f1{nJ>_EX1F}K*KNvt%wcSksF(ld@6c|~405!8S 
fJ!t&@FX;b&RQ{}NYbIuft62=Z5Pb~%Q2IXrAn+cl literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/mobile-ipv6/mip6_hoti.trace b/testing/btest/Traces/mobile-ipv6/mip6_hoti.trace new file mode 100644 index 0000000000000000000000000000000000000000..3daaeb2905bbb8219e1b6029bf17d256acd56946 GIT binary patch literal 110 zcmca|c+)~A1{MYwaA0F#U<7jR2-f+#N^&u{0ofq@9}FO>+U_QR7y|7M3XCj2fSTEv b9yI>{7xe!>Dt}hCH6tU#jb4Z@Ajt>-?-3l- literal 0 HcmV?d00001 diff --git a/testing/btest/core/checksums.test b/testing/btest/core/checksums.test index 1cf7f9c54f..c01ab710af 100644 --- a/testing/btest/core/checksums.test +++ b/testing/btest/core/checksums.test @@ -3,16 +3,12 @@ # @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-tcp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-udp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-tcp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-tcp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-udp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: btest-diff bad.out diff --git a/testing/btest/core/disable-mobile-ipv6.test b/testing/btest/core/disable-mobile-ipv6.test new file mode 100644 index 0000000000..84dc43dae8 --- /dev/null +++ b/testing/btest/core/disable-mobile-ipv6.test @@ -0,0 +1,12 @@ +# @TEST-REQUIRES: grep -q "#undef ENABLE_MOBILE_IPV6" $BUILD/config.h +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_back.trace %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +event mobile_ipv6_message(p: pkt_hdr) + { + if ( ! 
p?$ip6 ) return; + + for ( i in p$ip6$exts ) + if ( p$ip6$exts[i]$id == IPPROTO_MOBILITY ) + print p$ip6; + } diff --git a/testing/btest/core/ipv6_esp.test b/testing/btest/core/ipv6_esp.test index b606c23400..8744df0036 100644 --- a/testing/btest/core/ipv6_esp.test +++ b/testing/btest/core/ipv6_esp.test @@ -6,5 +6,6 @@ event esp_packet(p: pkt_hdr) { - print p; + if ( p?$ip6 ) + print p$ip6; } diff --git a/testing/btest/core/mobile-ipv6-home-addr.test b/testing/btest/core/mobile-ipv6-home-addr.test index f113016568..536d381f9b 100644 --- a/testing/btest/core/mobile-ipv6-home-addr.test +++ b/testing/btest/core/mobile-ipv6-home-addr.test @@ -1,4 +1,5 @@ -# @TEST-EXEC: bro -b -r $TRACES/ipv6-mobile-hoa.trace %INPUT >output +# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/config.h +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/ipv6-mobile-hoa.trace %INPUT >output # @TEST-EXEC: btest-diff output # Just check that the orig of the connection is the Home Address, but the diff --git a/testing/btest/core/mobile-ipv6-routing.test b/testing/btest/core/mobile-ipv6-routing.test new file mode 100644 index 0000000000..6ad5be002d --- /dev/null +++ b/testing/btest/core/mobile-ipv6-routing.test @@ -0,0 +1,11 @@ +# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/config.h +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/ipv6-mobile-routing.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +# Just check that the responder of the connection is the final routing +# address, but the destination in the header is the actual destination address. +event new_packet(c: connection, p: pkt_hdr) + { + print c$id; + print p; + } diff --git a/testing/btest/core/mobility-checksums.test b/testing/btest/core/mobility-checksums.test new file mode 100644 index 0000000000..1d41daf543 --- /dev/null +++ b/testing/btest/core/mobility-checksums.test @@ -0,0 +1,9 @@ +# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/config.h +# @TEST-EXEC: bro -b -r $TRACES/chksums/mip6-bad-mh-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-tcp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-udp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/mip6-good-mh-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-tcp-good-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-udp-good-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: btest-diff bad.out +# @TEST-EXEC: btest-diff good.out diff --git a/testing/btest/core/mobility_msg.test b/testing/btest/core/mobility_msg.test new file mode 100644 index 0000000000..73461e7944 --- /dev/null +++ b/testing/btest/core/mobility_msg.test @@ -0,0 +1,44 @@ +# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/config.h +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_back.trace %INPUT >output +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_be.trace %INPUT >>output +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_brr.trace %INPUT >>output +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_bu.trace %INPUT >>output +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_cot.trace %INPUT >>output +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_coti.trace %INPUT >>output +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_hot.trace %INPUT >>output +# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_hoti.trace %INPUT >>output +# @TEST-EXEC: btest-diff output + +event mobile_ipv6_message(p: pkt_hdr) + { + if ( ! 
p?$ip6 ) return; + + for ( i in p$ip6$exts ) + { + if ( p$ip6$exts[i]$id == IPPROTO_MOBILITY ) + { + if ( ! p$ip6$exts[i]?$mobility ) + print "ERROR: Mobility extension header uninitialized"; + + if ( p$ip6$exts[i]$mobility$mh_type == 0 ) + print "Binding Refresh Request:"; + else if ( p$ip6$exts[i]$mobility$mh_type == 1 ) + print "Home Test Init:"; + else if ( p$ip6$exts[i]$mobility$mh_type == 2 ) + print "Care-of Test Init:"; + else if ( p$ip6$exts[i]$mobility$mh_type == 3 ) + print "Home Test:"; + else if ( p$ip6$exts[i]$mobility$mh_type == 4 ) + print "Care-of Test:"; + else if ( p$ip6$exts[i]$mobility$mh_type == 5 ) + print "Binding Update:"; + else if ( p$ip6$exts[i]$mobility$mh_type == 6 ) + print "Binding ACK:"; + else if ( p$ip6$exts[i]$mobility$mh_type == 7 ) + print "Binding Error:"; + else + print "Unknown Mobility Header:"; + print p$ip6; + } + } + } From 7131feefbc5164c7e92fbba938531fef0d913514 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 9 Apr 2012 17:30:57 -0700 Subject: [PATCH 216/651] Working on DataSeries support. - The option to use integers instead of double was ignored. - Renaming script-level options to remove the ds_ prefix. - Log rotation didn't work. - A set of simple unit tests. --- .../frameworks/logging/writers/dataseries.bro | 10 +- src/logging.bif | 9 +- src/logging/writers/DataSeries.cc | 194 ++++----- src/logging/writers/DataSeries.h | 63 ++- .../ssh.ds.xml | 16 + .../out | 380 ++++++++++++++++++ .../ssh.ds.txt | 43 ++ .../conn.ds.txt | 96 +++++ .../conn.ds.txt | 96 +++++ .../http.ds.txt | 90 +++++ .../frameworks/logging/dataseries/options.bro | 43 ++ .../frameworks/logging/dataseries/rotate.bro | 33 ++ .../logging/dataseries/test-logging.bro | 34 ++ .../logging/dataseries/time-as-int.bro | 8 + .../logging/dataseries/wikipedia.bro | 8 + testing/scripts/has-writer | 6 + 16 files changed, 1001 insertions(+), 128 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt create mode 100644 testing/btest/scripts/base/frameworks/logging/dataseries/options.bro create mode 100644 testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro create mode 100644 testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro create mode 100644 testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro create mode 100644 testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro create mode 100755 testing/scripts/has-writer diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro index c8ba922d2a..daf59ebf42 100644 --- a/scripts/base/frameworks/logging/writers/dataseries.bro +++ b/scripts/base/frameworks/logging/writers/dataseries.bro @@ -10,18 +10,18 @@ export { ## 'lzo' -- LZO compression. Very fast decompression times. ## 'gz' -- GZIP compression. Slower than LZF, but also produces smaller output. ## 'bz2' -- BZIP2 compression.
Slower than GZIP, but also produces smaller output. - const ds_compression = "lzf" &redef; + const compression = "lzf" &redef; ## The extent buffer size. ## Larger values here lead to better compression and more efficient writes, but ## also increases the lag between the time events are received and the time they ## are actually written to disk. - const ds_extent_size = 65536 &redef; + const extent_size = 65536 &redef; ## Should we dump the XML schema we use for this ds file to disk? ## If yes, the XML schema shares the name of the logfile, but has ## an XML ending. - const ds_dump_schema = T &redef; + const dump_schema = F &redef; ## How many threads should DataSeries spawn to perform compression? ## Note that this dictates the number of threads per log stream. If @@ -31,7 +31,7 @@ export { ## Default value is 1, which will spawn one thread / core / stream. ## ## MAX is 128, MIN is 1. - const ds_num_threads = 1 &redef; + const num_threads = 1 &redef; ## Should time be stored as an integer or a double? ## Storing time as a double leads to possible precision issues and @@ -41,7 +41,7 @@ export { ## when working with the raw DataSeries format. ## ## Double timestamps are used by default. - const ds_use_integer = F &redef; + const use_integer_for_time = F &redef; } # Default function to postprocess a rotated DataSeries log file. It moves the diff --git a/src/logging.bif b/src/logging.bif index 6e66de8772..efc6ed0b4b 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -76,7 +76,8 @@ const unset_field: string; module LogDataSeries; -const ds_compression: string; -const ds_extent_size: count; -const ds_dump_schema: bool; -const ds_num_threads: count; +const compression: string; +const extent_size: count; +const dump_schema: bool; +const use_integer_for_time: bool; +const num_threads: count; diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index 27c4cd6009..5ee8a812da 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -14,78 +14,6 @@ using namespace logging; using namespace writer; -// NOTE: Naming conventions are a little bit scattershot at the moment. -// Within the scope of this file, a function name prefixed by '_' denotes a -// static function. - -// ************************ LOCAL PROTOTYPES ********************************* - -struct SchemaValue; - -/** - * Turns a log value into a std::string. Uses an ostringstream to do the - * heavy lifting, but still need to switch on the type to know which value - * in the union to give to the string string for processing. - * - * @param val The value we wish to convert to a string - * @return the string value of val - */ -static std::string _LogValueToString(threading::Value* val); - -/** - * Takes a field type and converts it to a relevant DataSeries type. - * - * @param field We extract the type from this and convert it into a relevant DS type. - * @return String representation of type that DataSeries can understand. - */ -static string _GetDSFieldType(const threading::Field* field); - -/** - * Takes a field type and converts it to a readable string. - * - * @param field We extract the type from this and convert it into a readable string. 
- * @return String representation of the field's type - */ -static string _GetBroTypeString(const threading::Field *field); - -/** - * Takes a list of types, a list of names, and a title, and uses it to construct a valid DataSeries XML schema - * thing, which is then returned as a std::string - * - * @param opts std::vector of strings containing a list of options to be appended to each field (e.g. "pack_relative=yes") - * @param sTitle Name of this schema. Ideally, these schemas would be aggregated and re-used. - */ -static string _BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle); - -/** - * Are there any options we should put into the XML schema? - * - * @param field We extract the type from this and return any options that make sense for that type. - * @return Options that can be added directly to the XML (e.g. "pack_relative=\"yes\"") - */ -static std::string _GetDSOptionsForType(const threading::Field *field); - -/** - * Internal helper structure; populate a vector of these which is passed to the XML generator for its use. - */ -struct SchemaValue -{ - string ds_type; - string bro_type; - string field_name; - string field_options; - - SchemaValue(const threading::Field *field) - { - ds_type = _GetDSFieldType(field); - field_name = string(field->name); - field_options = _GetDSOptionsForType(field); - bro_type = _GetBroTypeString(field); - } -}; - -// ************************ LOCAL IMPL ********************************* - std::string DataSeries::LogValueToString(threading::Value *val) { const int strsz = 1024; @@ -127,7 +55,11 @@ std::string DataSeries::LogValueToString(threading::Value *val) // in the near-term, this *should* lead to better pack_relative (and thus smaller output files). case TYPE_TIME: case TYPE_INTERVAL: - ostr << (unsigned long)(DataSeries::TIME_SCALE * val->val.double_val); + if ( ds_use_integer_for_time ) + ostr << (unsigned long)(DataSeries::TIME_SCALE * val->val.double_val); + else + ostr << val->val.double_val; + return ostr.str(); case TYPE_DOUBLE: @@ -186,7 +118,7 @@ std::string DataSeries::LogValueToString(threading::Value *val) } } -static string _GetDSFieldType(const threading::Field *field) +string DataSeries::GetDSFieldType(const threading::Field *field) { switch(field->type) { @@ -197,13 +129,15 @@ static string _GetDSFieldType(const threading::Field *field) case TYPE_COUNTER: case TYPE_PORT: case TYPE_INT: - case TYPE_TIME: - case TYPE_INTERVAL: return "int64"; case TYPE_DOUBLE: return "double"; + case TYPE_TIME: + case TYPE_INTERVAL: + return ds_use_integer_for_time ? 
"int64" : "double"; + case TYPE_SUBNET: case TYPE_ADDR: case TYPE_ENUM: @@ -217,7 +151,7 @@ static string _GetDSFieldType(const threading::Field *field) } } -static string _GetBroTypeString(const threading::Field *field) +string DataSeries::GetBroTypeString(const threading::Field *field) { switch(field->type) { @@ -256,7 +190,7 @@ static string _GetBroTypeString(const threading::Field *field) } } -static string _BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle) +string DataSeries::BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle) { if("" == sTitle) { @@ -276,13 +210,21 @@ static string _BuildDSSchemaFromFieldTypes(const vector& vals, stri return xmlschema; } -static std::string _GetDSOptionsForType(const threading::Field *field) +std::string DataSeries::GetDSOptionsForType(const threading::Field *field) { switch(field->type) { case TYPE_TIME: case TYPE_INTERVAL: - return "pack_relative=\"" + std::string(field->name) + "\""; + { + std::string s = "pack_relative=\"" + std::string(field->name) + "\""; + + if ( ! ds_use_integer_for_time ) + s += " pack_scale=\"1000000\""; + + return s; + } + case TYPE_SUBNET: case TYPE_ADDR: case TYPE_ENUM: @@ -300,16 +242,40 @@ static std::string _GetDSOptionsForType(const threading::Field *field) DataSeries::DataSeries(WriterFrontend* frontend) : WriterBackend(frontend) { - ds_compression = string((const char *)BifConst::LogDataSeries::ds_compression->Bytes(), BifConst::LogDataSeries::ds_compression->Len()); - ds_dump_schema = BifConst::LogDataSeries::ds_dump_schema; - ds_extent_size = BifConst::LogDataSeries::ds_extent_size; - ds_num_threads = BifConst::LogDataSeries::ds_num_threads; + ds_compression = string((const char *)BifConst::LogDataSeries::compression->Bytes(), BifConst::LogDataSeries::compression->Len()); + ds_dump_schema = BifConst::LogDataSeries::dump_schema; + ds_extent_size = BifConst::LogDataSeries::extent_size; + ds_num_threads = BifConst::LogDataSeries::num_threads; + ds_use_integer_for_time = BifConst::LogDataSeries::use_integer_for_time; } DataSeries::~DataSeries() { } +bool DataSeries::OpenLog(string path) + { + log_file = new DataSeriesSink(path + ".ds", compress_type); + log_file->writeExtentLibrary(log_types); + + for(size_t i = 0; i < schema_list.size(); ++i) + extents.insert(std::make_pair(schema_list[i].field_name, GeneralField::create(log_series, schema_list[i].field_name))); + + if(ds_extent_size < ROW_MIN) + { + fprintf(stderr, "%d is not a valid value for 'rows'. Using min of %d instead.\n", (int)ds_extent_size, (int)ROW_MIN); + ds_extent_size = ROW_MIN; + } + else if(ds_extent_size > ROW_MAX) + { + fprintf(stderr, "%d is not a valid value for 'rows'. 
Using max of %d instead.\n", (int)ds_extent_size, (int)ROW_MAX); + ds_extent_size = ROW_MAX; + } + log_output = new OutputModule(*log_file, log_series, log_type, ds_extent_size); + + return true; + } + bool DataSeries::DoInit(string path, int num_fields, const threading::Field* const * fields) { // We first construct an XML schema thing (and, if ds_dump_schema is @@ -333,14 +299,18 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con { DataSeriesSink::setCompressorCount(ds_num_threads); } - vector schema_list; + for ( int i = 0; i < num_fields; i++ ) { const threading::Field* field = fields[i]; - SchemaValue val(field); + SchemaValue val; + val.ds_type = GetDSFieldType(field); + val.field_name = string(field->name); + val.field_options = GetDSOptionsForType(field); + val.bro_type = GetBroTypeString(field); schema_list.push_back(val); } - string schema = _BuildDSSchemaFromFieldTypes(schema_list, path); + string schema = BuildDSSchemaFromFieldTypes(schema_list, path); if(ds_dump_schema) { FILE * pFile; @@ -353,7 +323,7 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con fclose (pFile); } - int compress_type = Extent::compress_all; + compress_type = Extent::compress_all; if(ds_compression == "lzf") { @@ -385,28 +355,11 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con fprintf(stderr, "Defaulting to 'any'\n"); } - log_type = const_cast(log_types.registerType(schema)); + log_type = const_cast(log_types.registerType(schema)); log_series.setType(*log_type); - log_file = new DataSeriesSink(path + ".ds", compress_type); - log_file->writeExtentLibrary(log_types); - for(size_t i = 0; i < schema_list.size(); ++i) - extents.insert(std::make_pair(schema_list[i].field_name, GeneralField::create(log_series, schema_list[i].field_name))); - - if(ds_extent_size < ROW_MIN) - { - fprintf(stderr, "%d is not a valid value for 'rows'. Using min of %d instead.\n", (int)ds_extent_size, (int)ROW_MIN); - ds_extent_size = ROW_MIN; - } - else if(ds_extent_size > ROW_MAX) - { - fprintf(stderr, "%d is not a valid value for 'rows'. Using max of %d instead.\n", (int)ds_extent_size, (int)ROW_MAX); - ds_extent_size = ROW_MAX; - } - log_output = new OutputModule(*log_file, log_series, log_type, ds_extent_size); - - return true; + return OpenLog(path); } @@ -416,18 +369,26 @@ bool DataSeries::DoFlush() return true; } -bool DataSeries::DoFinish() -{ - for(ExtentIterator iter = extents.begin(); - iter != extents.end(); ++iter) - { +void DataSeries::CloseLog() + { + for( ExtentIterator iter = extents.begin(); iter != extents.end(); ++iter ) delete iter->second; - } + extents.clear(); - // Don't delete the file before you delete the output, or bad things happen. + + // Don't delete the file before you delete the output, or bad things + // happen. delete log_output; delete log_file; + log_output = 0; + log_file = 0; + } + +bool DataSeries::DoFinish() +{ + CloseLog(); + return WriterBackend::DoFinish(); } @@ -453,8 +414,7 @@ bool DataSeries::DoWrite(int num_fields, const threading::Field* const * fields, bool DataSeries::DoRotate(string rotated_path, double open, double close, bool terminating) { // Note that if DS files are rotated too often, the aggregate log size will be (much) larger. 
- - DoFinish(); + CloseLog(); string dsname = Path() + ".ds"; string nname = rotated_path + ".ds"; @@ -466,7 +426,7 @@ bool DataSeries::DoRotate(string rotated_path, double open, double close, bool t return false; } - return DoInit(Path(), NumFields(), Fields()); + return OpenLog(Path()); } bool DataSeries::DoSetBuf(bool enabled) diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h index 5331975937..319cb72ec5 100644 --- a/src/logging/writers/DataSeries.h +++ b/src/logging/writers/DataSeries.h @@ -42,24 +42,83 @@ private: static const size_t THREAD_MAX = 128; // Maximum number of compression threads that DataSeries may spawn. static const size_t TIME_SCALE = 1000000; // Fixed-point multiplier for time values when converted to integers. + struct SchemaValue + { + string ds_type; + string bro_type; + string field_name; + string field_options; + }; + + /** + * Turns a log value into a std::string. Uses an ostringstream to do the + * heavy lifting, but still need to switch on the type to know which value + * in the union to give to the string string for processing. + * + * @param val The value we wish to convert to a string + * @return the string value of val + */ std::string LogValueToString(threading::Value *val); + /** + * Takes a field type and converts it to a relevant DataSeries type. + * + * @param field We extract the type from this and convert it into a relevant DS type. + * @return String representation of type that DataSeries can understand. + */ + string GetDSFieldType(const threading::Field *field); + + /** + * Are there any options we should put into the XML schema? + * + * @param field We extract the type from this and return any options that make sense for that type. + * @return Options that can be added directly to the XML (e.g. "pack_relative=\"yes\"") + */ + std::string GetDSOptionsForType(const threading::Field *field); + + /** + * Takes a list of types, a list of names, and a title, and uses it to construct a valid DataSeries XML schema + * thing, which is then returned as a std::string + * + * @param opts std::vector of strings containing a list of options to be appended to each field (e.g. "pack_relative=yes") + * @param sTitle Name of this schema. Ideally, these schemas would be aggregated and re-used. + */ + string BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle); + + /** + * Takes a field type and converts it to a readable string. + * + * @param field We extract the type from this and convert it into a readable string. + * @return String representation of the field's type + */ + string GetBroTypeString(const threading::Field *field); + + /** Closes the currently open file. */ + void CloseLog(); + + /** XXX */ + bool OpenLog(string path); + typedef std::map ExtentMap; typedef ExtentMap::iterator ExtentIterator; // Internal DataSeries structures we need to keep track of. - DataSeriesSink* log_file; + vector schema_list; ExtentTypeLibrary log_types; ExtentType *log_type; ExtentSeries log_series; - OutputModule* log_output; ExtentMap extents; + int compress_type; + + DataSeriesSink* log_file; + OutputModule* log_output; // Options set from the script-level. 
uint64 ds_extent_size; uint64 ds_num_threads; string ds_compression; bool ds_dump_schema; + bool ds_use_integer_for_time; }; } diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml new file mode 100644 index 0000000000..71ad5d70a0 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out new file mode 100644 index 0000000000..b6f05003f3 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out @@ -0,0 +1,380 @@ +test.2011-03-07-03-00-05.ds test 11-03-07_03.00.05 11-03-07_04.00.05 0 +test.2011-03-07-04-00-05.ds test 11-03-07_04.00.05 11-03-07_05.00.05 0 +test.2011-03-07-05-00-05.ds test 11-03-07_05.00.05 11-03-07_06.00.05 0 +test.2011-03-07-06-00-05.ds test 11-03-07_06.00.05 11-03-07_07.00.05 0 +test.2011-03-07-07-00-05.ds test 11-03-07_07.00.05 11-03-07_08.00.05 0 +test.2011-03-07-08-00-05.ds test 11-03-07_08.00.05 11-03-07_09.00.05 0 +test.2011-03-07-09-00-05.ds test 11-03-07_09.00.05 11-03-07_10.00.05 0 +test.2011-03-07-10-00-05.ds test 11-03-07_10.00.05 11-03-07_11.00.05 0 +test.2011-03-07-11-00-05.ds test 11-03-07_11.00.05 11-03-07_12.00.05 0 +test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 +> test.2011-03-07-03-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1.299e+09 10.0.0.1 20 10.0.0.2 1024 +1.299e+09 10.0.0.2 20 10.0.0.3 0 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +> test.2011-03-07-04-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1.299e+09 10.0.0.1 20 10.0.0.2 1025 +1.299e+09 10.0.0.2 20 10.0.0.3 1 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +> test.2011-03-07-05-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1.299e+09 10.0.0.1 20 10.0.0.2 1026 +1.299e+09 10.0.0.2 20 10.0.0.3 2 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +> test.2011-03-07-06-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1.299e+09 10.0.0.1 20 10.0.0.2 1027 +1.299e+09 10.0.0.2 20 10.0.0.3 3 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +> test.2011-03-07-07-00-05.ds +# Extent Types ... 
+ + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1.299e+09 10.0.0.1 20 10.0.0.2 1028 +1.299e+09 10.0.0.2 20 10.0.0.3 4 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +> test.2011-03-07-08-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1.299e+09 10.0.0.1 20 10.0.0.2 1029 +1.299e+09 10.0.0.2 20 10.0.0.3 5 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +> test.2011-03-07-09-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1.299e+09 10.0.0.1 20 10.0.0.2 1030 +1.299e+09 10.0.0.2 20 10.0.0.3 6 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +> test.2011-03-07-10-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1.299e+09 10.0.0.1 20 10.0.0.2 1031 +1.299e+09 10.0.0.2 20 10.0.0.3 7 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +> test.2011-03-07-11-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1.3e+09 10.0.0.1 20 10.0.0.2 1032 +1.3e+09 10.0.0.2 20 10.0.0.3 8 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +> test.2011-03-07-12-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1.3e+09 10.0.0.1 20 10.0.0.2 1033 +1.3e+09 10.0.0.2 20 10.0.0.3 9 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +360 test +468 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt new file mode 100644 index 0000000000..f66f40b701 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt @@ -0,0 +1,43 @@ +# Extent Types ... 
+ + + + + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +400 ssh +604 DataSeries: ExtentIndex +# Extent, type='ssh' +t id.orig_h id.orig_p id.resp_h id.resp_p status country +1.334e+09 1.2.3.4 1234 2.3.4.5 80 success unknown +1.334e+09 1.2.3.4 1234 2.3.4.5 80 failure US +1.334e+09 1.2.3.4 1234 2.3.4.5 80 failure UK +1.334e+09 1.2.3.4 1234 2.3.4.5 80 success BR +1.334e+09 1.2.3.4 1234 2.3.4.5 80 failure MX +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +400 ssh +604 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt new file mode 100644 index 0000000000..e6294b1d71 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt @@ -0,0 +1,96 @@ +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +636 conn +2912 DataSeries: ExtentIndex +# Extent, type='conn' +ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes +1300475167096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 +1300475167097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 +1300475167099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 +1300475168853899 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 435 0 89 SHR F 0 Cd 0 0 1 117 +1300475168854378 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 420 0 99 SHR F 0 Cd 0 0 1 127 +1300475168854837 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 391 0 183 SHR F 0 Cd 0 0 1 211 +1300475168857956 3PKsZ2Uye21 141.142.220.118 32902 141.142.2.2 53 udp dns 317 0 89 SHR F 0 Cd 0 0 1 117 +1300475168858306 VW0XPVINV8a 141.142.220.118 59816 141.142.2.2 53 udp dns 343 0 99 SHR F 0 Cd 0 0 1 127 +1300475168858713 fRFu0wcOle6 141.142.220.118 59714 141.142.2.2 53 udp dns 375 0 183 SHR F 0 Cd 0 0 1 211 +1300475168891644 qSsw6ESzHV4 141.142.220.118 58206 141.142.2.2 53 udp dns 339 0 89 SHR F 0 Cd 0 0 1 117 +1300475168892037 iE6yhOq3SF 141.142.220.118 38911 141.142.2.2 53 udp dns 334 0 99 SHR F 0 Cd 0 0 1 127 +1300475168892414 GSxOnSLghOa 141.142.220.118 59746 141.142.2.2 53 udp dns 420 0 183 SHR F 0 Cd 0 0 1 211 +1300475168893988 qCaWGmzFtM5 141.142.220.118 45000 141.142.2.2 53 udp dns 384 0 89 SHR F 0 Cd 0 0 1 117 +1300475168894422 70MGiRM1Qf4 141.142.220.118 48479 141.142.2.2 53 udp dns 316 0 99 SHR F 0 Cd 0 0 1 127 +1300475168894787 h5DsfNtYzi1 141.142.220.118 48128 141.142.2.2 53 udp dns 422 0 183 SHR F 0 Cd 0 0 1 211 +1300475168901749 P654jzLoe3a 141.142.220.118 56056 141.142.2.2 53 udp dns 402 0 131 SHR F 0 Cd 0 0 1 159 +1300475168902195 Tw8jXtpTGu6 141.142.220.118 55092 141.142.2.2 53 udp dns 374 0 198 SHR F 0 Cd 0 0 1 226 +1300475169899438 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 85 0 0 +1300475170862384 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 2613016 350 0 S0 F 0 D 7 546 0 0 +1300475171675372 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 100096 66 0 S0 F 0 D 2 162 0 0 +1300475171677081 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 100020 66 
0 S0 F 0 D 2 122 0 0 +1300475173116749 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 99801 66 0 S0 F 0 D 2 162 0 0 +1300475173117362 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 99848 66 0 S0 F 0 D 2 122 0 0 +1300475173153679 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0 0 0 S0 F 0 D 1 78 0 0 +1300475168859163 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 tcp 215893 1130 734 S1 F 1130 ShACad 4 216 4 950 +1300475168652003 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 61328 0 350 OTH F 0 CdA 1 52 1 402 +1300475168895267 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 tcp 227283 1178 734 S1 F 1178 ShACad 4 216 4 950 +1300475168902635 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 120040 534 412 S1 F 534 ShACad 3 164 3 576 +1300475168892936 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 tcp 229603 1148 734 S1 F 1148 ShACad 4 216 4 950 +1300475168855305 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 tcp 218501 1171 733 S1 F 1171 ShACad 4 216 4 949 +1300475168892913 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 tcp 220960 1137 733 S1 F 1137 ShACad 4 216 4 949 +1300475169780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0 0 0 OTH F 0 h 0 0 1 48 +1300475168724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 119904 525 232 S1 F 525 ShACad 3 164 3 396 +1300475168855330 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 219720 1125 734 S1 F 1125 ShACad 4 216 4 950 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +636 conn +2912 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt new file mode 100644 index 0000000000..e85cf9337e --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt @@ -0,0 +1,96 @@ +# Extent Types ... 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +660 conn +2564 DataSeries: ExtentIndex +# Extent, type='conn' +ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes +1.3e+09 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 +1.3e+09 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 +1.3e+09 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 +1.3e+09 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 +1.3e+09 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 +1.3e+09 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 +1.3e+09 3PKsZ2Uye21 141.142.220.118 32902 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 +1.3e+09 VW0XPVINV8a 141.142.220.118 59816 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 +1.3e+09 fRFu0wcOle6 141.142.220.118 59714 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 +1.3e+09 qSsw6ESzHV4 141.142.220.118 58206 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 +1.3e+09 iE6yhOq3SF 141.142.220.118 38911 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 +1.3e+09 GSxOnSLghOa 141.142.220.118 59746 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 +1.3e+09 qCaWGmzFtM5 141.142.220.118 45000 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 +1.3e+09 70MGiRM1Qf4 141.142.220.118 48479 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 +1.3e+09 h5DsfNtYzi1 141.142.220.118 48128 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 +1.3e+09 P654jzLoe3a 141.142.220.118 56056 141.142.2.2 53 udp dns 0 0 131 SHR F 0 Cd 0 0 1 159 +1.3e+09 Tw8jXtpTGu6 141.142.220.118 55092 141.142.2.2 53 udp dns 0 0 198 SHR F 0 Cd 0 0 1 226 +1.3e+09 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 85 0 0 +1.3e+09 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 0 350 0 S0 F 0 D 7 546 0 0 +1.3e+09 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0 66 0 S0 F 0 D 2 162 0 0 +1.3e+09 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 0 66 0 S0 F 0 D 2 122 0 0 +1.3e+09 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0 66 0 S0 F 0 D 2 162 0 0 +1.3e+09 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 0 66 0 S0 F 0 D 2 122 0 0 +1.3e+09 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0 0 0 S0 F 0 D 1 78 0 0 +1.3e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 tcp 0 1130 734 S1 F 1130 ShACad 4 216 4 950 +1.3e+09 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 0 0 350 OTH F 0 CdA 1 52 1 402 +1.3e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 tcp 0 1178 734 S1 F 1178 ShACad 4 216 4 950 +1.3e+09 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 0 534 412 S1 F 534 ShACad 3 164 3 576 +1.3e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 tcp 0 1148 734 S1 F 1148 ShACad 4 216 4 950 +1.3e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 tcp 0 1171 733 S1 F 1171 ShACad 4 216 4 949 +1.3e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 tcp 0 1137 733 S1 F 1137 ShACad 4 216 4 949 +1.3e+09 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0 0 0 OTH F 0 h 0 0 1 48 +1.3e+09 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 0 525 232 S1 F 525 ShACad 3 
164 3 396 +1.3e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 0 1125 734 S1 F 1125 ShACad 4 216 4 950 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +660 conn +2564 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt new file mode 100644 index 0000000000..49e431085c --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt @@ -0,0 +1,90 @@ +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +extent offset ExtentType +40 DataSeries: XmlType +756 http +1144 DataSeries: ExtentIndex +# Extent, type='http' +ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file +1.3e+09 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 0 0 0 304 Not Modified 0 +1.3e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 0 0 0 304 Not Modified 0 +1.3e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.3e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 +# Extent, type='DataSeries: ExtentIndex' +offset extenttype +40 DataSeries: XmlType +756 http +1144 DataSeries: ExtentIndex diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro new file mode 100644 index 0000000000..77ea32908a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro @@ -0,0 +1,43 @@ +# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# +# @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES +# @TEST-EXEC: test -e ssh.ds.xml +# @TEST-EXEC: btest-diff ssh.ds.xml + +module SSH; + +redef LogDataSeries::dump_schema = T; + +# Haven't yet found a way to check for the effect of these. +redef LogDataSeries::compression = "bz2"; +redef LogDataSeries::extent_size = 1000; +redef LogDataSeries::num_threads = 5; + +# LogDataSeries::use_integer_for_time is tested separately. + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event bro_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro new file mode 100644 index 0000000000..639c7f3562 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro @@ -0,0 +1,33 @@ +# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# +# @TEST-EXEC: bro -b -r %DIR/../rotation.trace %INPUT 2>&1 Log::default_writer=Log::WRITER_DATASERIES | grep "test" >out +# @TEST-EXEC: for i in test.*.ds; do printf '> %s\n' $i; ds2txt $i; done >>out +# @TEST-EXEC: btest-diff out + +module Test; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. + } &log; +} + +redef Log::default_rotation_interval = 1hr; +redef Log::default_rotation_postprocessor_cmd = "echo"; + +event bro_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); +} + +event new_connection(c: connection) + { + Log::write(Test::LOG, [$t=network_time(), $id=c$id]); + } diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro new file mode 100644 index 0000000000..c7f8a5618f --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro @@ -0,0 +1,34 @@ +# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# +# @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES +# @TEST-EXEC: ds2txt ssh.ds >ssh.ds.txt +# @TEST-EXEC: btest-diff ssh.ds.txt + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event bro_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro new file mode 100644 index 0000000000..3a072998c0 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro @@ -0,0 +1,8 @@ +# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# +# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES +# @TEST-EXEC: ds2txt conn.ds >conn.ds.txt +# @TEST-EXEC: btest-diff conn.ds.txt + +redef LogDataSeries::use_integer_for_time = T; diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro new file mode 100644 index 0000000000..4a4b70afc2 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro @@ -0,0 +1,8 @@ +# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# +# @TEST-EXEC: bro -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES +# @TEST-EXEC: ds2txt conn.ds >conn.ds.txt +# @TEST-EXEC: ds2txt http.ds >http.ds.txt +# @TEST-EXEC: btest-diff conn.ds.txt +# @TEST-EXEC: btest-diff http.ds.txt diff --git a/testing/scripts/has-writer b/testing/scripts/has-writer new file mode 100755 index 0000000000..683d31041f --- /dev/null +++ b/testing/scripts/has-writer @@ -0,0 +1,6 @@ +#! /usr/bin/env bash +# +# Returns true if Bro has been compiled with support for writer type +# $1. The type name must match what "bro --help" prints. + +bro --help 2>&1 | grep -qi "Supported log formats:.*$1" From 958c6c7cf438cbd8d118e63754f4849531d72196 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 10 Apr 2012 11:09:07 -0500 Subject: [PATCH 217/651] Update IPv6 atomic fragment unit test to filter output of ICMPv6. Since that's not relevant to the test. --- testing/btest/core/ipv6-atomic-frag.test | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testing/btest/core/ipv6-atomic-frag.test b/testing/btest/core/ipv6-atomic-frag.test index 0829d8e973..8c8fe6ca64 --- a/testing/btest/core/ipv6-atomic-frag.test +++ b/testing/btest/core/ipv6-atomic-frag.test @@ -3,5 +3,6 @@ event new_connection(c: connection) { - print c$id; + if ( c$id$resp_p == 80/tcp ) + print c$id; } From 27ba3118c1b7fb2efddac3ce2ec8ce5a3e26f554 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 10 Apr 2012 11:37:08 -0500 Subject: [PATCH 218/651] Change ICMPv6 checksum calculation to use IP_Hdr wrapper. So that src/dst addresses used in pseudo-header are correct when there are certain extension headers (routing/destination). Add ICMP/ICMPv6 checksum unit tests.
--- src/ICMP.cc | 2 +- src/net_util.cc | 6 +++--- src/net_util.h | 5 ++--- testing/btest/Baseline/core.checksums/bad.out | 4 ++++ testing/btest/Baseline/core.checksums/good.out | 1 + .../btest/Traces/chksums/ip4-icmp-bad-chksum.pcap | Bin 0 -> 82 bytes .../Traces/chksums/ip4-icmp-good-chksum.pcap | Bin 0 -> 82 bytes .../Traces/chksums/ip6-icmp6-bad-chksum.pcap | Bin 0 -> 109 bytes .../Traces/chksums/ip6-icmp6-good-chksum.pcap | Bin 0 -> 109 bytes .../chksums/ip6-route0-icmp6-bad-chksum.pcap | Bin 0 -> 133 bytes .../chksums/ip6-route0-icmp6-good-chksum.pcap | Bin 0 -> 133 bytes testing/btest/core/checksums.test | 8 ++++++++ 12 files changed, 19 insertions(+), 7 deletions(-) create mode 100644 testing/btest/Traces/chksums/ip4-icmp-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip4-icmp-good-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-icmp6-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-icmp6-good-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-route0-icmp6-bad-chksum.pcap create mode 100644 testing/btest/Traces/chksums/ip6-route0-icmp6-good-chksum.pcap diff --git a/src/ICMP.cc b/src/ICMP.cc index a5cfdbcb64..9bd004e7f8 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -62,7 +62,7 @@ void ICMP_Analyzer::DeliverPacket(int len, const u_char* data, break; case IPPROTO_ICMPV6: - chksum = icmp6_checksum(icmpp, ip->IP6_Hdr(), len); + chksum = icmp6_checksum(icmpp, ip, len); break; default: diff --git a/src/net_util.cc b/src/net_util.cc index 362a33b201..d91cf02de9 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -80,7 +80,7 @@ int mobility_header_checksum(const IP_Hdr* ip) } #endif -int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, int len) +int icmp6_checksum(const struct icmp* icmpp, const IP_Hdr* ip, int len) { // ICMP6 uses the same checksum function as ICMP4 but a different // pseudo-header over which it is computed. @@ -93,8 +93,8 @@ int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, int len) sum = 0; // Pseudo-header as for UDP over IPv6 above. - sum = ones_complement_checksum((void*) ip6->ip6_src.s6_addr, 16, sum); - sum = ones_complement_checksum((void*) ip6->ip6_dst.s6_addr, 16, sum); + sum = ones_complement_checksum(ip->SrcAddr(), sum); + sum = ones_complement_checksum(ip->DstAddr(), sum); uint32 l = htonl(len); sum = ones_complement_checksum((void*) &l, 4, sum); diff --git a/src/net_util.h b/src/net_util.h index 92f0880014..fb410503f4 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -65,18 +65,17 @@ inline int seq_delta(uint32 a, uint32 b) } class IPAddr; +class IP_Hdr; // Returns the ones-complement checksum of a chunk of b short-aligned bytes. 
extern int ones_complement_checksum(const void* p, int b, uint32 sum); extern int ones_complement_checksum(const IPAddr& a, uint32 sum); -extern int icmp6_checksum(const struct icmp* icmpp, const struct ip6_hdr* ip6, - int len); +extern int icmp6_checksum(const struct icmp* icmpp, const IP_Hdr* ip, int len); extern int icmp_checksum(const struct icmp* icmpp, int len); #ifdef ENABLE_MOBILE_IPV6 -class IP_Hdr; extern int mobility_header_checksum(const IP_Hdr* ip); #endif diff --git a/testing/btest/Baseline/core.checksums/bad.out b/testing/btest/Baseline/core.checksums/bad.out index cd3c799277..57089a72a6 100644 --- a/testing/btest/Baseline/core.checksums/bad.out +++ b/testing/btest/Baseline/core.checksums/bad.out @@ -1,9 +1,13 @@ 1332784981.078396 weird: bad_IP_checksum 1332784885.686428 weird: bad_TCP_checksum 1332784933.501023 weird: bad_UDP_checksum +1334075363.536871 weird: bad_ICMP_checksum 1332785210.013051 weird: routing0_hdr 1332785210.013051 weird: bad_TCP_checksum 1332782580.798420 weird: routing0_hdr 1332782580.798420 weird: bad_UDP_checksum +1334075111.800086 weird: routing0_hdr +1334075111.800086 weird: bad_ICMP_checksum 1332785250.469132 weird: bad_TCP_checksum 1332781342.923813 weird: bad_UDP_checksum +1334074939.467194 weird: bad_ICMP_checksum diff --git a/testing/btest/Baseline/core.checksums/good.out b/testing/btest/Baseline/core.checksums/good.out index 627a330928..4330967d8d 100644 --- a/testing/btest/Baseline/core.checksums/good.out +++ b/testing/btest/Baseline/core.checksums/good.out @@ -1,2 +1,3 @@ 1332785125.596793 weird: routing0_hdr 1332782508.592037 weird: routing0_hdr +1334075027.053380 weird: routing0_hdr diff --git a/testing/btest/Traces/chksums/ip4-icmp-bad-chksum.pcap b/testing/btest/Traces/chksums/ip4-icmp-bad-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..cc60d879c4a7895581a609caa47aebe32d7cfa5e GIT binary patch literal 82 zcmca|c+)~A1{MYwaA0F#U<7g=$G7;a8*(sc0ofqTAaJ|m()$y8PE5PBfP=x6fk6hO W%z^RSu>&g@Q-C;?gMon;q!0iERTH29 literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip4-icmp-good-chksum.pcap b/testing/btest/Traces/chksums/ip4-icmp-good-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..2b07326eabd4a1ba68f6b00bbefe7c072b372a08 GIT binary patch literal 82 zcmca|c+)~A1{MYwaA0F#U<7iO$G7;4=rS>A0ofqTAaJ|m()$y8PE5PBfP=x6fk6hO W%z^RSu>&g@Q-C;?gW>yskU{|aBo#0K literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-icmp6-bad-chksum.pcap b/testing/btest/Traces/chksums/ip6-icmp6-bad-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..ce1dfa547a3b787632159f67c65b6603c1284e5b GIT binary patch literal 109 zcmca|c+)~A1{MYwaA0F#U<7ik<68WGDX=rR0@)z^9}FO>+U_QR82nZa3XCj2fSTEv d9yI>{7xe!>Dt}gX0|NsuNU5c7LB3I;GXUYo9wYz& literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-icmp6-good-chksum.pcap b/testing/btest/Traces/chksums/ip6-icmp6-good-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..4051fa5bc550def32285d6d93fde68342216354e GIT binary patch literal 109 zcmca|c+)~A1{MYwaA0F#U<7jR$F}$j=x{K&0@)z^9}FO>+U_QR82nZa3XCj2fSTEv d9yI>{7xe!>Dt}gX14GDpkkax37dO9fGXUUU9{~UW literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-route0-icmp6-bad-chksum.pcap b/testing/btest/Traces/chksums/ip6-route0-icmp6-bad-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..15e11ed3261c33a8319dd27d48a80d65eedc942b GIT binary patch literal 133 
zcmca|c+)~A1{MYwaA0F#U<7iW$F=x}nes5i0@)z^9}FO>+U_QR80y*%3XCj2fSTEv t9yI>{7xe!>Dt}hC6%*Jn1_efj3Lwh}qMw1Wfq{V+#LjbZOb;$~1pv0KAEf{Q literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/chksums/ip6-route0-icmp6-good-chksum.pcap b/testing/btest/Traces/chksums/ip6-route0-icmp6-good-chksum.pcap new file mode 100644 index 0000000000000000000000000000000000000000..b7924cab6fd6a5188614cdefb6d9db64e6a41ab8 GIT binary patch literal 133 zcmca|c+)~A1{MYwaA0F#U<7g|$F=ykTwq{`1+qc-KNvt%wcSksG1Rpk6c|~405!8S tJ!t&@FX;b&RQ{}ND<-gE3<``46+o5|L_YH*#jAhtzTu%%nN3jptnAs_$% literal 0 HcmV?d00001 diff --git a/testing/btest/core/checksums.test b/testing/btest/core/checksums.test index c01ab710af..f5b3230686 100644 --- a/testing/btest/core/checksums.test +++ b/testing/btest/core/checksums.test @@ -1,15 +1,23 @@ # @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-tcp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-icmp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-icmp6-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-bad-chksum.pcap >>bad.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-icmp6-bad-chksum.pcap >>bad.out 2>&1 + + # @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-tcp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-icmp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-icmp6-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-good-chksum.pcap >>good.out 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: btest-diff bad.out # @TEST-EXEC: btest-diff good.out From a5cc98bb5d189dcc9ad04516b6a583bae4ea1508 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 10 Apr 2012 13:57:09 -0700 Subject: [PATCH 219/651] fix memory leak in tables and vectors that are read into tables --- src/input/Manager.cc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index a1a3410f5e..102fd78d6f 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1795,8 +1795,12 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) { SetType* s = new SetType(set_index, 0); TableVal* t = new TableVal(s); for ( int i = 0; i < val->val.set_val.size; i++ ) { - t->Assign(ValueToVal( val->val.set_val.vals[i], type ), 0); + Val* assignval = ValueToVal( val->val.set_val.vals[i], type ); + t->Assign(assignval, 0); + Unref(assignval); // idex is not consumed by assign. 
} + + Unref(s); return t; break; } @@ -1809,6 +1813,7 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) { for ( int i = 0; i < val->val.vector_val.size; i++ ) { v->Assign(i, ValueToVal( val->val.set_val.vals[i], type ), 0); } + Unref(vt); return v; } From 86834c941cb45bfefb12b2096a0812aa17f2c02d Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 10 Apr 2012 16:14:33 -0700 Subject: [PATCH 220/651] Adding missing leak groups to a couple tests. Also activating leak checking for proxy in basic-cluster test. --- testing/btest/core/leaks/basic-cluster.bro | 9 ++++++--- testing/btest/core/leaks/remote.bro | 2 ++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/testing/btest/core/leaks/basic-cluster.bro b/testing/btest/core/leaks/basic-cluster.bro index a82f52c8b2..f5b40c1104 100644 --- a/testing/btest/core/leaks/basic-cluster.bro +++ b/testing/btest/core/leaks/basic-cluster.bro @@ -1,11 +1,14 @@ # Needs perftools support. # +# @TEST-GROUP: leaks + # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks + # @TEST-EXEC: btest-bg-run manager-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro -m %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro -m %INPUT # @TEST-EXEC: sleep 1 -# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT -# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT +# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT +# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT # @TEST-EXEC: btest-bg-wait -k 30 # @TEST-EXEC: btest-diff manager-1/metrics.log diff --git a/testing/btest/core/leaks/remote.bro b/testing/btest/core/leaks/remote.bro index fa72ce6024..f888d8f6ee 100644 --- a/testing/btest/core/leaks/remote.bro +++ b/testing/btest/core/leaks/remote.bro @@ -1,5 +1,7 @@ # Needs perftools support. # +# @TEST-GROUP: leaks +# # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks # # @TEST-EXEC: btest-bg-run sender HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m --pseudo-realtime %INPUT ../sender.bro From 51bad73e1ec0c2bc93c3770950491b6ac09345c5 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 11 Apr 2012 16:27:31 -0500 Subject: [PATCH 221/651] Fixes for IPv6 truncation and ICMP/ICMP6 analysis. - Add more guards against trying to analyze captured packets with a truncated IPv6 static header or extension header chain. - Add back in the ICMP payload tracking for ICMP "connections". - Fix 'icmp_context' record construction. Some field assignments were mismatched for ICMP and ICMP6. Source and destination addresses were set incorrectly for context packets that don't contain a full IP header. Some fields for ICMP6 weren't filled out. - Changed ICMP Time Exceeded packets to raise the 'icmp_time_exceeded' event instead of 'icmp_error_message'. - Add unit tests for truncation and the main types of ICMP/ICMP6 that have specific events. - Documentation clarifications. 
--- scripts/base/init-bare.bro | 8 +- src/Frag.cc | 10 +- src/ICMP.cc | 52 +++++---- src/IP.cc | 28 ++++- src/IP.h | 68 +++++++++-- src/PacketSort.cc | 4 +- src/Sessions.cc | 7 +- .../Baseline/core.icmp.icmp-context/output | 12 ++ .../Baseline/core.icmp.icmp-events/output | 20 ++++ .../Baseline/core.icmp.icmp6-context/output | 16 +++ .../Baseline/core.icmp.icmp6-events/output | 55 +++++++++ testing/btest/Baseline/core.truncation/output | 3 + .../Traces/icmp/icmp-destunreach-ip.pcap | Bin 0 -> 102 bytes .../icmp/icmp-destunreach-no-context.pcap | Bin 0 -> 82 bytes .../icmp-destunreach-udp.pcap} | Bin testing/btest/Traces/icmp/icmp-ping.pcap | Bin 0 -> 480 bytes .../btest/Traces/icmp/icmp-timeexceeded.pcap | Bin 0 -> 114 bytes .../icmp/icmp6-destunreach-ip6ext-trunc.pcap | Bin 0 -> 142 bytes .../icmp/icmp6-destunreach-ip6ext-udp.pcap | Bin 0 -> 162 bytes .../Traces/icmp/icmp6-destunreach-ip6ext.pcap | Bin 0 -> 150 bytes .../icmp/icmp6-destunreach-no-context.pcap | Bin 0 -> 102 bytes .../Traces/icmp/icmp6-neighbor-advert.pcap | Bin 0 -> 118 bytes .../Traces/icmp/icmp6-neighbor-solicit.pcap | Bin 0 -> 118 bytes .../btest/Traces/icmp/icmp6-paramprob.pcap | Bin 0 -> 154 bytes testing/btest/Traces/icmp/icmp6-ping.pcap | Bin 0 -> 904 bytes testing/btest/Traces/icmp/icmp6-redirect.pcap | Bin 0 -> 134 bytes .../Traces/icmp/icmp6-router-advert.pcap | Bin 0 -> 110 bytes .../Traces/icmp/icmp6-router-solicit.pcap | Bin 0 -> 102 bytes .../btest/Traces/icmp/icmp6-timeexceeded.pcap | Bin 0 -> 154 bytes testing/btest/Traces/icmp/icmp6-toobig.pcap | Bin 0 -> 154 bytes testing/btest/Traces/trunc/ip4-trunc.pcap | Bin 0 -> 60 bytes testing/btest/Traces/trunc/ip6-ext-trunc.pcap | Bin 0 -> 94 bytes testing/btest/Traces/trunc/ip6-trunc.pcap | Bin 0 -> 88 bytes testing/btest/core/discarder.bro | 2 +- testing/btest/core/icmp/icmp-context.test | 14 +++ testing/btest/core/icmp/icmp-events.test | 44 +++++++ testing/btest/core/icmp/icmp6-context.test | 15 +++ testing/btest/core/icmp/icmp6-events.test | 110 ++++++++++++++++++ testing/btest/core/truncation.test | 6 + 39 files changed, 422 insertions(+), 52 deletions(-) create mode 100644 testing/btest/Baseline/core.icmp.icmp-context/output create mode 100644 testing/btest/Baseline/core.icmp.icmp-events/output create mode 100644 testing/btest/Baseline/core.icmp.icmp6-context/output create mode 100644 testing/btest/Baseline/core.icmp.icmp6-events/output create mode 100644 testing/btest/Baseline/core.truncation/output create mode 100644 testing/btest/Traces/icmp/icmp-destunreach-ip.pcap create mode 100644 testing/btest/Traces/icmp/icmp-destunreach-no-context.pcap rename testing/btest/Traces/{icmp-unreach.trace => icmp/icmp-destunreach-udp.pcap} (100%) create mode 100644 testing/btest/Traces/icmp/icmp-ping.pcap create mode 100644 testing/btest/Traces/icmp/icmp-timeexceeded.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-destunreach-ip6ext-trunc.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-destunreach-ip6ext-udp.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-destunreach-ip6ext.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-destunreach-no-context.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-neighbor-advert.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-neighbor-solicit.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-paramprob.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-ping.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-redirect.pcap create mode 100644 
testing/btest/Traces/icmp/icmp6-router-advert.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-router-solicit.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-timeexceeded.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-toobig.pcap create mode 100644 testing/btest/Traces/trunc/ip4-trunc.pcap create mode 100644 testing/btest/Traces/trunc/ip6-ext-trunc.pcap create mode 100644 testing/btest/Traces/trunc/ip6-trunc.pcap create mode 100644 testing/btest/core/icmp/icmp-context.test create mode 100644 testing/btest/core/icmp/icmp-events.test create mode 100644 testing/btest/core/icmp/icmp6-context.test create mode 100644 testing/btest/core/icmp/icmp6-events.test create mode 100644 testing/btest/core/truncation.test diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 3e615f7669..1863e44552 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -102,11 +102,13 @@ type icmp_conn: record { ## .. bro:see:: icmp_time_exceeded icmp_unreachable type icmp_context: record { id: conn_id; ##< The packet's 4-tuple. - len: count; ##< The lenght of the packet's IP header. + len: count; ##< The length of the IP packet (headers + payload). proto: count; ##< The packet's transport-layer protocol. frag_offset: count; ##< The packet's fragementation offset. - ## True if the packet's IP header is fully included in the context. If that is not - ## the case, the other fields will all be set to null values. + ## True if the packet's IP header is not fully included in the context + ## or if there is not enough of the transport header to determine source + ## and destination ports. If that is the cast, the appropriate fields + ## of this record will be set to null values. bad_hdr_len: bool; bad_checksum: bool; ##< True if the packet's IP checksum is not correct. MF: bool; ##< True if the packets *more fragements* flag is set. diff --git a/src/Frag.cc b/src/Frag.cc index 0261b97bf0..d873f5bc0c 100644 --- a/src/Frag.cc +++ b/src/Frag.cc @@ -150,7 +150,7 @@ void FragReassembler::AddFragment(double t, const IP_Hdr* ip, const u_char* pkt) void FragReassembler::Overlap(const u_char* b1, const u_char* b2, int n) { - IP_Hdr proto_h(proto_hdr, false); + IP_Hdr proto_h(proto_hdr, false, proto_hdr_len); if ( memcmp((const void*) b1, (const void*) b2, n) ) s->Weird("fragment_inconsistency", &proto_h); @@ -182,7 +182,7 @@ void FragReassembler::BlockInserted(DataBlock* /* start_block */) // can happen for benign reasons when we're // intermingling parts of two fragmented packets. - IP_Hdr proto_h(proto_hdr, false); + IP_Hdr proto_h(proto_hdr, false, proto_hdr_len); s->Weird("fragment_size_inconsistency", &proto_h); // We decide to analyze the contiguous portion now. 
@@ -196,7 +196,7 @@ void FragReassembler::BlockInserted(DataBlock* /* start_block */) else if ( last_block->upper > frag_size ) { - IP_Hdr proto_h(proto_hdr, false); + IP_Hdr proto_h(proto_hdr, false, proto_hdr_len); s->Weird("fragment_size_inconsistency", &proto_h); frag_size = last_block->upper; } @@ -250,8 +250,8 @@ void FragReassembler::BlockInserted(DataBlock* /* start_block */) { struct ip6_hdr* reassem6 = (struct ip6_hdr*) pkt_start; reassem6->ip6_plen = htons(frag_size + proto_hdr_len - 40); - const IPv6_Hdr_Chain* chain = new IPv6_Hdr_Chain(reassem6, next_proto); - reassembled_pkt = new IP_Hdr(reassem6, true, chain); + const IPv6_Hdr_Chain* chain = new IPv6_Hdr_Chain(reassem6, next_proto, n); + reassembled_pkt = new IP_Hdr(reassem6, true, n, chain); } else diff --git a/src/ICMP.cc b/src/ICMP.cc index 9bd004e7f8..f65965f86f 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -93,6 +93,12 @@ void ICMP_Analyzer::DeliverPacket(int len, const u_char* data, caplen -= 8; len -= 8; + int& len_stat = is_orig ? request_len : reply_len; + if ( len_stat < 0 ) + len_stat = len; + else + len_stat += len; + if ( ip->NextProto() == IPPROTO_ICMP ) NextICMP4(current_timestamp, icmpp, len, caplen, data, ip); else @@ -286,13 +292,12 @@ RecordVal* ICMP_Analyzer::ExtractICMP4Context(int len, const u_char*& data) IPAddr src_addr, dst_addr; uint32 src_port, dst_port; - if ( ip_hdr_len < sizeof(struct ip) || ip_hdr_len > uint32(len) ) + if ( len < (int)sizeof(struct ip) || ip_hdr_len > uint32(len) ) { // We don't have an entire IP header. bad_hdr_len = 1; ip_len = frag_offset = 0; DF = MF = bad_checksum = 0; - src_addr = dst_addr = 0; src_port = dst_port = 0; } @@ -331,9 +336,9 @@ RecordVal* ICMP_Analyzer::ExtractICMP4Context(int len, const u_char*& data) iprec->Assign(0, id_val); iprec->Assign(1, new Val(ip_len, TYPE_COUNT)); iprec->Assign(2, new Val(proto, TYPE_COUNT)); - iprec->Assign(3, new Val(bad_hdr_len, TYPE_BOOL)); - iprec->Assign(4, new Val(bad_checksum, TYPE_BOOL)); - iprec->Assign(5, new Val(frag_offset, TYPE_COUNT)); + iprec->Assign(3, new Val(frag_offset, TYPE_COUNT)); + iprec->Assign(4, new Val(bad_hdr_len, TYPE_BOOL)); + iprec->Assign(5, new Val(bad_checksum, TYPE_BOOL)); iprec->Assign(6, new Val(MF, TYPE_BOOL)); iprec->Assign(7, new Val(DF, TYPE_BOOL)); @@ -342,32 +347,33 @@ RecordVal* ICMP_Analyzer::ExtractICMP4Context(int len, const u_char*& data) RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) { - const IP_Hdr ip_hdr_data((const struct ip6_hdr*) data, false); - const IP_Hdr* ip_hdr = &ip_hdr_data; - int DF = 0, MF = 0, bad_hdr_len = 0, bad_checksum = 0; + int DF = 0, MF = 0, bad_hdr_len = 0; TransportProto proto = TRANSPORT_UNKNOWN; - uint32 ip_hdr_len = ip_hdr->HdrLen(); //should always be 40 IPAddr src_addr; IPAddr dst_addr; uint32 ip_len, frag_offset = 0; uint32 src_port, dst_port; - if ( ip_hdr_len < sizeof(struct ip6_hdr) || ip_hdr_len != 40 ) // XXX What's the 2nd part doing? 
+ if ( len < (int)sizeof(struct ip6_hdr) ) { bad_hdr_len = 1; ip_len = 0; - src_addr = dst_addr = 0; src_port = dst_port = 0; } else { - ip_len = ip_hdr->TotalLen(); + const IP_Hdr ip_hdr_data((const struct ip6_hdr*) data, false, len); + const IP_Hdr* ip_hdr = &ip_hdr_data; + ip_len = ip_hdr->TotalLen(); src_addr = ip_hdr->SrcAddr(); dst_addr = ip_hdr->DstAddr(); + frag_offset = ip_hdr->FragOffset(); + MF = ip_hdr->MF(); + DF = ip_hdr->DF(); - if ( uint32(len) >= ip_hdr_len + 4 ) + if ( uint32(len) >= uint32(ip_hdr->HdrLen() + 4) ) proto = GetContextProtocol(ip_hdr, &src_port, &dst_port); else { @@ -388,17 +394,13 @@ RecordVal* ICMP_Analyzer::ExtractICMP6Context(int len, const u_char*& data) iprec->Assign(0, id_val); iprec->Assign(1, new Val(ip_len, TYPE_COUNT)); - - //if the encap packet is ICMPv6 we force this... (cause there is no IGMP (by that name) for ICMPv6), rather ugly hack once more - iprec->Assign(2, new Val(58, TYPE_COUNT)); - - iprec->Assign(3, new Val(bad_hdr_len, TYPE_BOOL)); - - // The following are not available for IPv6. - iprec->Assign(4, new Val(0, TYPE_BOOL)); // bad_checksum - iprec->Assign(5, new Val(frag_offset, TYPE_COUNT)); // frag_offset - iprec->Assign(6, new Val(0, TYPE_BOOL)); // MF - iprec->Assign(7, new Val(1, TYPE_BOOL)); // DF + iprec->Assign(2, new Val(proto, TYPE_COUNT)); + iprec->Assign(3, new Val(frag_offset, TYPE_COUNT)); + iprec->Assign(4, new Val(bad_hdr_len, TYPE_BOOL)); + // bad_checksum is always false since IPv6 layer doesn't have a checksum + iprec->Assign(5, new Val(0, TYPE_BOOL)); + iprec->Assign(6, new Val(MF, TYPE_BOOL)); + iprec->Assign(7, new Val(DF, TYPE_BOOL)); return iprec; } @@ -608,7 +610,7 @@ void ICMP_Analyzer::Context4(double t, const struct icmp* icmpp, break; case ICMP_TIMXCEED: - f = icmp_error_message; + f = icmp_time_exceeded; break; } diff --git a/src/IP.cc b/src/IP.cc index 3c4d1e4a8c..3decdae13f 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -419,20 +419,35 @@ static inline bool isIPv6ExtHeader(uint8 type) } } -void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) +void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, int total_len, + bool set_next, uint16 next) { length = 0; uint8 current_type, next_type; next_type = IPPROTO_IPV6; const u_char* hdrs = (const u_char*) ip6; + if ( total_len < (int)sizeof(struct ip6_hdr) ) + reporter->InternalError("IPv6_HdrChain::Init with truncated IP header"); + do { + // We can't determine a given header's length if there's less than + // two bytes of data available (2nd byte of extension headers is length) + if ( total_len < 2 ) return; + current_type = next_type; IPv6_Hdr* p = new IPv6_Hdr(current_type, hdrs); next_type = p->NextHdr(); - uint16 len = p->Length(); + uint16 cur_len = p->Length(); + + // If this header is truncated, don't add it to chain, don't go further + if ( cur_len > total_len ) + { + delete p; + return; + } if ( set_next && next_type == IPPROTO_FRAGMENT ) { @@ -444,16 +459,17 @@ void IPv6_Hdr_Chain::Init(const struct ip6_hdr* ip6, bool set_next, uint16 next) // Check for routing headers and remember final destination address. if ( current_type == IPPROTO_ROUTING ) - ProcessRoutingHeader((const struct ip6_rthdr*) hdrs, len); + ProcessRoutingHeader((const struct ip6_rthdr*) hdrs, cur_len); #ifdef ENABLE_MOBILE_IPV6 // Only Mobile IPv6 has a destination option we care about right now. 
if ( current_type == IPPROTO_DSTOPTS ) - ProcessDstOpts((const struct ip6_dest*) hdrs, len); + ProcessDstOpts((const struct ip6_dest*) hdrs, cur_len); #endif - hdrs += len; - length += len; + hdrs += cur_len; + length += cur_len; + total_len -= cur_len; } while ( current_type != IPPROTO_FRAGMENT && current_type != IPPROTO_ESP && #ifdef ENABLE_MOBILE_IPV6 diff --git a/src/IP.h b/src/IP.h index 96642f08f7..7e05ee98ea 100644 --- a/src/IP.h +++ b/src/IP.h @@ -142,12 +142,12 @@ public: /** * Initializes the header chain from an IPv6 header structure. */ - IPv6_Hdr_Chain(const struct ip6_hdr* ip6) : + IPv6_Hdr_Chain(const struct ip6_hdr* ip6, int len) : #ifdef ENABLE_MOBILE_IPV6 homeAddr(0), #endif finalDst(0) - { Init(ip6, false); } + { Init(ip6, len, false); } ~IPv6_Hdr_Chain() { @@ -250,14 +250,20 @@ protected: * Initializes the header chain from an IPv6 header structure, and replaces * the first next protocol pointer field that points to a fragment header. */ - IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next) : + IPv6_Hdr_Chain(const struct ip6_hdr* ip6, uint16 next, int len) : #ifdef ENABLE_MOBILE_IPV6 homeAddr(0), #endif finalDst(0) - { Init(ip6, true, next); } + { Init(ip6, len, true, next); } - void Init(const struct ip6_hdr* ip6, bool set_next, uint16 next = 0); + /** + * Initializes the header chain from an IPv6 header structure of a given + * length, possibly setting the first next protocol pointer field that + * points to a fragment header. + */ + void Init(const struct ip6_hdr* ip6, int total_len, bool set_next, + uint16 next = 0); /** * Process a routing header and allocate/remember the final destination @@ -294,9 +300,21 @@ protected: IPAddr* finalDst; }; +/** + * A class that wraps either an IPv4 or IPv6 packet and abstracts methods + * for inquiring about common features between the two. + */ class IP_Hdr { public: - IP_Hdr(const u_char* p, bool arg_del) + /** + * Attempts to construct the header from some blob of data based on IP + * version number. Caller must have already checked that the header + * is not truncated. + * @param p pointer to memory containing an IPv4 or IPv6 packet. + * @param arg_del whether to take ownership of \a p pointer's memory. + * @param len the length of data, in bytes, pointed to by \a p. + */ + IP_Hdr(const u_char* p, bool arg_del, int len) : ip4(0), ip6(0), del(arg_del), ip6_hdrs(0) { if ( ((const struct ip*)p)->ip_v == 4 ) @@ -304,7 +322,7 @@ public: else if ( ((const struct ip*)p)->ip_v == 6 ) { ip6 = (const struct ip6_hdr*)p; - ip6_hdrs = new IPv6_Hdr_Chain(ip6); + ip6_hdrs = new IPv6_Hdr_Chain(ip6, len); } else { @@ -314,18 +332,38 @@ public: } } + /** + * Construct the header wrapper from an IPv4 packet. Caller must have + * already checked that the header is not truncated. + * @param arg_ip4 pointer to memory containing an IPv4 packet. + * @param arg_del whether to take ownership of \a arg_ip4 pointer's memory. + */ IP_Hdr(const struct ip* arg_ip4, bool arg_del) : ip4(arg_ip4), ip6(0), del(arg_del), ip6_hdrs(0) { } - IP_Hdr(const struct ip6_hdr* arg_ip6, bool arg_del, + /** + * Construct the header wrapper from an IPv6 packet. Caller must have + * already checked that the static IPv6 header is not truncated. If + * the packet contains extension headers and they are truncated, that can + * be checked afterwards by comparing \a len with \a TotalLen. E.g. + * NetSessions::DoNextPacket does this to skip truncated packets. + * @param arg_ip6 pointer to memory containing an IPv6 packet. 
+ * @param arg_del whether to take ownership of \a arg_ip6 pointer's memory. + * @param len the packet's length in bytes. + * @param c an already-constructed header chain to take ownership of. + */ + IP_Hdr(const struct ip6_hdr* arg_ip6, bool arg_del, int len, const IPv6_Hdr_Chain* c = 0) : ip4(0), ip6(arg_ip6), del(arg_del), - ip6_hdrs(c ? c : new IPv6_Hdr_Chain(ip6)) + ip6_hdrs(c ? c : new IPv6_Hdr_Chain(ip6, len)) { } + /** + * Destructor. + */ ~IP_Hdr() { if ( ip6 ) @@ -340,8 +378,14 @@ public: } } + /** + * If an IPv4 packet is wrapped, return a pointer to it, else null. + */ const struct ip* IP4_Hdr() const { return ip4; } + /** + * If an IPv6 packet is wrapped, return a pointer to it, else null. + */ const struct ip6_hdr* IP6_Hdr() const { return ip6; } /** @@ -441,9 +485,15 @@ public: { return ip4 ? ip4->ip_p : ((*ip6_hdrs)[ip6_hdrs->Size()-1])->NextHdr(); } + /** + * Returns the IPv4 Time to Live or IPv6 Hop Limit field. + */ unsigned char TTL() const { return ip4 ? ip4->ip_ttl : ip6->ip6_hlim; } + /** + * Returns whether the IP header indicates this packet is a fragment. + */ bool IsFragment() const { return ip4 ? (ntohs(ip4->ip_off) & 0x3fff) != 0 : ip6_hdrs->IsFragment(); } diff --git a/src/PacketSort.cc b/src/PacketSort.cc index 04c525c4d1..a7e2b04572 100644 --- a/src/PacketSort.cc +++ b/src/PacketSort.cc @@ -28,8 +28,8 @@ PacketSortElement::PacketSortElement(PktSrc* arg_src, const struct ip* ip = (const struct ip*) (pkt + hdr_size); if ( ip->ip_v == 4 ) ip_hdr = new IP_Hdr(ip, false); - else if ( ip->ip_v == 6 ) - ip_hdr = new IP_Hdr((const struct ip6_hdr*) ip, false); + else if ( ip->ip_v == 6 && (caplen >= sizeof(struct ip6_hdr) + hdr_size) ) + ip_hdr = new IP_Hdr((const struct ip6_hdr*) ip, false, caplen - hdr_size); else // Weird will be generated later in NetSessions::NextPacket. 
return; diff --git a/src/Sessions.cc b/src/Sessions.cc index d734f4b4a2..2dd6f7027b 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -281,7 +281,12 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, else if ( ip->ip_v == 6 ) { - IP_Hdr ip_hdr((const struct ip6_hdr*) (pkt + hdr_size), false); + if ( caplen < sizeof(struct ip6_hdr) ) + { + Weird("truncated_IP", hdr, pkt); + return; + } + IP_Hdr ip_hdr((const struct ip6_hdr*) (pkt + hdr_size), false, caplen); DoNextPacket(t, hdr, &ip_hdr, pkt, hdr_size); } diff --git a/testing/btest/Baseline/core.icmp.icmp-context/output b/testing/btest/Baseline/core.icmp.icmp-context/output new file mode 100644 index 0000000000..9e252d8c38 --- /dev/null +++ b/testing/btest/Baseline/core.icmp.icmp-context/output @@ -0,0 +1,12 @@ +icmp_unreachable (code=0) + conn_id: [orig_h=10.0.0.1, orig_p=3/icmp, resp_h=10.0.0.2, resp_p=0/icmp] + icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=0, v6=F] + icmp_context: [id=[orig_h=::, orig_p=0/unknown, resp_h=::, resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] +icmp_unreachable (code=0) + conn_id: [orig_h=10.0.0.1, orig_p=3/icmp, resp_h=10.0.0.2, resp_p=0/icmp] + icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=20, v6=F] + icmp_context: [id=[orig_h=10.0.0.2, orig_p=0/unknown, resp_h=10.0.0.1, resp_p=0/unknown], len=20, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] +icmp_unreachable (code=3) + conn_id: [orig_h=192.168.1.102, orig_p=3/icmp, resp_h=192.168.1.1, resp_p=3/icmp] + icmp_conn: [orig_h=192.168.1.102, resp_h=192.168.1.1, itype=3, icode=3, len=148, v6=F] + icmp_context: [id=[orig_h=192.168.1.1, orig_p=53/udp, resp_h=192.168.1.102, resp_p=59207/udp], len=163, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] diff --git a/testing/btest/Baseline/core.icmp.icmp-events/output b/testing/btest/Baseline/core.icmp.icmp-events/output new file mode 100644 index 0000000000..9d8f484921 --- /dev/null +++ b/testing/btest/Baseline/core.icmp.icmp-events/output @@ -0,0 +1,20 @@ +icmp_unreachable (code=3) + conn_id: [orig_h=192.168.1.102, orig_p=3/icmp, resp_h=192.168.1.1, resp_p=3/icmp] + icmp_conn: [orig_h=192.168.1.102, resp_h=192.168.1.1, itype=3, icode=3, len=148, v6=F] + icmp_context: [id=[orig_h=192.168.1.1, orig_p=53/udp, resp_h=192.168.1.102, resp_p=59207/udp], len=163, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] +icmp_time_exceeded (code=0) + conn_id: [orig_h=10.0.0.1, orig_p=11/icmp, resp_h=10.0.0.2, resp_p=0/icmp] + icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=11, icode=0, len=32, v6=F] + icmp_context: [id=[orig_h=10.0.0.2, orig_p=30000/udp, resp_h=10.0.0.1, resp_p=13000/udp], len=32, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] +icmp_echo_request (id=34844, seq=0, payload=O\x85\xe0C\0^N\xeb\xff^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) + conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] +icmp_echo_reply (id=34844, seq=0, payload=O\x85\xe0C\0^N\xeb\xff^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) + conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] +icmp_echo_request (id=34844, seq=1, 
payload=O\x85\xe0D\0^N\xf0}^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) + conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] +icmp_echo_reply (id=34844, seq=1, payload=O\x85\xe0D\0^N\xf0}^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) + conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] diff --git a/testing/btest/Baseline/core.icmp.icmp6-context/output b/testing/btest/Baseline/core.icmp.icmp6-context/output new file mode 100644 index 0000000000..4b75210a18 --- /dev/null +++ b/testing/btest/Baseline/core.icmp.icmp6-context/output @@ -0,0 +1,16 @@ +icmp_unreachable (code=0) + conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=0, v6=T] + icmp_context: [id=[orig_h=::, orig_p=0/unknown, resp_h=::, resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] +icmp_unreachable (code=0) + conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=40, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=0/unknown, resp_h=fe80::dead, resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] +icmp_unreachable (code=0) + conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] +icmp_unreachable (code=0) + conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=48, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=0/unknown, resp_h=fe80::dead, resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] diff --git a/testing/btest/Baseline/core.icmp.icmp6-events/output b/testing/btest/Baseline/core.icmp.icmp6-events/output new file mode 100644 index 0000000000..1ff26ff889 --- /dev/null +++ b/testing/btest/Baseline/core.icmp.icmp6-events/output @@ -0,0 +1,55 @@ +icmp_unreachable (code=0) + conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] +icmp_packet_too_big (code=0) + conn_id: [orig_h=fe80::dead, orig_p=2/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=2, icode=0, len=52, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] +icmp_time_exceeded (code=0) + conn_id: [orig_h=fe80::dead, orig_p=3/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=3, icode=0, len=52, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, 
resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] +icmp_parameter_problem (code=0) + conn_id: [orig_h=fe80::dead, orig_p=4/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=4, icode=0, len=52, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] +icmp_echo_request (id=1, seq=3, payload=abcdefghijklmnopqrstuvwabcdefghi) + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] +icmp_echo_reply (id=1, seq=3, payload=abcdefghijklmnopqrstuvwabcdefghi) + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] +icmp_echo_request (id=1, seq=4, payload=abcdefghijklmnopqrstuvwabcdefghi) + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] +icmp_echo_reply (id=1, seq=4, payload=abcdefghijklmnopqrstuvwabcdefghi) + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] +icmp_echo_request (id=1, seq=5, payload=abcdefghijklmnopqrstuvwabcdefghi) + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] +icmp_echo_reply (id=1, seq=5, payload=abcdefghijklmnopqrstuvwabcdefghi) + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] +icmp_echo_request (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] +icmp_echo_reply (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] +icmp_redirect (tgt=fe80::cafe, dest=fe80::babe) + conn_id: [orig_h=fe80::dead, orig_p=137/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=137, icode=0, len=32, v6=T] +icmp_router_advertisement (hop_limit=0, managed=F, rlifetime=1800, reachable=0.000000, retrans=0.000000) + conn_id: [orig_h=fe80::dead, orig_p=134/icmp, resp_h=fe80::beef, resp_p=133/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=134, icode=0, len=8, v6=T] +icmp_neighbor_advertisement (tgt=fe80::babe) + conn_id: [orig_h=fe80::dead, orig_p=136/icmp, resp_h=fe80::beef, resp_p=135/icmp] + icmp_conn: 
[orig_h=fe80::dead, resp_h=fe80::beef, itype=136, icode=0, len=16, v6=T] +icmp_router_solicitation + conn_id: [orig_h=fe80::dead, orig_p=133/icmp, resp_h=fe80::beef, resp_p=134/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=133, icode=0, len=0, v6=T] +icmp_neighbor_solicitation (tgt=fe80::babe) + conn_id: [orig_h=fe80::dead, orig_p=135/icmp, resp_h=fe80::beef, resp_p=136/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=135, icode=0, len=16, v6=T] diff --git a/testing/btest/Baseline/core.truncation/output b/testing/btest/Baseline/core.truncation/output new file mode 100644 index 0000000000..ba8d3eedee --- /dev/null +++ b/testing/btest/Baseline/core.truncation/output @@ -0,0 +1,3 @@ +1334160095.895421 weird: truncated_IP +1334156241.519125 weird: truncated_IP +1334094648.590126 weird: truncated_IP diff --git a/testing/btest/Traces/icmp/icmp-destunreach-ip.pcap b/testing/btest/Traces/icmp/icmp-destunreach-ip.pcap new file mode 100644 index 0000000000000000000000000000000000000000..982f2e4734a8546743a4f16f956100587f20155a GIT binary patch literal 102 zcmca|c+)~A1{MYwaA0F#U<7g=-E8%*wBTj11F}JwLEv`BrS~WHoS1fL0SALC1A_rb hnFC|mDK4M}AZB7_`12p65GW@CmSaeJ4iX2+0|1#g6pH`= literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp/icmp-destunreach-no-context.pcap b/testing/btest/Traces/icmp/icmp-destunreach-no-context.pcap new file mode 100644 index 0000000000000000000000000000000000000000..1f904e3d9155161efe80e7d06747618935a649c4 GIT binary patch literal 82 zcmca|c+)~A1{MYwaA0F#U<7jZ-D>q;V8+9s1!RLTgTU>MOYcwYIWg_h0uBaO1_l|B VG6%-Adt5*bK+MF<@aI2BApi$W5@rAZ literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp-unreach.trace b/testing/btest/Traces/icmp/icmp-destunreach-udp.pcap similarity index 100% rename from testing/btest/Traces/icmp-unreach.trace rename to testing/btest/Traces/icmp/icmp-destunreach-udp.pcap diff --git a/testing/btest/Traces/icmp/icmp-ping.pcap b/testing/btest/Traces/icmp/icmp-ping.pcap new file mode 100644 index 0000000000000000000000000000000000000000..499769b280e4b3bb15d624cad1d12b83dc8e2ba9 GIT binary patch literal 480 zcmca|c+)~A1{MYw`2U}Qff2}Ye$eW#{DzMq3CIRv27%i^;>4a4K$3&Om4P8d**NJp!bXT)5PKmuF127_uwY!?53&tpD+5Cz zMK-zsZCv*OVI!s=7p(yKagjX2k13r%KQaRS=)%DFp_WuXf^7WxA7LXge$1iBMgT{r BV*~&I literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp/icmp-timeexceeded.pcap b/testing/btest/Traces/icmp/icmp-timeexceeded.pcap new file mode 100644 index 0000000000000000000000000000000000000000..27804b55592e6e857a7cde11c9e9fce153c900d9 GIT binary patch literal 114 zcmca|c+)~A1{MYwaA0F#U<7iW-Dvgy7Rkim1!RLTgTU>MOYcwYIWg_h0uBaO1_m3D rG6%-AeOy2dK+MF=z^MRI2$WL*%L%5P0*Qm~u=;a+= literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp/icmp6-destunreach-ip6ext-udp.pcap b/testing/btest/Traces/icmp/icmp6-destunreach-ip6ext-udp.pcap new file mode 100644 index 0000000000000000000000000000000000000000..5aca9af1b5d72fac60a392d196c87be1ed57e31d GIT binary patch literal 162 zcmca|c+)~A1{MYwaA0F#U<7iiDq8$~TR9l2Kx`oX4+aobZFdtu3>Pbhe+?i84BT6b r%HQ{%k>TSqkZb~o5Me-5zwbS&`g>~y85qG%DK#)U!N9}Q7XbtSO1UAm literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp/icmp6-destunreach-ip6ext.pcap b/testing/btest/Traces/icmp/icmp6-destunreach-ip6ext.pcap new file mode 100644 index 0000000000000000000000000000000000000000..996048e5ab117ff8e9bd87261c891dad320ac270 GIT binary patch literal 150 zcmca|c+)~A1{MYwaA0F#U<7iS3tRl(ePw3I1F}K*KNvt%wcSksF)XYc{xyIYFmP`z hDu3U5Mh0CbkZb~o;9x*gzwbS&`g?1w85qG%0RZo^9k2iZ literal 0 HcmV?d00001 diff --git 
a/testing/btest/Traces/icmp/icmp6-destunreach-no-context.pcap b/testing/btest/Traces/icmp/icmp6-destunreach-no-context.pcap new file mode 100644 index 0000000000000000000000000000000000000000..cf15a7cf65c22b1922057b338802b61bb6470f51 GIT binary patch literal 102 zcmca|c+)~A1{MYwaA0F#U<7jb_O$xX?`31K1F}K*KNvt%wcSksF*vLo{xyIYFmP`z ODu3U5MurqQkSqY44i$O; literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp/icmp6-neighbor-advert.pcap b/testing/btest/Traces/icmp/icmp6-neighbor-advert.pcap new file mode 100644 index 0000000000000000000000000000000000000000..0a06329fb542245747ee47488d63499fc51af3e7 GIT binary patch literal 118 zcmca|c+)~A1{MYwaA0F#U<7hf&bIoCaIrG@0ofq@9}FO>+U_QR7!p?h|22RZFmP`z UDu3Vm4uM9eQ6r7 literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp/icmp6-paramprob.pcap b/testing/btest/Traces/icmp/icmp6-paramprob.pcap new file mode 100644 index 0000000000000000000000000000000000000000..ab2d41cd3a294305fc65394648ea41424dccf628 GIT binary patch literal 154 zcmca|c+)~A1{MYwaA0F#U<7jfkGJ~&n!&iea_C^K`qHMbYwC$WW!Zxz~#t-xx3)~Yp{06e^ z)@FolWc!UM+pYp_d##VKjcmX10sY2G$ZsIqcI`meMz-IGvh5nsHU$HOZDjk67w9)O PLVg3;)M9MTH%| literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp/icmp6-router-advert.pcap b/testing/btest/Traces/icmp/icmp6-router-advert.pcap new file mode 100644 index 0000000000000000000000000000000000000000..9c3e557a9d26e12d1e8a1a3e8b2a818612a1dded GIT binary patch literal 110 zcmca|c+)~A1{MYwaA0F#U<7jho^JKG@nU9h1F}K*KNvt%wcSksF$Aps|7!p-VBp?b SRQ|sAZ45V9893NEU@8Ehi5Hy! literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp/icmp6-router-solicit.pcap b/testing/btest/Traces/icmp/icmp6-router-solicit.pcap new file mode 100644 index 0000000000000000000000000000000000000000..b33495aa8d3bb8d7b96ab21544882e6c90c87145 GIT binary patch literal 102 zcmca|c+)~A1{MYwaA0F#U<7h*o@wd$1Z0Eoe=vZkYP*{NV%S(Y{A&O)VBp?b mRQ|sA%nToPfn*av1dkw^`hD+F)!$oNYG8DNfrqCr0tf)R6(Vr} literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp/icmp6-toobig.pcap b/testing/btest/Traces/icmp/icmp6-toobig.pcap new file mode 100644 index 0000000000000000000000000000000000000000..92bf50f240fef9e6a75f84fe6f31b4e39c17bb7a GIT binary patch literal 154 zcmca|c+)~A1{MYwaA0F#U<7jf4z~KYxG^vk0ofq@9}FO>+U_QR7&cZ8{~ACH7`V3< pmA~&j6T_Qb3=FIc2_Pjrf@tdZy+>7lZ*8f8(Fq0~p1uem003M{A+rDg literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/trunc/ip4-trunc.pcap b/testing/btest/Traces/trunc/ip4-trunc.pcap new file mode 100644 index 0000000000000000000000000000000000000000..30df0ea94d9fef8ec78d6a461a2d0fc17b99aca8 GIT binary patch literal 60 zcmca|c+)~A1{MYwaA0F#U<7jRuWI$*yONhd1jyC{NebNVxb*(So)gn9E#P2qWnfTX GU<3fDI}bnr literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/trunc/ip6-ext-trunc.pcap b/testing/btest/Traces/trunc/ip6-ext-trunc.pcap new file mode 100644 index 0000000000000000000000000000000000000000..1de659084e22fcd6b1eec1d6fd9c5157e568c77c GIT binary patch literal 94 zcmca|c+)~A1{MYwaA0F#U<7h3R=4=;F>*4P0ofq@9}FO>+U_QR7$OV~3XCj2fSTEv Q9yI>{7xe!>Dt}fs009pgS^xk5 literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/trunc/ip6-trunc.pcap b/testing/btest/Traces/trunc/ip6-trunc.pcap new file mode 100644 index 0000000000000000000000000000000000000000..0111caed0f32737b3b6d6667ecfd2e207e039c94 GIT binary patch literal 88 zcmca|c+)~A1{MYwaA0F#U<7h5&TjR;`kI}=0Lboutput # @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace discarder-tcp.bro >>output # @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace discarder-udp.bro >>output -# @TEST-EXEC: bro -C -r $TRACES/icmp-unreach.trace discarder-icmp.bro >>output +# @TEST-EXEC: bro -C -r 
$TRACES/icmp/icmp-destunreach-udp.pcap discarder-icmp.bro >>output # @TEST-EXEC: btest-diff output @TEST-START-FILE discarder-ip.bro diff --git a/testing/btest/core/icmp/icmp-context.test b/testing/btest/core/icmp/icmp-context.test new file mode 100644 index 0000000000..ca7a34c5aa --- /dev/null +++ b/testing/btest/core/icmp/icmp-context.test @@ -0,0 +1,14 @@ +# These tests all check that IPv6 context packet construction for ICMP6 works. + +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-destunreach-no-context.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-destunreach-ip.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-destunreach-udp.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: btest-diff output + +event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, context: icmp_context) + { + print "icmp_unreachable (code=" + fmt("%d", code) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + print " icmp_context: " + fmt("%s", context); + } diff --git a/testing/btest/core/icmp/icmp-events.test b/testing/btest/core/icmp/icmp-events.test new file mode 100644 index 0000000000..1a54f05fba --- /dev/null +++ b/testing/btest/core/icmp/icmp-events.test @@ -0,0 +1,44 @@ +# These tests all check that ICMP6 events get raised with correct arguments. + +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-destunreach-udp.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-timeexceeded.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-ping.pcap %INPUT >>output 2>&1 + +# @TEST-EXEC: btest-diff output + +event icmp_sent(c: connection, icmp: icmp_conn) + { + print "icmp_sent"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } + +event icmp_echo_request(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string) + { + print "icmp_echo_request (id=" + fmt("%d", id) + ", seq=" + fmt("%d", seq) + ", payload=" + payload + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } + +event icmp_echo_reply(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string) + { + print "icmp_echo_reply (id=" + fmt("%d", id) + ", seq=" + fmt("%d", seq) + ", payload=" + payload + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } + +event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, context: icmp_context) + { + print "icmp_unreachable (code=" + fmt("%d", code) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + print " icmp_context: " + fmt("%s", context); + } + +event icmp_time_exceeded(c: connection, icmp: icmp_conn, code: count, context: icmp_context) + { + print "icmp_time_exceeded (code=" + fmt("%d", code) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + print " icmp_context: " + fmt("%s", context); + } diff --git a/testing/btest/core/icmp/icmp6-context.test b/testing/btest/core/icmp/icmp6-context.test new file mode 100644 index 0000000000..dfa8271cbc --- /dev/null +++ b/testing/btest/core/icmp/icmp6-context.test @@ -0,0 +1,15 @@ +# These tests all check that IPv6 context packet construction for ICMP6 works. 
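The handlers in these btest scripts print the whole ``icmp_context`` record with ``fmt("%s", context)``, which is what the baselines above show. For illustration only, and not something these tests exercise, a handler could also pull out individual context fields, roughly as in the sketch below; the field names (``id``, ``proto``, ``len``, ``bad_hdr_len``, ``MF``, ``DF``) are taken from the baseline output earlier in this series, so the exact record layout is an assumption here rather than something the tests guarantee.

.. code:: bro

    event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, context: icmp_context)
        {
        # Ignore contexts where the embedded header was too short to parse.
        if ( context$bad_hdr_len )
            return;

        print fmt("inner packet: %s -> %s, proto %d, len %d, MF=%s, DF=%s",
                  context$id$orig_h, context$id$resp_h,
                  context$proto, context$len, context$MF, context$DF);
        }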
+ +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-destunreach-no-context.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-destunreach-ip6ext-trunc.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-destunreach-ip6ext-udp.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-destunreach-ip6ext.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: btest-diff output + +event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, context: icmp_context) + { + print "icmp_unreachable (code=" + fmt("%d", code) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + print " icmp_context: " + fmt("%s", context); + } diff --git a/testing/btest/core/icmp/icmp6-events.test b/testing/btest/core/icmp/icmp6-events.test new file mode 100644 index 0000000000..64c14920ff --- /dev/null +++ b/testing/btest/core/icmp/icmp6-events.test @@ -0,0 +1,110 @@ +# These tests all check that ICMP6 events get raised with correct arguments. + +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-destunreach-ip6ext-udp.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-toobig.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-timeexceeded.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-paramprob.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-ping.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-redirect.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-router-advert.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-neighbor-advert.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-router-solicit.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-neighbor-solicit.pcap %INPUT >>output 2>&1 + +# @TEST-EXEC: btest-diff output + +event icmp_sent(c: connection, icmp: icmp_conn) + { + print "icmp_sent"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } + +event icmp_echo_request(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string) + { + print "icmp_echo_request (id=" + fmt("%d", id) + ", seq=" + fmt("%d", seq) + ", payload=" + payload + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } + +event icmp_echo_reply(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string) + { + print "icmp_echo_reply (id=" + fmt("%d", id) + ", seq=" + fmt("%d", seq) + ", payload=" + payload + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } + +event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, context: icmp_context) + { + print "icmp_unreachable (code=" + fmt("%d", code) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + print " icmp_context: " + fmt("%s", context); + } + +event icmp_packet_too_big(c: connection, icmp: icmp_conn, code: count, context: icmp_context) + { + print "icmp_packet_too_big (code=" + fmt("%d", code) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + print " icmp_context: " + fmt("%s", context); + } + +event icmp_time_exceeded(c: connection, icmp: icmp_conn, code: count, context: icmp_context) + { + print "icmp_time_exceeded (code=" + fmt("%d", code) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + print " icmp_context: " + fmt("%s", context); + } + +event icmp_parameter_problem(c: connection, 
icmp: icmp_conn, code: count, context: icmp_context) + { + print "icmp_parameter_problem (code=" + fmt("%d", code) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + print " icmp_context: " + fmt("%s", context); + } + +event icmp_redirect(c: connection, icmp: icmp_conn, tgt: addr, dest: addr) + { + print "icmp_redirect (tgt=" + fmt("%s", tgt) + ", dest=" + fmt("%s", dest) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } + +event icmp_error_message(c: connection, icmp: icmp_conn, code: count, context: icmp_context) + { + print "icmp_error_message (code=" + fmt("%d", code) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + print " icmp_context: " + fmt("%s", context); + } + +event icmp_neighbor_solicitation(c: connection, icmp: icmp_conn, tgt: addr) + { + print "icmp_neighbor_solicitation (tgt=" + fmt("%s", tgt) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } + +event icmp_neighbor_advertisement(c: connection, icmp: icmp_conn, tgt:addr) + { + print "icmp_neighbor_advertisement (tgt=" + fmt("%s", tgt) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } + +event icmp_router_solicitation(c: connection, icmp: icmp_conn) + { + print "icmp_router_solicitation"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } + +event icmp_router_advertisement(c: connection, icmp: icmp_conn, hop_limit: count, managed: bool, router_lifetime: count, reachable_time: interval, retrans_timer: interval) + { + print "icmp_router_advertisement (hop_limit=" + fmt("%d", hop_limit) + ", managed=" + fmt("%s", managed) + ", rlifetime=" + fmt("%d", router_lifetime) + ", reachable=" + fmt("%f", reachable_time) + ", retrans=" + fmt("%f", retrans_timer) + ")"; + print " conn_id: " + fmt("%s", c$id); + print " icmp_conn: " + fmt("%s", icmp); + } diff --git a/testing/btest/core/truncation.test b/testing/btest/core/truncation.test new file mode 100644 index 0000000000..16a60fe6db --- /dev/null +++ b/testing/btest/core/truncation.test @@ -0,0 +1,6 @@ +# Truncated IP packet's should not be analyzed, and generate truncated_IP weird + +# @TEST-EXEC: bro -b -r $TRACES/trunc/ip4-trunc.pcap >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/trunc/ip6-trunc.pcap >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/trunc/ip6-ext-trunc.pcap >>output 2>&1 +# @TEST-EXEC: btest-diff output From c90148d073c276e3434b5977ed9e96498434b611 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 12 Apr 2012 10:28:10 -0500 Subject: [PATCH 222/651] Sync up patricia.c/h with pysubnettree repo --- src/patricia.c | 20 +------------------- src/patricia.h | 7 ------- 2 files changed, 1 insertion(+), 26 deletions(-) diff --git a/src/patricia.c b/src/patricia.c index c2e2016570..1dbc795ab7 100644 --- a/src/patricia.c +++ b/src/patricia.c @@ -115,16 +115,12 @@ local_inet_pton (int af, const char *src, void *dst) } } #ifdef NT -#ifdef HAVE_IPV6 else if (af == AF_INET6) { struct in6_addr Address; return (inet6_addr(src, &Address)); } -#endif /* HAVE_IPV6 */ -#endif /* NT */ -#ifndef NT +#else else { - errno = EAFNOSUPPORT; return -1; } @@ -160,10 +156,8 @@ my_inet_pton (int af, const char *src, void *dst) } memcpy (dst, xp, 4); return (1); -#ifdef HAVE_IPV6 } else if (af == AF_INET6) { return (local_inet_pton (af, src, dst)); -#endif /* HAVE_IPV6 */ } else { #ifndef NT errno = EAFNOSUPPORT; @@ -217,7 +211,6 @@ prefix_toa2x 
(prefix_t *prefix, char *buff, int with_len) } return (buff); } -#ifdef HAVE_IPV6 else if (prefix->family == AF_INET6) { char *r; r = (char *) inet_ntop (AF_INET6, &prefix->add.sin6, buff, 48 /* a guess value */ ); @@ -227,7 +220,6 @@ prefix_toa2x (prefix_t *prefix, char *buff, int with_len) } return (buff); } -#endif /* HAVE_IPV6 */ else return (NULL); } @@ -255,7 +247,6 @@ New_Prefix2 (int family, void *dest, int bitlen, prefix_t *prefix) int dynamic_allocated = 0; int default_bitlen = 32; -#ifdef HAVE_IPV6 if (family == AF_INET6) { default_bitlen = 128; if (prefix == NULL) { @@ -265,7 +256,6 @@ New_Prefix2 (int family, void *dest, int bitlen, prefix_t *prefix) memcpy (&prefix->add.sin6, dest, 16); } else -#endif /* HAVE_IPV6 */ if (family == AF_INET) { if (prefix == NULL) { #ifndef NT @@ -308,9 +298,7 @@ ascii2prefix (int family, char *string) u_long bitlen, maxbitlen = 0; char *cp; struct in_addr sin; -#ifdef HAVE_IPV6 struct in6_addr sin6; -#endif /* HAVE_IPV6 */ int result; char save[MAXLINE]; @@ -320,19 +308,15 @@ ascii2prefix (int family, char *string) /* easy way to handle both families */ if (family == 0) { family = AF_INET; -#ifdef HAVE_IPV6 if (strchr (string, ':')) family = AF_INET6; -#endif /* HAVE_IPV6 */ } if (family == AF_INET) { maxbitlen = 32; } -#ifdef HAVE_IPV6 else if (family == AF_INET6) { maxbitlen = 128; } -#endif /* HAVE_IPV6 */ if ((cp = strchr (string, '/')) != NULL) { bitlen = atol (cp + 1); @@ -355,7 +339,6 @@ ascii2prefix (int family, char *string) return (New_Prefix (AF_INET, &sin, bitlen)); } -#ifdef HAVE_IPV6 else if (family == AF_INET6) { // Get rid of this with next IPv6 upgrade #if defined(NT) && !defined(HAVE_INET_NTOP) @@ -367,7 +350,6 @@ ascii2prefix (int family, char *string) #endif /* NT */ return (New_Prefix (AF_INET6, &sin6, bitlen)); } -#endif /* HAVE_IPV6 */ else return (NULL); } diff --git a/src/patricia.h b/src/patricia.h index c4d3ce9b08..dc67226362 100644 --- a/src/patricia.h +++ b/src/patricia.h @@ -52,11 +52,6 @@ #include - -#ifndef HAVE_IPV6 -#define HAVE_IPV6 -#endif - /* typedef unsigned int u_int; */ typedef void (*void_fn_t)(); /* { from defs.h */ @@ -86,9 +81,7 @@ typedef struct _prefix_t { int ref_count; /* reference count */ union { struct in_addr sin; -#ifdef HAVE_IPV6 struct in6_addr sin6; -#endif /* IPV6 */ } add; } prefix_t; From 6aa4f00159b86eac4494d1646fe12f1debccb36e Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 13 Apr 2012 14:59:38 -0500 Subject: [PATCH 223/651] Add more support for <netinet/ip6.h>'s that lack some structure definitions.
--- config.h.in | 44 +++++++++++++++++++++++++++++ src/IP.h | 1 - src/net_util.h | 75 +++++++++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 115 insertions(+), 5 deletions(-) diff --git a/config.h.in b/config.h.in index 6c64fb61bc..b8e2cb9a88 100644 --- a/config.h.in +++ b/config.h.in @@ -152,3 +152,47 @@ #ifndef HAVE_DLT_PPP_SERIAL #define DLT_PPP_SERIAL @DLT_PPP_SERIAL@ #endif + +/* IPv6 Next Header values defined by RFC 3542 */ +#cmakedefine HAVE_IPPROTO_HOPOPTS +#ifndef HAVE_IPPROTO_HOPOPTS +#define IPPROTO_HOPOPTS 0 +#endif +#cmakedefine HAVE_IPPROTO_IPV6 +#ifndef HAVE_IPPROTO_IPV6 +#define IPPROTO_IPV6 41 +#endif +#cmakedefine HAVE_IPPROTO_ROUTING +#ifndef HAVE_IPPROTO_ROUTING +#define IPPROTO_ROUTING 43 +#endif +#cmakedefine HAVE_IPPROTO_FRAGMENT +#ifndef HAVE_IPPROTO_FRAGMENT +#define IPPROTO_FRAGMENT 44 +#endif +#cmakedefine HAVE_IPPROTO_ESP +#ifndef HAVE_IPPROTO_ESP +#define IPPROTO_ESP 50 +#endif +#cmakedefine HAVE_IPPROTO_AH +#ifndef HAVE_IPPROTO_AH +#define IPPROTO_AH 51 +#endif +#cmakedefine HAVE_IPPROTO_ICMPV6 +#ifndef HAVE_IPPROTO_ICMPV6 +#define IPPROTO_ICMPV6 58 +#endif +#cmakedefine HAVE_IPPROTO_NONE +#ifndef HAVE_IPPROTO_NONE +#define IPPROTO_NONE 59 +#endif +#cmakedefine HAVE_IPPROTO_DSTOPTS +#ifndef HAVE_IPPROTO_DSTOPTS +#define IPPROTO_DSTOPTS 60 +#endif + +/* IPv6 options structure defined by RFC 3542 */ +#cmakedefine HAVE_IP6_OPT + +/* Common IPv6 extension structure */ +#cmakedefine HAVE_IP6_EXT diff --git a/src/IP.h b/src/IP.h index 96642f08f7..aacec3f190 100644 --- a/src/IP.h +++ b/src/IP.h @@ -12,7 +12,6 @@ #include #include #include -#include #ifdef ENABLE_MOBILE_IPV6 diff --git a/src/net_util.h b/src/net_util.h index 3f8eb01e2a..92188237d9 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -31,13 +31,80 @@ typedef enum { IPv4, IPv6 } IPFamily; #ifdef HAVE_NETINET_IP6_H #include + +#ifndef HAVE_IP6_OPT +struct ip6_opt { + uint8 ip6o_type; + uint8 ip6o_len; +}; +#endif // HAVE_IP6_OPT + +#ifndef HAVE_IP6_EXT +struct ip6_ext { + uint8 ip6e_nxt; + uint8 ip6e_len; +}; +#endif // HAVE_IP6_EXT + #else struct ip6_hdr { - uint16 ip6_plen; - uint8 ip6_nxt; - uint8 ip6_hlim; + union { + struct ip6_hdrctl { + uint32 ip6_un1_flow; /* 4 bits version, 8 bits TC, 20 bits + flow-ID */ + uint16 ip6_un1_plen; /* payload length */ + uint8 ip6_un1_nxt; /* next header */ + uint8 ip6_un1_hlim; /* hop limit */ + } ip6_un1; + uint8 ip6_un2_vfc; /* 4 bits version, top 4 bits tclass */ + } ip6_ctlun; + struct in6_addr ip6_src; /* source address */ + struct in6_addr ip6_dst; /* destination address */ }; -#endif +#define ip6_vfc ip6_ctlun.ip6_un2_vfc +#define ip6_flow ip6_ctlun.ip6_un1.ip6_un1_flow +#define ip6_plen ip6_ctlun.ip6_un1.ip6_un1_plen +#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt +#define ip6_hlim ip6_ctlun.ip6_un1.ip6_un1_hlim +#define ip6_hops ip6_ctlun.ip6_un1.ip6_un1_hlim + +struct ip6_opt { + uint8 ip6o_type; + uint8 ip6o_len; +}; + +struct ip6_ext { + uint8 ip6e_nxt; + uint8 ip6e_len; +}; + +struct ip6_frag { + uint8 ip6f_nxt; /* next header */ + uint8 ip6f_reserved; /* reserved field */ + uint16 ip6f_offlg; /* offset, reserved, and flag */ + uint32 ip6f_ident; /* identification */ +}; + +struct ip6_hbh { + uint8 ip6h_nxt; /* next header */ + uint8 ip6h_len; /* length in units of 8 octets */ + /* followed by options */ +}; + +struct ip6_dest { + uint8 ip6d_nxt; /* next header */ + uint8 ip6d_len; /* length in units of 8 octets */ + /* followed by options */ +}; + +struct ip6_rthdr { + uint8 ip6r_nxt; /* next header */ + uint8 ip6r_len; /* length in units 
of 8 octets */ + uint8 ip6r_type; /* routing type */ + uint8 ip6r_segleft; /* segments left */ + /* followed by routing type specific data */ +}; +#endif // HAVE_NETINET_IP6_H // For Solaris. #if !defined(TCPOPT_WINDOW) && defined(TCPOPT_WSCALE) From 1967f6f81caec93c1d85c851e3229a9197fe5cf1 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 15 Apr 2012 15:11:39 -0700 Subject: [PATCH 224/651] rename a couple of structures and make the names in manager fit the api more. This should it make easier for other people to understand what is going on without having knowledge of an "internal api * means * in external api" mapping. --- src/input/Manager.cc | 98 ++++++++++++++++++++++++-------------------- src/input/Manager.h | 24 +++++------ 2 files changed, 65 insertions(+), 57 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 102fd78d6f..8c8d6d8ba3 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -23,6 +23,11 @@ using namespace input; using threading::Value; using threading::Field; +/** + * InputHashes are used as Dictionaries to store the value and index hashes for all lines currently stored in a table. + * Index hash is stored as HashKey*, because it is thrown into other Bro functions that need the complex structure of it. + * For everything we do (with values), we just take the hash_t value and compare it directly with == + */ struct InputHash { hash_t valhash; HashKey* idxkey; @@ -41,7 +46,10 @@ static void input_hash_delete_func(void* val) { declare(PDict, InputHash); -class Manager::Filter { +/** + * Base stuff that every stream can do + */ +class Manager::Stream { public: string name; string source; @@ -49,25 +57,25 @@ public: int mode; - FilterType filter_type; // to distinguish between event and table filters + StreamType filter_type; // to distinguish between event and table filters EnumVal* type; ReaderFrontend* reader; RecordVal* description; - Filter(); - virtual ~Filter(); + Stream(); + virtual ~Stream(); }; -Manager::Filter::Filter() { +Manager::Stream::Stream() { type = 0; reader = 0; description = 0; removed = false; } -Manager::Filter::~Filter() { +Manager::Stream::~Stream() { if ( type ) Unref(type); if ( description ) @@ -77,7 +85,7 @@ Manager::Filter::~Filter() { delete(reader); } -class Manager::TableFilter: public Manager::Filter { +class Manager::TableStream: public Manager::Stream { public: unsigned int num_idx_fields; @@ -96,11 +104,11 @@ public: EventHandlerPtr event; - TableFilter(); - ~TableFilter(); + TableStream(); + ~TableStream(); }; -class Manager::EventFilter: public Manager::Filter { +class Manager::EventStream: public Manager::Stream { public: EventHandlerPtr event; @@ -108,11 +116,11 @@ public: unsigned int num_fields; bool want_record; - EventFilter(); - ~EventFilter(); + EventStream(); + ~EventStream(); }; -Manager::TableFilter::TableFilter() : Manager::Filter::Filter() { +Manager::TableStream::TableStream() : Manager::Stream::Stream() { filter_type = TABLE_FILTER; tab = 0; @@ -125,18 +133,18 @@ Manager::TableFilter::TableFilter() : Manager::Filter::Filter() { pred = 0; } -Manager::EventFilter::EventFilter() : Manager::Filter::Filter() { +Manager::EventStream::EventStream() : Manager::Stream::Stream() { fields = 0; filter_type = EVENT_FILTER; } -Manager::EventFilter::~EventFilter() { +Manager::EventStream::~EventStream() { if ( fields ) { Unref(fields); } } -Manager::TableFilter::~TableFilter() { +Manager::TableStream::~TableStream() { if ( tab ) Unref(tab); if ( itype ) @@ -176,7 +184,7 @@ Manager::Manager() } 
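To connect the renamed classes above back to the script layer: judging from ``CreateEventStream()`` and ``CreateTableStream()`` further down in this diff, an ``EventStream`` is what ends up backing ``Input::add_event`` and a ``TableStream`` is what backs ``Input::add_table``. That mapping is a reading of the code rather than something the commit message states, and the record and stream names in the sketch below are purely illustrative; the authoritative script examples are in the doc/input.rst update later in this series.

.. code:: bro

    type Idx: record {
        ip: addr;
    };

    type Val: record {
        attempts: count;
    };

    type Line: record {
        ip: addr;
        attempts: count;
    };

    global conn_attempts: table[addr] of count = table();

    # Raised once per input line by the event stream.
    event line(description: Input::EventDescription, tpe: Input::Event,
               ip: addr, attempts: count)
        {
        print fmt("%s made %d attempts", ip, attempts);
        }

    event bro_init()
        {
        # Backed by a TableStream: keeps conn_attempts in sync with the file.
        Input::add_table([$source="input.txt", $name="tab", $idx=Idx, $val=Val,
                          $destination=conn_attempts]);

        # Backed by an EventStream: raises line() for every line read.
        Input::add_event([$source="input.txt", $name="ev", $fields=Line, $ev=line]);
        }

Either call hands the input framework a description record, which is what ``CreateStream(Stream* info, RecordVal* description)`` receives below.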
Manager::~Manager() { - for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) { + for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) { delete s->second; delete s->first; } @@ -233,7 +241,7 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) } // create a new input reader object to be used at whomevers leisure lateron. -bool Manager::CreateStream(Filter* info, RecordVal* description) +bool Manager::CreateStream(Stream* info, RecordVal* description) { ReaderDefinition* ir = input_readers; @@ -249,7 +257,7 @@ bool Manager::CreateStream(Filter* info, RecordVal* description) Unref(name_val); { - Filter *i = FindFilter(name); + Stream *i = FindStream(name); if ( i != 0 ) { reporter->Error("Trying create already existing input stream %s", name.c_str()); return false; @@ -296,7 +304,7 @@ bool Manager::CreateEventStream(RecordVal* fval) { return false; } - EventFilter* filter = new EventFilter(); + EventStream* filter = new EventStream(); { bool res = CreateStream(filter, fval); if ( res == false ) { @@ -412,7 +420,7 @@ bool Manager::CreateTableStream(RecordVal* fval) { return false; } - TableFilter* filter = new TableFilter(); + TableStream* filter = new TableStream(); { bool res = CreateStream(filter, fval); if ( res == false ) { @@ -620,7 +628,7 @@ bool Manager::IsCompatibleType(BroType* t, bool atomic_only) bool Manager::RemoveStream(const string &name) { - Filter *i = FindFilter(name); + Stream *i = FindStream(name); if ( i == 0 ) { return false; // not found @@ -644,7 +652,7 @@ bool Manager::RemoveStream(const string &name) { } bool Manager::RemoveStreamContinuation(ReaderFrontend* reader) { - Filter *i = FindFilter(reader); + Stream *i = FindStream(reader); if ( i == 0 ) { reporter->Error("Stream not found in RemoveStreamContinuation"); @@ -712,7 +720,7 @@ bool Manager::UnrollRecordType(vector *fields, const RecordType *rec, co bool Manager::ForceUpdate(const string &name) { - Filter *i = FindFilter(name); + Stream *i = FindStream(name); if ( i == 0 ) { reporter->Error("Stream %s not found", name.c_str()); return false; @@ -786,7 +794,7 @@ Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Valu void Manager::SendEntry(ReaderFrontend* reader, Value* *vals) { - Filter *i = FindFilter(reader); + Stream *i = FindStream(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader in SendEntry"); return; @@ -797,7 +805,7 @@ void Manager::SendEntry(ReaderFrontend* reader, Value* *vals) { readFields = SendEntryTable(i, vals); } else if ( i->filter_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - readFields = SendEventFilterEvent(i, type, vals); + readFields = SendEventStreamEvent(i, type, vals); } else { assert(false); } @@ -808,13 +816,13 @@ void Manager::SendEntry(ReaderFrontend* reader, Value* *vals) { delete [] vals; } -int Manager::SendEntryTable(Filter* i, const Value* const *vals) { +int Manager::SendEntryTable(Stream* i, const Value* const *vals) { bool updated = false; assert(i); assert(i->filter_type == TABLE_FILTER); - TableFilter* filter = (TableFilter*) i; + TableStream* filter = (TableStream*) i; HashKey* idxhash = HashValues(filter->num_idx_fields, vals); @@ -979,7 +987,7 @@ int Manager::SendEntryTable(Filter* i, const Value* const *vals) { void Manager::EndCurrentSend(ReaderFrontend* reader) { - Filter *i = FindFilter(reader); + Stream *i = FindStream(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader in 
EndCurrentSend"); return; @@ -996,7 +1004,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { } assert(i->filter_type == TABLE_FILTER); - TableFilter* filter = (TableFilter*) i; + TableStream* filter = (TableStream*) i; // lastdict contains all deleted entries and should be empty apart from that IterCookie *c = filter->lastDict->InitForIteration(); @@ -1083,7 +1091,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { } void Manager::Put(ReaderFrontend* reader, Value* *vals) { - Filter *i = FindFilter(reader); + Stream *i = FindStream(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader in Put"); return; @@ -1094,7 +1102,7 @@ void Manager::Put(ReaderFrontend* reader, Value* *vals) { readFields = PutTable(i, vals); } else if ( i->filter_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - readFields = SendEventFilterEvent(i, type, vals); + readFields = SendEventStreamEvent(i, type, vals); } else { assert(false); } @@ -1106,11 +1114,11 @@ void Manager::Put(ReaderFrontend* reader, Value* *vals) { } -int Manager::SendEventFilterEvent(Filter* i, EnumVal* type, const Value* const *vals) { +int Manager::SendEventStreamEvent(Stream* i, EnumVal* type, const Value* const *vals) { assert(i); assert(i->filter_type == EVENT_FILTER); - EventFilter* filter = (EventFilter*) i; + EventStream* filter = (EventStream*) i; Val *val; list out_vals; @@ -1143,11 +1151,11 @@ int Manager::SendEventFilterEvent(Filter* i, EnumVal* type, const Value* const * } -int Manager::PutTable(Filter* i, const Value* const *vals) { +int Manager::PutTable(Stream* i, const Value* const *vals) { assert(i); assert(i->filter_type == TABLE_FILTER); - TableFilter* filter = (TableFilter*) i; + TableStream* filter = (TableStream*) i; Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); Val* valval; @@ -1244,7 +1252,7 @@ int Manager::PutTable(Filter* i, const Value* const *vals) { // Todo:: perhaps throw some kind of clear-event? void Manager::Clear(ReaderFrontend* reader) { - Filter *i = FindFilter(reader); + Stream *i = FindStream(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader in Clear"); return; @@ -1256,14 +1264,14 @@ void Manager::Clear(ReaderFrontend* reader) { #endif assert(i->filter_type == TABLE_FILTER); - TableFilter* filter = (TableFilter*) i; + TableStream* filter = (TableStream*) i; filter->tab->RemoveAll(); } // put interface: delete old entry from table. 
bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { - Filter *i = FindFilter(reader); + Stream *i = FindStream(reader); if ( i == 0 ) { reporter->InternalError("Unknown reader in Delete"); return false; @@ -1273,7 +1281,7 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { int readVals = 0; if ( i->filter_type == TABLE_FILTER ) { - TableFilter* filter = (TableFilter*) i; + TableStream* filter = (TableStream*) i; Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); assert(idxval != 0); readVals = filter->num_idx_fields + filter->num_val_fields; @@ -1320,7 +1328,7 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { } } else if ( i->filter_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - readVals = SendEventFilterEvent(i, type, vals); + readVals = SendEventStreamEvent(i, type, vals); success = true; } else { assert(false); @@ -1840,9 +1848,9 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) { return NULL; } -Manager::Filter* Manager::FindFilter(const string &name) +Manager::Stream* Manager::FindStream(const string &name) { - for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) + for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) { if ( (*s).second->name == name ) { @@ -1853,9 +1861,9 @@ Manager::Filter* Manager::FindFilter(const string &name) return 0; } -Manager::Filter* Manager::FindFilter(ReaderFrontend* reader) +Manager::Stream* Manager::FindStream(ReaderFrontend* reader) { - map::iterator s = readers.find(reader); + map::iterator s = readers.find(reader); if ( s != readers.end() ) { return s->second; } diff --git a/src/input/Manager.h b/src/input/Manager.h index c6dd40bd95..8f09828988 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -107,25 +107,25 @@ protected: // Functions are called from the ReaderBackend to notify the manager, that a filter has been removed // or a stream has been closed. // Used to prevent race conditions where data for a specific filter is still in the queue when the - // RemoveFilter directive is executed by the main thread. + // RemoveStream directive is executed by the main thread. // This makes sure all data that has ben queued for a filter is still received. bool RemoveStreamContinuation(ReaderFrontend* reader); private: - class Filter; - class TableFilter; - class EventFilter; + class Stream; + class TableStream; + class EventStream; - bool CreateStream(Filter*, RecordVal* description); + bool CreateStream(Stream*, RecordVal* description); // SendEntry implementation for Tablefilter - int SendEntryTable(Filter* i, const threading::Value* const *vals); + int SendEntryTable(Stream* i, const threading::Value* const *vals); // Put implementation for Tablefilter - int PutTable(Filter* i, const threading::Value* const *vals); + int PutTable(Stream* i, const threading::Value* const *vals); // SendEntry and Put implementation for Eventfilter - int SendEventFilterEvent(Filter* i, EnumVal* type, const threading::Value* const *vals); + int SendEventStreamEvent(Stream* i, EnumVal* type, const threading::Value* const *vals); // Checks is a bro type can be used for data reading. 
The equivalend in threading cannot be used, because we have support different types // from the log framework @@ -163,12 +163,12 @@ private: // Converts a Bro ListVal to a RecordVal given the record type RecordVal* ListValToRecordVal(ListVal* list, RecordType *request_type, int* position); - Filter* FindFilter(const string &name); - Filter* FindFilter(ReaderFrontend* reader); + Stream* FindStream(const string &name); + Stream* FindStream(ReaderFrontend* reader); - enum FilterType { TABLE_FILTER, EVENT_FILTER }; + enum StreamType { TABLE_FILTER, EVENT_FILTER }; - map readers; + map readers; }; From bfa2720a81efbab660fa34b7e382b81b10cb12c5 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sun, 15 Apr 2012 22:52:48 -0400 Subject: [PATCH 225/651] Removing QR flag from DNS log in response to question on mailing list. --- scripts/base/protocols/dns/main.bro | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/base/protocols/dns/main.bro b/scripts/base/protocols/dns/main.bro index f73a947b5f..c50a8bdc54 100644 --- a/scripts/base/protocols/dns/main.bro +++ b/scripts/base/protocols/dns/main.bro @@ -39,8 +39,6 @@ export { rcode: count &log &optional; ## A descriptive name for the response code value. rcode_name: string &log &optional; - ## Whether the message is a query (F) or response (T). - QR: bool &log &default=F; ## The Authoritative Answer bit for response messages specifies that ## the responding name server is an authority for the domain name ## in the question section. From 48e05621c082983c8eb103499a4151cd320eedbd Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 16 Apr 2012 14:49:24 -0700 Subject: [PATCH 226/651] update description to current interface. But this needs to get longer... --- doc/input.rst | 128 +++++++++++++++++++++----------------------------- 1 file changed, 54 insertions(+), 74 deletions(-) diff --git a/doc/input.rst b/doc/input.rst index e201af9fed..d9fe8aa6b8 100644 --- a/doc/input.rst +++ b/doc/input.rst @@ -20,11 +20,9 @@ very similar to the abstracts used in the logging framework: Input Streams An input stream corresponds to a single input source (usually a textfile). It defined the information necessary - to find the source (e.g. the filename) - - Filters - Each input stream has a set of filters attached to it, that - determine exaclty what kind of information is read. + to find the source (e.g. the filename), the reader that it used + to get data from it (see below). + It also defines exactly what data is read from the input source. There are two different kind of streams, event streams and table streams. By default, event streams generate an event for each line read @@ -41,28 +39,37 @@ very similar to the abstracts used in the logging framework: one event per line. -Basics -====== +Event Streams +============= For examples, please look at the unit tests in ``testing/btest/scripts/base/frameworks/input/``. -A very basic example to open an input stream is: +Event Streams are streams that generate an event for each line in of the input source. + +For example, a simple stream retrieving the fields ``i`` and ``b`` from an inputSource +could be defined as follows: .. 
code:: bro + + type Val: record { + i: int; + b: bool; + }; + + event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool) { + # work with event data + } - module Foo; - - export { - # Create an ID for our new stream - redef enum Input::ID += { INPUT }; + event bro_init { + Input::add_event([$source="input.log", $name="input", $fields=Val, $ev=line]); } - event bro_init() { - Input::create_stream(FOO::INPUT, [$source="input.log"]); - } +The fields that can be set for an event stream are: -The fields that can be set when creating a stream are: + ``want_record`` + Boolean value, that defines if the event wants to receive the fields inside of + a single record value, or individually (default). ``source`` A mandatory string identifying the source of the data. @@ -81,49 +88,9 @@ The fields that can be set when creating a stream are: ``STREAM`` means that the data from the file is streamed. Events / table entries will be generated as new data is added to the file. - ``autostart`` - If set to yes, the first update operation is triggered automatically after the first filter has been added to the stream. - This has to be set to false if several filters are added to the input source. - In this case Input::force_update has to be called manually once after all filters have been added. - -Filters -======= - -Each filter defines the data fields that it wants to receive from the respective -input file. Depending on the type of filter, events or a table are created from -the data in the source file. - -Event Filters -------------- - -Event filters are filters that generate an event for each line in of the input source. - -For example, a simple filter retrieving the fields ``i`` and ``b`` from an inputSource -could be defined as follows: - -.. code:: bro - - type Val: record { - i: int; - b: bool; - }; - - event line(tpe: Input::Event, i: int, b: bool) { - # work with event data - } - - event bro_init { - # Input stream definition, etc - ... - - Input::add_eventfilter(Foo::INPUT, [$name="input", $fields=Val, $ev=line]); - } - -The fields that can be set for an event filter are: - ``name`` - A mandatory name for the filter that can later be used - to manipulate it further. + A mandatory name for the stream that can later be used + to remove it. ``fields`` Name of a record type containing the fields, which should be retrieved from @@ -138,16 +105,14 @@ The fields that can be set for an event filter are: been ``CHANGED`` or ``DELETED``. Singe the ascii reader cannot track this information for event filters, the value is always ``NEW`` at the moment. - ``want_record`` - Boolean value, that defines if the event wants to receive the fields inside of - a single record value, or individually (default). -Table Filters -------------- -Table filters are the second, more complex type of filter. +Table Streams +============= -Table filters store the information they read from an input source in a bro table. For example, +Table streams are the second, more complex type of input streams. + +Table streams store the information they read from an input source in a bro table. For example, when reading a file that contains ip addresses and connection attemt information one could use an approach similar to this: @@ -164,18 +129,33 @@ an approach similar to this: global conn_attempts: table[addr] of count = table(); event bro_init { - # Input stream definitions, etc. - ... 
- - Input::add_tablefilter(Foo::INPUT, [$name="ssh", $idx=Idx, $val=Val, $destination=conn_attempts]); - - # read the file after all filters have been set (only needed if autostart is set to false) - Input::force_update(Foo::INPUT); + Input::add_table([$source="input.txt", $name="input", $idx=Idx, $val=Val, $destination=conn_attempts]); } The table conn_attempts will then contain the information about connection attemps. -The possible fields that can be set for an table filter are: +The possible fields that can be set for an table stream are: + + ``want_record`` + Boolean value, that defines if the event wants to receive the fields inside of + a single record value, or individually (default). + + ``source`` + A mandatory string identifying the source of the data. + For the ASCII reader this is the filename. + + ``reader`` + The reader used for this stream. Default is ``READER_ASCII``. + + ``mode`` + The mode in which the stream is opened. Possible values are ``MANUAL``, ``REREAD`` and ``STREAM``. + Default is ``MANUAL``. + ``MANUAL`` means, that the files is not updated after it has been read. Changes to the file will not + be reflected in the data bro knows. + ``REREAD`` means that the whole file is read again each time a change is found. This should be used for + files that are mapped to a table where individual lines can change. + ``STREAM`` means that the data from the file is streamed. Events / table entries will be generated as new + data is added to the file. ``name`` A mandatory name for the filter that can later be used From 891c53277501ab3e6c2dfa555859f4fda1a40486 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 16 Apr 2012 14:48:33 -0700 Subject: [PATCH 227/651] DataSeries cleanup. --- src/Type.cc | 8 +- src/logging/writers/Ascii.cc | 17 +- src/logging/writers/DataSeries.cc | 267 ++++++++++++++---------------- src/logging/writers/DataSeries.h | 27 ++- src/threading/SerialTypes.cc | 14 ++ src/threading/SerialTypes.h | 10 +- 6 files changed, 162 insertions(+), 181 deletions(-) diff --git a/src/Type.cc b/src/Type.cc index 82221303af..d688b15376 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -15,10 +15,9 @@ extern int generate_documentation; +// Note: This function must be thread-safe. const char* type_name(TypeTag t) { - static char errbuf[512]; - static const char* type_names[int(NUM_TYPES)] = { "void", "bool", "int", "count", "counter", @@ -37,10 +36,7 @@ const char* type_name(TypeTag t) }; if ( int(t) >= NUM_TYPES ) - { - snprintf(errbuf, sizeof(errbuf), "%d: not a type tag", int(t)); - return errbuf; - } + return "type_name(): not a type tag"; return type_names[int(t)]; } diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 2f25ac418f..3a35eea380 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -86,6 +86,9 @@ bool Ascii::DoInit(string path, int num_fields, const Field* const * fields) if ( include_header ) { + string names; + string types; + string str = string(header_prefix, header_prefix_len) + "separator " // Always use space as separator here. 
+ get_escaped_string(string(separator, separator_len), false) @@ -103,9 +106,6 @@ bool Ascii::DoInit(string path, int num_fields, const Field* const * fields) WriteHeaderField("path", get_escaped_string(path, false))) ) goto write_error; - string names; - string types; - for ( int i = 0; i < num_fields; ++i ) { if ( i > 0 ) @@ -114,15 +114,8 @@ bool Ascii::DoInit(string path, int num_fields, const Field* const * fields) types += string(separator, separator_len); } - const Field* field = fields[i]; - names += field->name; - types += type_name(field->type); - if ( (field->type == TYPE_TABLE) || (field->type == TYPE_VECTOR) ) - { - types += "["; - types += type_name(field->subtype); - types += "]"; - } + names += fields[i]->name; + types += fields[i]->TypeName(); } if ( ! (WriteHeaderField("fields", names) diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index 5ee8a812da..f6b26dc494 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -15,17 +15,15 @@ using namespace logging; using namespace writer; std::string DataSeries::LogValueToString(threading::Value *val) -{ - const int strsz = 1024; - char strbuf[strsz]; - - // In some cases, no value is attached. If this is the case, return an empty string. - if(!val->present) + { + // In some cases, no value is attached. If this is the case, return + // an empty string. + if( ! val->present ) return ""; std::ostringstream ostr; - switch(val->type) - { + + switch(val->type) { case TYPE_BOOL: return (val->val.int_val ? "true" : "false"); @@ -40,19 +38,22 @@ std::string DataSeries::LogValueToString(threading::Value *val) return ostr.str(); case TYPE_SUBNET: - ostr << Render(val->val.subnet_val); + ostr << Render(val->val.subnet_val); return ostr.str(); case TYPE_ADDR: - ostr << Render(val->val.addr_val); + ostr << Render(val->val.addr_val); return ostr.str(); - // Note: These two cases are relatively special. We need to convert these values into their integer equivalents - // to maximize precision. At the moment, there won't be a noticeable effect (Bro uses the double format everywhere - // internally, so we've already lost the precision we'd gain here), but timestamps may eventually switch to this - // representation within Bro. + // Note: These two cases are relatively special. We need to convert + // these values into their integer equivalents to maximize precision. + // At the moment, there won't be a noticeable effect (Bro uses the + // double format everywhere internally, so we've already lost the + // precision we'd gain here), but timestamps may eventually switch to + // this representation within Bro. // - // in the near-term, this *should* lead to better pack_relative (and thus smaller output files). + // In the near-term, this *should* lead to better pack_relative (and + // thus smaller output files). case TYPE_TIME: case TYPE_INTERVAL: if ( ds_use_integer_for_time ) @@ -69,59 +70,57 @@ std::string DataSeries::LogValueToString(threading::Value *val) case TYPE_ENUM: case TYPE_STRING: case TYPE_FILE: - { - int size = val->val.string_val->size(); - string tmpString = ""; - if(size) - tmpString = string(val->val.string_val->data(), val->val.string_val->size()); - else - tmpString = string(""); - return tmpString; - } - case TYPE_TABLE: - { - if ( ! val->val.set_val.size ) - { + case TYPE_FUNC: + if ( ! val->val.string_val->size() ) + return ""; + + return string(val->val.string_val->data(), val->val.string_val->size()); + + case TYPE_TABLE: + { + if ( ! 
val->val.set_val.size ) return ""; - } string tmpString = ""; + for ( int j = 0; j < val->val.set_val.size; j++ ) { if ( j > 0 ) - tmpString += ":"; //TODO: Specify set separator char in configuration. + tmpString += ds_set_separator; tmpString += LogValueToString(val->val.set_val.vals[j]); } + return tmpString; - } + } + case TYPE_VECTOR: - { + { if ( ! val->val.vector_val.size ) - { return ""; - } string tmpString = ""; + for ( int j = 0; j < val->val.vector_val.size; j++ ) { if ( j > 0 ) - tmpString += ":"; //TODO: Specify set separator char in configuration. + tmpString += ds_set_separator; tmpString += LogValueToString(val->val.vector_val.vals[j]); } return tmpString; - } + } + default: - return "???"; + InternalError(Fmt("unknown type %s in DataSeries::LogValueToString", type_name(val->type))); + return "cannot be reached"; } } string DataSeries::GetDSFieldType(const threading::Field *field) { - switch(field->type) - { + switch(field->type) { case TYPE_BOOL: return "bool"; @@ -145,75 +144,49 @@ string DataSeries::GetDSFieldType(const threading::Field *field) case TYPE_FILE: case TYPE_TABLE: case TYPE_VECTOR: - default: + case TYPE_FUNC: return "variable32"; - } -} - -string DataSeries::GetBroTypeString(const threading::Field *field) -{ - switch(field->type) - { - case TYPE_BOOL: - return "bool"; - case TYPE_COUNT: - return "count"; - case TYPE_COUNTER: - return "counter"; - case TYPE_PORT: - return "port"; - case TYPE_INT: - return "int"; - case TYPE_TIME: - return "time"; - case TYPE_INTERVAL: - return "interval"; - case TYPE_DOUBLE: - return "double"; - case TYPE_SUBNET: - return "subnet"; - case TYPE_ADDR: - return "addr"; - case TYPE_ENUM: - return "enum"; - case TYPE_STRING: - return "string"; - case TYPE_FILE: - return "file"; - case TYPE_TABLE: - return "table"; - case TYPE_VECTOR: - return "vector"; default: - return "???"; + InternalError(Fmt("unknown type %s in DataSeries::GetDSFieldType", type_name(field->type))); + return "cannot be reached"; } } string DataSeries::BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle) -{ - if("" == sTitle) - { + { + if( ! 
sTitle.size() ) sTitle = "GenericBroStream"; - } - string xmlschema; - xmlschema = "\n"; - for(size_t i = 0; i < vals.size(); ++i) + + string xmlschema = "\n"; + + for( size_t i = 0; i < vals.size(); ++i ) { - xmlschema += "\t\n"; + xmlschema += "\t\n"; } + xmlschema += "\n"; - for(size_t i = 0; i < vals.size(); ++i) + + for( size_t i = 0; i < vals.size(); ++i ) { - xmlschema += "\n"; + xmlschema += "\n"; } + return xmlschema; } std::string DataSeries::GetDSOptionsForType(const threading::Field *field) { - switch(field->type) - { + switch( field->type ) { case TYPE_TIME: case TYPE_INTERVAL: { @@ -233,6 +206,7 @@ std::string DataSeries::GetDSOptionsForType(const threading::Field *field) case TYPE_TABLE: case TYPE_VECTOR: return "pack_unique=\"yes\""; + default: return ""; } @@ -242,11 +216,13 @@ std::string DataSeries::GetDSOptionsForType(const threading::Field *field) DataSeries::DataSeries(WriterFrontend* frontend) : WriterBackend(frontend) { - ds_compression = string((const char *)BifConst::LogDataSeries::compression->Bytes(), BifConst::LogDataSeries::compression->Len()); + ds_compression = string((const char *)BifConst::LogDataSeries::compression->Bytes(), + BifConst::LogDataSeries::compression->Len()); ds_dump_schema = BifConst::LogDataSeries::dump_schema; ds_extent_size = BifConst::LogDataSeries::extent_size; ds_num_threads = BifConst::LogDataSeries::num_threads; ds_use_integer_for_time = BifConst::LogDataSeries::use_integer_for_time; + ds_set_separator = ","; } DataSeries::~DataSeries() @@ -258,20 +234,23 @@ bool DataSeries::OpenLog(string path) log_file = new DataSeriesSink(path + ".ds", compress_type); log_file->writeExtentLibrary(log_types); - for(size_t i = 0; i < schema_list.size(); ++i) - extents.insert(std::make_pair(schema_list[i].field_name, GeneralField::create(log_series, schema_list[i].field_name))); + for( size_t i = 0; i < schema_list.size(); ++i ) + extents.insert(std::make_pair(schema_list[i].field_name, + GeneralField::create(log_series, schema_list[i].field_name))); - if(ds_extent_size < ROW_MIN) + if ( ds_extent_size < ROW_MIN ) { - fprintf(stderr, "%d is not a valid value for 'rows'. Using min of %d instead.\n", (int)ds_extent_size, (int)ROW_MIN); - ds_extent_size = ROW_MIN; + Warning(Fmt("%d is not a valid value for 'rows'. Using min of %d instead", (int)ds_extent_size, (int)ROW_MIN)); + ds_extent_size = ROW_MIN; } - else if(ds_extent_size > ROW_MAX) + + else if( ds_extent_size > ROW_MAX ) { - fprintf(stderr, "%d is not a valid value for 'rows'. Using max of %d instead.\n", (int)ds_extent_size, (int)ROW_MAX); - ds_extent_size = ROW_MAX; + Warning(Fmt("%d is not a valid value for 'rows'. Using max of %d instead", (int)ds_extent_size, (int)ROW_MAX)); + ds_extent_size = ROW_MAX; } - log_output = new OutputModule(*log_file, log_series, log_type, ds_extent_size); + + log_output = new OutputModule(*log_file, log_series, *log_type, ds_extent_size); return true; } @@ -283,22 +262,22 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con // use that schema to build our output logfile and prepare it to be // written to. - // Note: compressor count must be set *BEFORE* DataSeriesSink is instantiated. - if(ds_num_threads < THREAD_MIN && ds_num_threads != 0) + // Note: compressor count must be set *BEFORE* DataSeriesSink is + // instantiated. + if( ds_num_threads < THREAD_MIN && ds_num_threads != 0 ) { - fprintf(stderr, "%d is too few threads! Using %d instead\n", (int)ds_num_threads, (int)THREAD_MIN); + Warning(Fmt("%d is too few threads! 
Using %d instead", (int)ds_num_threads, (int)THREAD_MIN)); ds_num_threads = THREAD_MIN; } - if(ds_num_threads > THREAD_MAX) + + if( ds_num_threads > THREAD_MAX ) { - fprintf(stderr, "%d is too many threads! Dropping back to %d\n", (int)ds_num_threads, (int)THREAD_MAX); + Warning(Fmt("%d is too many threads! Dropping back to %d", (int)ds_num_threads, (int)THREAD_MAX)); ds_num_threads = THREAD_MAX; } - if(ds_num_threads > 0) - { + if( ds_num_threads > 0 ) DataSeriesSink::setCompressorCount(ds_num_threads); - } for ( int i = 0; i < num_fields; i++ ) { @@ -307,65 +286,59 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con val.ds_type = GetDSFieldType(field); val.field_name = string(field->name); val.field_options = GetDSOptionsForType(field); - val.bro_type = GetBroTypeString(field); + val.bro_type = field->TypeName(); schema_list.push_back(val); } + string schema = BuildDSSchemaFromFieldTypes(schema_list, path); - if(ds_dump_schema) + + if( ds_dump_schema ) { - FILE * pFile; - pFile = fopen ( string(path + ".ds.xml").c_str() , "wb" ); - if(NULL == pFile) + FILE* pFile = fopen ( string(path + ".ds.xml").c_str() , "wb" ); + + if( pFile ) { - perror("Could not dump schema"); + fwrite(schema.c_str(), 1, schema.length(), pFile); + fclose(pFile); } - fwrite (schema.c_str(), 1 , schema.length() , pFile ); - fclose (pFile); + + else + Error(Fmt("cannot dump schema: %s", strerror(errno))); } compress_type = Extent::compress_all; - if(ds_compression == "lzf") - { + if( ds_compression == "lzf" ) compress_type = Extent::compress_lzf; - } - else if(ds_compression == "lzo") - { + + else if( ds_compression == "lzo" ) compress_type = Extent::compress_lzo; - } - else if(ds_compression == "gz") - { + + else if( ds_compression == "gz" ) compress_type = Extent::compress_gz; - } - else if(ds_compression == "bz2") - { + + else if( ds_compression == "bz2" ) compress_type = Extent::compress_bz2; - } - else if(ds_compression == "none") - { + + else if( ds_compression == "none" ) compress_type = Extent::compress_none; - } - else if(ds_compression == "any") - { + + else if( ds_compression == "any" ) compress_type = Extent::compress_all; - } + else - { - fprintf(stderr, "%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'gz', 'bz2', 'none', 'any'\n", ds_compression.c_str()); - fprintf(stderr, "Defaulting to 'any'\n"); - } + Warning(Fmt("%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'gz', 'bz2', 'none', 'any'. Defaulting to 'any'", ds_compression.c_str())); log_type = const_cast(log_types.registerType(schema)); - log_series.setType(*log_type); return OpenLog(path); - } bool DataSeries::DoFlush() { - // Flushing is handled by DataSeries automatically, so this function doesn't do anything. + // Flushing is handled by DataSeries automatically, so this function + // doesn't do anything. return true; } @@ -377,7 +350,7 @@ void DataSeries::CloseLog() extents.clear(); // Don't delete the file before you delete the output, or bad things - // happen. + // will happen. 
delete log_output; delete log_file; @@ -396,14 +369,17 @@ bool DataSeries::DoWrite(int num_fields, const threading::Field* const * fields, threading::Value** vals) { log_output->newRecord(); - for(size_t i = 0; i < (size_t)num_fields; ++i) + + for( size_t i = 0; i < (size_t)num_fields; ++i ) { ExtentIterator iter = extents.find(fields[i]->name); assert(iter != extents.end()); + if( iter != extents.end() ) { GeneralField *cField = iter->second; - if(vals[i]->present) + + if( vals[i]->present ) cField->set(LogValueToString(vals[i])); } } @@ -413,7 +389,8 @@ bool DataSeries::DoWrite(int num_fields, const threading::Field* const * fields, bool DataSeries::DoRotate(string rotated_path, double open, double close, bool terminating) { - // Note that if DS files are rotated too often, the aggregate log size will be (much) larger. + // Note that if DS files are rotated too often, the aggregate log + // size will be (much) larger. CloseLog(); string dsname = Path() + ".ds"; diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h index 319cb72ec5..5faa87e1b2 100644 --- a/src/logging/writers/DataSeries.h +++ b/src/logging/writers/DataSeries.h @@ -6,13 +6,13 @@ #ifndef LOGGING_WRITER_DATA_SERIES_H #define LOGGING_WRITER_DATA_SERIES_H -#include "../WriterBackend.h" - #include #include #include #include +#include "../WriterBackend.h" + namespace logging { namespace writer { class DataSeries : public WriterBackend { @@ -24,6 +24,8 @@ public: { return new DataSeries(frontend); } protected: + // Overidden from WriterBackend. + virtual bool DoInit(string path, int num_fields, const threading::Field* const * fields); @@ -36,11 +38,11 @@ protected: virtual bool DoFinish(); private: - static const size_t ROW_MIN = 2048; // Minimum extent size. - static const size_t ROW_MAX = (1024 * 1024 * 100); // Maximum extent size. - static const size_t THREAD_MIN = 1; // Minimum number of compression threads that DataSeries may spawn. - static const size_t THREAD_MAX = 128; // Maximum number of compression threads that DataSeries may spawn. - static const size_t TIME_SCALE = 1000000; // Fixed-point multiplier for time values when converted to integers. + static const size_t ROW_MIN = 2048; // Minimum extent size. + static const size_t ROW_MAX = (1024 * 1024 * 100); // Maximum extent size. + static const size_t THREAD_MIN = 1; // Minimum number of compression threads that DataSeries may spawn. + static const size_t THREAD_MAX = 128; // Maximum number of compression threads that DataSeries may spawn. + static const size_t TIME_SCALE = 1000000; // Fixed-point multiplier for time values when converted to integers. struct SchemaValue { @@ -85,18 +87,10 @@ private: */ string BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle); - /** - * Takes a field type and converts it to a readable string. - * - * @param field We extract the type from this and convert it into a readable string. - * @return String representation of the field's type - */ - string GetBroTypeString(const threading::Field *field); - /** Closes the currently open file. */ void CloseLog(); - /** XXX */ + /** Opens a new file. 
*/ bool OpenLog(string path); typedef std::map ExtentMap; @@ -119,6 +113,7 @@ private: string ds_compression; bool ds_dump_schema; bool ds_use_integer_for_time; + string ds_set_separator; }; } diff --git a/src/threading/SerialTypes.cc b/src/threading/SerialTypes.cc index a5692b2ffd..5ab61b0d41 100644 --- a/src/threading/SerialTypes.cc +++ b/src/threading/SerialTypes.cc @@ -24,6 +24,20 @@ bool Field::Write(SerializationFormat* fmt) const return (fmt->Write(name, "name") && fmt->Write((int)type, "type") && fmt->Write((int)subtype, "subtype")); } +string Field::TypeName() const + { + string n = type_name(type); + + if ( (type == TYPE_TABLE) || (type == TYPE_VECTOR) ) + { + n += "["; + n += type_name(subtype); + n += "]"; + } + + return n; + } + Value::~Value() { if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE || type == TYPE_FUNC) diff --git a/src/threading/SerialTypes.h b/src/threading/SerialTypes.h index db7dc837bd..eee3b750fe 100644 --- a/src/threading/SerialTypes.h +++ b/src/threading/SerialTypes.h @@ -53,6 +53,12 @@ struct Field { * @return False if an error occured. */ bool Write(SerializationFormat* fmt) const; + + /** + * Returns a textual description of the field's type. This method is + * thread-safe. + */ + string TypeName() const; }; /** @@ -132,8 +138,8 @@ struct Value { /** * Returns true if the type can be represented by a Value. If - * `atomic_only` is true, will not permit composite types. - */ + * `atomic_only` is true, will not permit composite types. This + * method is thread-safe. */ static bool IsCompatibleType(BroType* t, bool atomic_only=false); private: From 08593c5147157511c5ca54872ad58c15dfd87431 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 16 Apr 2012 15:19:14 -0700 Subject: [PATCH 228/651] In threads, an internal error now immediately aborts. Otherwise, the error won't make it back to the main thread for a while and subsequent code in the thread would still execute. --- src/threading/MsgThread.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 0b91f8790a..c5777042f3 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -222,7 +222,9 @@ void MsgThread::InternalWarning(const char* msg) void MsgThread::InternalError(const char* msg) { - SendOut(new ReporterMessage(ReporterMessage::INTERNAL_ERROR, this, msg)); + // This one aborts immediately. + fprintf(stderr, "internal error in thread: %s\n", msg); + abort(); } #ifdef DEBUG From 91a3ce951812083dc017116f080fbdd7c3d2ea1b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 16 Apr 2012 15:20:10 -0700 Subject: [PATCH 229/651] Additional test output canonification for ds2txt's timestamps. 
--- .../ssh.ds.txt | 10 +++++----- .../http.ds.txt | 12 ++++++------ .../frameworks/logging/dataseries/test-logging.bro | 2 +- testing/external/subdir-btest.cfg | 2 +- testing/scripts/diff-remove-timestamps-dataseries | 6 ++++++ 5 files changed, 19 insertions(+), 13 deletions(-) create mode 100755 testing/scripts/diff-remove-timestamps-dataseries diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt index f66f40b701..05026a24ef 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt @@ -31,11 +31,11 @@ extent offset ExtentType 604 DataSeries: ExtentIndex # Extent, type='ssh' t id.orig_h id.orig_p id.resp_h id.resp_p status country -1.334e+09 1.2.3.4 1234 2.3.4.5 80 success unknown -1.334e+09 1.2.3.4 1234 2.3.4.5 80 failure US -1.334e+09 1.2.3.4 1234 2.3.4.5 80 failure UK -1.334e+09 1.2.3.4 1234 2.3.4.5 80 success BR -1.334e+09 1.2.3.4 1234 2.3.4.5 80 failure MX +X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 success unknown +X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 failure US +X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 failure UK +X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 success BR +X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 failure MX # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt index 49e431085c..a0c6cbbff3 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt @@ -55,18 +55,18 @@ - + - + extent offset ExtentType 40 DataSeries: XmlType -756 http -1144 DataSeries: ExtentIndex +768 http +1156 DataSeries: ExtentIndex # Extent, type='http' ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file 1.3e+09 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 0 0 0 304 Not Modified 0 @@ -86,5 +86,5 @@ ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri refer # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -756 http -1144 DataSeries: ExtentIndex +768 http +1156 DataSeries: ExtentIndex diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro index c7f8a5618f..76f2451477 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro @@ -2,7 +2,7 @@ # @TEST-REQUIRES: has-writer DataSeries && which ds2txt # # @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: ds2txt ssh.ds >ssh.ds.txt +# @TEST-EXEC: ds2txt ssh.ds | ${SCRIPTS}/diff-remove-timestamps-dataseries >ssh.ds.txt # @TEST-EXEC: btest-diff ssh.ds.txt module SSH; diff --git a/testing/external/subdir-btest.cfg b/testing/external/subdir-btest.cfg index c4e74f99fa..fba89fb724 100644 --- a/testing/external/subdir-btest.cfg +++ b/testing/external/subdir-btest.cfg @@ -10,7 +10,7 @@ 
BROPATH=`bash -c %(testbase)s/../../../build/bro-path-dev`:%(testbase)s/../scrip BRO_SEED_FILE=%(testbase)s/../random.seed TZ=UTC LC_ALL=C -PATH=%(testbase)s/../../../build/src:%(testbase)s/../../../aux/btest:%(default_path)s +PATH=%(testbase)s/../../../build/src:%(testbase)s/../../../aux/btest:%(testbase)s/../../scripts:%(default_path)s TEST_DIFF_CANONIFIER=%(testbase)s/../../scripts/diff-canonifier-external TEST_DIFF_BRIEF=1 TRACES=%(testbase)s/Traces diff --git a/testing/scripts/diff-remove-timestamps-dataseries b/testing/scripts/diff-remove-timestamps-dataseries new file mode 100755 index 0000000000..5b20f138af --- /dev/null +++ b/testing/scripts/diff-remove-timestamps-dataseries @@ -0,0 +1,6 @@ +#! /usr/bin/env bash +# +# Replace anything which looks like DataSeries timestamps (which is a double) with XXXs. + +sed 's/1\.[0-9]*e+09/X.XXXe+09/g' + From d1c6183620aa8ee73cd52ae8ac98b90213d093d8 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 16 Apr 2012 16:07:38 -0700 Subject: [PATCH 230/651] Starting DataSeries HowTo. --- doc/logging-dataseries.rst | 102 +++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 doc/logging-dataseries.rst diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst new file mode 100644 index 0000000000..5289bbaea9 --- /dev/null +++ b/doc/logging-dataseries.rst @@ -0,0 +1,102 @@ + +============================= +Binary Output with DataSeries +============================= + +.. rst-class:: opening + + Bro's default ASCII log format is not exactly the most efficient + way for storing large volumes of data. An an alternative, Bro comes + with experimental support for `DataSeries + `_ + output, an efficient binary format for recording structured bulk + data. DataSeries is developed and maintained at HP Labs. + +.. contents:: + +Installing DataSeries +--------------------- + +To use DataSeries, its libraries must be available at compile-time, +along with the supporting *Lintel* package. Generally, both are +distributed on `HP Labs' web site +`_. Currently, however, you need +to use recent developments of both packages with Bro, which you can +download from github like this:: + + git clone http://github.com/eric-anderson/Lintel + git clone http://github.com/eric-anderson/DataSeries + +To then build and install the two into ````, do:: + + ( cd Lintel && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX= .. && make && make install ) + ( cd DataSeries && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX= .. && make && make install ) + +Please refer to the packages' documentation for more information about +the installation process. In particular, there's more information on +required and optional `dependencies for Lintel +`_ +and `dependencies for DataSeries +`_ + +Compiling Bro with DataSeries Support +------------------------------------- + +Once you have installed DataSeries, Bro's ``configure`` should pick it +up automatically as long as it finds it in a standard system location. +Alternatively, you can specify the DataSeries installation prefix +manually with ``--with-dataseries=``. Keep an eye on +``configure``'s summary output, if it looks like this, Bro will indeed +compile in the DataSeries support:: + + # ./configure --with-dataseries=/usr/local + [...] + ====================| Bro Build Summary |===================== + [...] + DataSeries: true + [...] 
+ ================================================================ + +Activating DataSeries +--------------------- + +The direct way to use DataSeries is to switch *all* log files over to +the binary format. To do that, just add ``redef +Log::default_writer=Log::WRITER_DATASERIES;`` to your ``local.bro`. +For testing, you can also just pass that on the command line:: + + bro -r trace.pcap Log::default_writer=Log::WRITER_DATASERIES + +With that, Bro will now write all its output into DataSeries files +``*.ds``. You can inspect these using DataSeries's set of command line +tools, which its installation process will have installed into +``/bin``. For example, to convert a file back into an ASCII +representation:: + # ds2txt conn .log + [... We skip a bunch of meta data here ...] + ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts res + 1.3e+09 9CqElRsB9Q 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 + 1.3e+09 3bNPfUWuIhb fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 + 1.3e+09 ZoDDN7YuYx3 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 + [...] + +Note that is ASCII format is *not* equivalent to Bro's default format +as DataSeries uses a different internal representation. + +You can also switch only individual files over to DataSeries by adding +code like this to your ``local.bro``:: + + TODO + +Bro's DataSeries writer comes with a few tuning options, see +:doc:`scripts/base/frameworks/logging/writers/dataseries`. + +Working with DataSeries +======================= + +Here are few examples of using DataSeries command line tools to work +with the output files. + +TODO. + + From 18f5018a0d6cb7e3595fb186d58016b9c2ce2843 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sun, 15 Apr 2012 22:52:48 -0400 Subject: [PATCH 231/651] Removing QR flag from DNS log in response to question on mailing list. --- scripts/base/protocols/dns/main.bro | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/base/protocols/dns/main.bro b/scripts/base/protocols/dns/main.bro index f73a947b5f..c50a8bdc54 100644 --- a/scripts/base/protocols/dns/main.bro +++ b/scripts/base/protocols/dns/main.bro @@ -39,8 +39,6 @@ export { rcode: count &log &optional; ## A descriptive name for the response code value. rcode_name: string &log &optional; - ## Whether the message is a query (F) or response (T). - QR: bool &log &default=F; ## The Authoritative Answer bit for response messages specifies that ## the responding name server is an authority for the domain name ## in the question section. From fe2535b08ddc55203df035c74f0205b2acb61061 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 16 Apr 2012 18:08:16 -0700 Subject: [PATCH 232/651] Updating baselines for DNS change. 
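The QR column dropped from dns.log in the preceding patch remains reachable at the script level. The following is a hypothetical local script, not part of this patch series, sketching one way a site could keep logging the flag; it assumes the stock ``base/protocols/dns`` scripts (which populate ``c$dns``), and the field name ``qr`` is purely illustrative:

.. code:: bro

    @load base/protocols/dns

    # Add an optional query/response column back to dns.log.
    redef record DNS::Info += {
        ## Whether the message was a response (T) or a query (F).
        qr: bool &log &default=F;
    };

    # dns_message is raised for both queries and replies; copy the header's
    # QR bit into the per-connection DNS state set up by the base scripts.
    event dns_message(c: connection, is_orig: bool, msg: dns_msg, len: count)
        {
        if ( c?$dns )
            c$dns$qr = msg$QR;
        }
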
--- testing/btest/Baseline/core.ipv6-frag/dns.log | 8 ++++---- .../scripts.policy.protocols.dns.event-priority/dns.log | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/testing/btest/Baseline/core.ipv6-frag/dns.log b/testing/btest/Baseline/core.ipv6-frag/dns.log index 50c9684bac..251f35d789 100644 --- a/testing/btest/Baseline/core.ipv6-frag/dns.log +++ b/testing/btest/Baseline/core.ipv6-frag/dns.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path dns -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name QR AA TC RD RA Z answers TTLs -#types time string addr port addr port enum count string count string count string count string bool bool bool bool bool count vector[string] vector[interval] -1331084278.438444 UWkUyAuUGXf 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51850 2607:f740:b::f93 53 udp 3903 txtpadding_323.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR F T F T F 0 This TXT record should be ignored 1.000000 -1331084293.592245 arKYeMETxOg 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51851 2607:f740:b::f93 53 udp 40849 txtpadding_3230.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR F T F T F 0 This TXT record should be ignored 1.000000 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs +#types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] +1331084278.438444 UWkUyAuUGXf 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51850 2607:f740:b::f93 53 udp 3903 txtpadding_323.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 +1331084293.592245 arKYeMETxOg 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51851 2607:f740:b::f93 53 udp 40849 txtpadding_3230.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 diff --git a/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log b/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log index 9d80898e0f..f636093677 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log +++ b/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log @@ -3,6 +3,6 @@ #empty_field (empty) #unset_field - #path dns -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name QR AA TC RD RA Z answers TTLs auth addl -#types time string addr port addr port enum count string count string count string count string bool bool bool bool bool count vector[string] vector[interval] table[string] table[string] -930613226.529070 UWkUyAuUGXf 212.180.42.100 25000 131.243.64.3 53 tcp 34798 - - - - - 0 NOERROR F F F F T 0 4.3.2.1 31337.000000 - - +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs auth addl +#types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] table[string] table[string] +930613226.529070 UWkUyAuUGXf 212.180.42.100 25000 131.243.64.3 53 tcp 34798 - - - - - 0 NOERROR F F F T 0 4.3.2.1 31337.000000 - - From fede289d74e1e651ea209b938c8a51204582a155 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 16 
Apr 2012 18:12:25 -0700 Subject: [PATCH 233/651] Updating submodule(s). [nomail] --- aux/broctl | 2 +- aux/btest | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aux/broctl b/aux/broctl index 2524b3aeda..d50e0efe13 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 2524b3aeda916b5daaad215c42860c1477c2606b +Subproject commit d50e0efe133c50d824753c86d068467e54a3c47d diff --git a/aux/btest b/aux/btest index 8da6c55697..1897d224ce 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 8da6c55697ff580600cfff474f4ccba2a592f911 +Subproject commit 1897d224ce295e91d20e458851759c99734a0a74 From f85e0bfe9a97daacbe1d5011834ebe6289d9abf8 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 16 Apr 2012 18:15:05 -0700 Subject: [PATCH 234/651] DataSeries TODO list with open issues/questions. --- doc/logging-dataseries.rst | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst index 5289bbaea9..e530ba7c0b 100644 --- a/doc/logging-dataseries.rst +++ b/doc/logging-dataseries.rst @@ -99,4 +99,32 @@ with the output files. TODO. +TODO +==== + +* I'm seeing lots of warning on stderr:: + + Warning, while packing field ts of record 1, error was > 10%: + (1334620000 / 1000000 = 1334.62, round() = 1335) + Warning, while packing field not_valid_after of record 11, error was > 10%: + (1346460000 / 1000000 = 1346.46, round() = 1346) + +* The compiler warn about a depracated method and I'm not immediately + seeing how to avoid using that. + +* For testing our script-level options: + + - Can we get the extentsize from a ``.ds`` file? + - Can we get the compressio level from a ``.ds`` file? + +* ds2txt can apparently not read a file that is currently being + written. That's not good for the spool directory:: + + # ds2txt http.ds + **** Assertion failure in file + /DataSeriesSink.cpp, line 301 + **** Failed expression: tail[i] == 0xFF + **** Details: bad header for the tail of http.ds! + + Can that be worked around? From b3596f28d7d865763d554cceac2d77f5bffd2b99 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 17 Apr 2012 17:40:39 -0700 Subject: [PATCH 235/651] Updating submodule(s). [nomail] --- CHANGES | 7 +++++++ VERSION | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- 5 files changed, 11 insertions(+), 4 deletions(-) diff --git a/CHANGES b/CHANGES index 96f943d207..adebfff084 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,11 @@ +2.0-281 | 2012-04-17 17:40:39 -0700 + + * Small updates for the bittorrent analyzer to support 64bit types + in binpac. (Seth Hall) + + * Removed the attempt at bittorrent resynchronization. 
(Seth Hall) + 2.0-276 | 2012-04-17 17:35:56 -0700 * Add more support for 's that lack some structure diff --git a/VERSION b/VERSION index 04d66ce0ad..e628d94e94 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-276 +2.0-281 diff --git a/aux/bro-aux b/aux/bro-aux index 12d32194c1..d885987e79 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 12d32194c19d2dce06818588a2aeccf234de1889 +Subproject commit d885987e7968669e34504b0403ac89bd13928e9a diff --git a/aux/broccoli b/aux/broccoli index 60898666ba..bead1168ae 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 60898666ba1df1913c08ad5045b1e56f974060cc +Subproject commit bead1168ae9c2d2ae216dd58522fbc05498ff2c8 diff --git a/aux/broctl b/aux/broctl index d50e0efe13..44cc3de5f6 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit d50e0efe133c50d824753c86d068467e54a3c47d +Subproject commit 44cc3de5f6f98a86b2516bdc48dd168e6a6a28fd From 94c666f30523520f220147ac303160b74bf9f268 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 17 Apr 2012 17:42:38 -0700 Subject: [PATCH 236/651] Updating submodule(s). [nomail] --- aux/binpac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/binpac b/aux/binpac index bdc1cb65b4..71c37019bc 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit bdc1cb65b49c75d171eac58335a763f74a5bf930 +Subproject commit 71c37019bc371eb7863fb6aa47a7daa4540f4f1f From b933184b2590edc6e835bc93466e682e2318acc8 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 18 Apr 2012 13:13:56 -0500 Subject: [PATCH 237/651] Changes related to ICMPv6 Neighbor Discovery messages. - The 'icmp_conn' record now contains an 'hlim' field since hop limit in the IP header is an interesting field for at least these ND messages. - Changed 'icmp_router_advertisement' event parameters. 'router_lifetime' is now an interval. Fix 'reachable_time' and 'retrans_timer' using wrong internal Val type for intervals. Made more of the known router advertisement flags available through boolean parameters. - Changed 'icmp_neighbor_advertisement' event parameters to add more of the known boolean flags. --- scripts/base/init-bare.bro | 1 + src/ICMP.cc | 61 +++++++++++------- src/ICMP.h | 6 +- src/event.bif | 35 +++++++++- .../Baseline/core.icmp.icmp-context/output | 6 +- .../Baseline/core.icmp.icmp-events/output | 12 ++-- .../Baseline/core.icmp.icmp6-context/output | 8 +-- .../Baseline/core.icmp.icmp6-events/output | 49 ++++++++------ .../Traces/icmp/icmp6-router-advert.pcap | Bin 110 -> 110 bytes testing/btest/core/icmp/icmp6-events.test | 19 +++++- 10 files changed, 134 insertions(+), 63 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index a40443edb9..8f428b8549 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -92,6 +92,7 @@ type icmp_conn: record { itype: count; ##< The ICMP type of the packet that triggered the instantiation of the record. icode: count; ##< The ICMP code of the packet that triggered the instantiation of the record. len: count; ##< The length of the ICMP payload of the packet that triggered the instantiation of the record. + hlim: count; ##< The encapsulating IP header's Hop Limit value. v6: bool; ##< True if it's an ICMPv6 packet. 
}; diff --git a/src/ICMP.cc b/src/ICMP.cc index 5e1eeb66e4..dd2108ebf0 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -131,7 +131,7 @@ void ICMP_Analyzer::NextICMP4(double t, const struct icmp* icmpp, int len, int c break; default: - ICMPEvent(icmp_sent, icmpp, len, 0); + ICMPEvent(icmp_sent, icmpp, len, 0, ip_hdr); break; } } @@ -181,23 +181,25 @@ void ICMP_Analyzer::NextICMP6(double t, const struct icmp* icmpp, int len, int c case MLD_LISTENER_REDUCTION: #endif default: - ICMPEvent(icmp_sent, icmpp, len, 1); + ICMPEvent(icmp_sent, icmpp, len, 1, ip_hdr); break; } } -void ICMP_Analyzer::ICMPEvent(EventHandlerPtr f, const struct icmp* icmpp, int len, int icmpv6) +void ICMP_Analyzer::ICMPEvent(EventHandlerPtr f, const struct icmp* icmpp, + int len, int icmpv6, const IP_Hdr* ip_hdr) { if ( ! f ) return; val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, icmpv6)); + vl->append(BuildICMPVal(icmpp, len, icmpv6, ip_hdr)); ConnectionEvent(f, vl); } -RecordVal* ICMP_Analyzer::BuildICMPVal(const struct icmp* icmpp, int len, int icmpv6) +RecordVal* ICMP_Analyzer::BuildICMPVal(const struct icmp* icmpp, int len, + int icmpv6, const IP_Hdr* ip_hdr) { if ( ! icmp_conn_val ) { @@ -208,7 +210,8 @@ RecordVal* ICMP_Analyzer::BuildICMPVal(const struct icmp* icmpp, int len, int ic icmp_conn_val->Assign(2, new Val(icmpp->icmp_type, TYPE_COUNT)); icmp_conn_val->Assign(3, new Val(icmpp->icmp_code, TYPE_COUNT)); icmp_conn_val->Assign(4, new Val(len, TYPE_COUNT)); - icmp_conn_val->Assign(5, new Val(icmpv6, TYPE_BOOL)); + icmp_conn_val->Assign(5, new Val(ip_hdr->TTL(), TYPE_COUNT)); + icmp_conn_val->Assign(6, new Val(icmpv6, TYPE_BOOL)); } Ref(icmp_conn_val); @@ -494,7 +497,7 @@ void ICMP_Analyzer::Echo(double t, const struct icmp* icmpp, int len, val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, ip_hdr->NextProto() != IPPROTO_ICMP)); + vl->append(BuildICMPVal(icmpp, len, ip_hdr->NextProto() != IPPROTO_ICMP, ip_hdr)); vl->append(new Val(iid, TYPE_COUNT)); vl->append(new Val(iseq, TYPE_COUNT)); vl->append(new StringVal(payload)); @@ -504,7 +507,7 @@ void ICMP_Analyzer::Echo(double t, const struct icmp* icmpp, int len, void ICMP_Analyzer::RouterAdvert(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_router_advertisement; uint32 reachable, retrans; @@ -514,19 +517,24 @@ void ICMP_Analyzer::RouterAdvert(double t, const struct icmp* icmpp, int len, val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); - vl->append(new Val(icmpp->icmp_num_addrs, TYPE_COUNT)); - vl->append(new Val(icmpp->icmp_wpa & 0x80, TYPE_BOOL)); - vl->append(new Val(htons(icmpp->icmp_lifetime), TYPE_COUNT)); - vl->append(new Val(reachable, TYPE_INTERVAL)); - vl->append(new Val(retrans, TYPE_INTERVAL)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); + vl->append(new Val(icmpp->icmp_num_addrs, TYPE_COUNT)); // Cur Hop Limit + vl->append(new Val(icmpp->icmp_wpa & 0x80, TYPE_BOOL)); // Managed + vl->append(new Val(icmpp->icmp_wpa & 0x40, TYPE_BOOL)); // Other + vl->append(new Val(icmpp->icmp_wpa & 0x20, TYPE_BOOL)); // Home Agent + vl->append(new Val((icmpp->icmp_wpa & 0x18)>>3, TYPE_COUNT)); // Pref + vl->append(new Val(icmpp->icmp_wpa & 0x04, TYPE_BOOL)); // Proxy + vl->append(new Val(icmpp->icmp_wpa & 0x02, TYPE_COUNT)); // Reserved + vl->append(new 
IntervalVal((double)ntohs(icmpp->icmp_lifetime), Seconds)); + vl->append(new IntervalVal((double)ntohl(reachable), Milliseconds)); + vl->append(new IntervalVal((double)ntohl(retrans), Milliseconds)); ConnectionEvent(f, vl); } void ICMP_Analyzer::NeighborAdvert(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_neighbor_advertisement; in6_addr tgtaddr; @@ -535,7 +543,10 @@ void ICMP_Analyzer::NeighborAdvert(double t, const struct icmp* icmpp, int len, val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); + vl->append(new Val(icmpp->icmp_num_addrs & 0x80, TYPE_BOOL)); // Router + vl->append(new Val(icmpp->icmp_num_addrs & 0x40, TYPE_BOOL)); // Solicited + vl->append(new Val(icmpp->icmp_num_addrs & 0x20, TYPE_BOOL)); // Override vl->append(new AddrVal(IPAddr(tgtaddr))); ConnectionEvent(f, vl); @@ -543,7 +554,7 @@ void ICMP_Analyzer::NeighborAdvert(double t, const struct icmp* icmpp, int len, void ICMP_Analyzer::NeighborSolicit(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_neighbor_solicitation; in6_addr tgtaddr; @@ -552,7 +563,7 @@ void ICMP_Analyzer::NeighborSolicit(double t, const struct icmp* icmpp, int len, val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); vl->append(new AddrVal(IPAddr(tgtaddr))); ConnectionEvent(f, vl); @@ -560,7 +571,7 @@ void ICMP_Analyzer::NeighborSolicit(double t, const struct icmp* icmpp, int len, void ICMP_Analyzer::Redirect(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_redirect; in6_addr tgtaddr, dstaddr; @@ -570,7 +581,7 @@ void ICMP_Analyzer::Redirect(double t, const struct icmp* icmpp, int len, val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); vl->append(new AddrVal(IPAddr(tgtaddr))); vl->append(new AddrVal(IPAddr(dstaddr))); @@ -579,7 +590,7 @@ void ICMP_Analyzer::Redirect(double t, const struct icmp* icmpp, int len, void ICMP_Analyzer::Router(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = 0; @@ -590,13 +601,13 @@ void ICMP_Analyzer::Router(double t, const struct icmp* icmpp, int len, break; case ICMP6_ROUTER_RENUMBERING: default: - ICMPEvent(icmp_sent, icmpp, len, 1); + ICMPEvent(icmp_sent, icmpp, len, 1, ip_hdr); return; } val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); ConnectionEvent(f, vl); } @@ -622,7 +633,7 @@ void ICMP_Analyzer::Context4(double t, const struct icmp* icmpp, { val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 0)); + vl->append(BuildICMPVal(icmpp, len, 0, ip_hdr)); vl->append(new Val(icmpp->icmp_code, TYPE_COUNT)); vl->append(ExtractICMP4Context(caplen, data)); ConnectionEvent(f, vl); @@ -658,7 +669,7 @@ void ICMP_Analyzer::Context6(double t, 
const struct icmp* icmpp, { val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); vl->append(new Val(icmpp->icmp_code, TYPE_COUNT)); vl->append(ExtractICMP6Context(caplen, data)); ConnectionEvent(f, vl); diff --git a/src/ICMP.h b/src/ICMP.h index 59a399f74f..33773b9762 100644 --- a/src/ICMP.h +++ b/src/ICMP.h @@ -33,7 +33,8 @@ protected: virtual bool IsReuse(double t, const u_char* pkt); virtual unsigned int MemoryAllocation() const; - void ICMPEvent(EventHandlerPtr f, const struct icmp* icmpp, int len, int icmpv6); + void ICMPEvent(EventHandlerPtr f, const struct icmp* icmpp, int len, + int icmpv6, const IP_Hdr* ip_hdr); void Echo(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); @@ -52,7 +53,8 @@ protected: void Describe(ODesc* d) const; - RecordVal* BuildICMPVal(const struct icmp* icmpp, int len, int icmpv6); + RecordVal* BuildICMPVal(const struct icmp* icmpp, int len, int icmpv6, + const IP_Hdr* ip_hdr); void NextICMP4(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr ); diff --git a/src/event.bif b/src/event.bif index 1ce8907f0b..5ef3e8f04b 100644 --- a/src/event.bif +++ b/src/event.bif @@ -960,9 +960,24 @@ event icmp_router_solicitation%(c: connection, icmp: icmp_conn%); ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## +## cur_hop_limit: The default value that should be placed in Hop Count field +## for outgoing IP packets. +## +## managed: Managed address configuration flag, :rfc:`4861`. +## +## other: Other stateful configuration flag, :rfc:`4861`. +## +## home_agent: Mobile IPv6 home agent flag, :rfc:`3775`. +## +## pref: Router selection preferences, :rfc:`4191`. +## +## proxy: Neighbor discovery proxy flag, :rfc:`4389`. +## +## rsv: Remaining two reserved bits of router advertisement flags. +## ## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent ## icmp_time_exceeded icmp_unreachable -event icmp_router_advertisement%(c: connection, icmp: icmp_conn, hop_limit: count, managed: bool, router_lifetime: count, reachable_time: interval, retrans_timer: interval%); +event icmp_router_advertisement%(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval%); ## Generated for ICMP *neighbor solicitation* messages. ## @@ -975,6 +990,8 @@ event icmp_router_advertisement%(c: connection, icmp: icmp_conn, hop_limit: coun ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## +## tgt: The IP address of the target of the solicitation. +## ## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent ## icmp_time_exceeded icmp_unreachable event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt:addr%); @@ -990,9 +1007,18 @@ event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt:addr%); ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## +## router: Flag indicating the sender is a router. +## +## solicited: Flag indicating advertisement is in response to a solicitation. +## +## override: Flag indicating advertisement should override existing caches. 
+## +## tgt: the Target Address in the soliciting message or the address whose +## link-layer address has changed for unsolicited adverts. +## ## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent ## icmp_time_exceeded icmp_unreachable -event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, tgt:addr%); +event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt:addr%); ## Generated for ICMP *redirect* messages. ## @@ -1005,6 +1031,11 @@ event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, tgt:addr%); ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## +## tgt: The address that is supposed to be a better first hop to use for +## ICMP Destination Address. +## +## dest: The address of the destination which is redirected to the target. +## ## a: The new destination address the message is redirecting to. ## ## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent diff --git a/testing/btest/Baseline/core.icmp.icmp-context/output b/testing/btest/Baseline/core.icmp.icmp-context/output index 9e252d8c38..40dc778d8b 100644 --- a/testing/btest/Baseline/core.icmp.icmp-context/output +++ b/testing/btest/Baseline/core.icmp.icmp-context/output @@ -1,12 +1,12 @@ icmp_unreachable (code=0) conn_id: [orig_h=10.0.0.1, orig_p=3/icmp, resp_h=10.0.0.2, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=0, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=0, hlim=64, v6=F] icmp_context: [id=[orig_h=::, orig_p=0/unknown, resp_h=::, resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) conn_id: [orig_h=10.0.0.1, orig_p=3/icmp, resp_h=10.0.0.2, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=20, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=20, hlim=64, v6=F] icmp_context: [id=[orig_h=10.0.0.2, orig_p=0/unknown, resp_h=10.0.0.1, resp_p=0/unknown], len=20, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=3) conn_id: [orig_h=192.168.1.102, orig_p=3/icmp, resp_h=192.168.1.1, resp_p=3/icmp] - icmp_conn: [orig_h=192.168.1.102, resp_h=192.168.1.1, itype=3, icode=3, len=148, v6=F] + icmp_conn: [orig_h=192.168.1.102, resp_h=192.168.1.1, itype=3, icode=3, len=148, hlim=128, v6=F] icmp_context: [id=[orig_h=192.168.1.1, orig_p=53/udp, resp_h=192.168.1.102, resp_p=59207/udp], len=163, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] diff --git a/testing/btest/Baseline/core.icmp.icmp-events/output b/testing/btest/Baseline/core.icmp.icmp-events/output index 9d8f484921..c8c8eb317f 100644 --- a/testing/btest/Baseline/core.icmp.icmp-events/output +++ b/testing/btest/Baseline/core.icmp.icmp-events/output @@ -1,20 +1,20 @@ icmp_unreachable (code=3) conn_id: [orig_h=192.168.1.102, orig_p=3/icmp, resp_h=192.168.1.1, resp_p=3/icmp] - icmp_conn: [orig_h=192.168.1.102, resp_h=192.168.1.1, itype=3, icode=3, len=148, v6=F] + icmp_conn: [orig_h=192.168.1.102, resp_h=192.168.1.1, itype=3, icode=3, len=148, hlim=128, v6=F] icmp_context: [id=[orig_h=192.168.1.1, orig_p=53/udp, resp_h=192.168.1.102, resp_p=59207/udp], len=163, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_time_exceeded (code=0) conn_id: [orig_h=10.0.0.1, orig_p=11/icmp, resp_h=10.0.0.2, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=11, icode=0, 
len=32, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=11, icode=0, len=32, hlim=64, v6=F] icmp_context: [id=[orig_h=10.0.0.2, orig_p=30000/udp, resp_h=10.0.0.1, resp_p=13000/udp], len=32, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_echo_request (id=34844, seq=0, payload=O\x85\xe0C\0^N\xeb\xff^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, hlim=64, v6=F] icmp_echo_reply (id=34844, seq=0, payload=O\x85\xe0C\0^N\xeb\xff^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, hlim=64, v6=F] icmp_echo_request (id=34844, seq=1, payload=O\x85\xe0D\0^N\xf0}^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, hlim=64, v6=F] icmp_echo_reply (id=34844, seq=1, payload=O\x85\xe0D\0^N\xf0}^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, hlim=64, v6=F] diff --git a/testing/btest/Baseline/core.icmp.icmp6-context/output b/testing/btest/Baseline/core.icmp.icmp6-context/output index 4b75210a18..7a83679018 100644 --- a/testing/btest/Baseline/core.icmp.icmp6-context/output +++ b/testing/btest/Baseline/core.icmp.icmp6-context/output @@ -1,16 +1,16 @@ icmp_unreachable (code=0) conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=0, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=0, hlim=64, v6=T] icmp_context: [id=[orig_h=::, orig_p=0/unknown, resp_h=::, resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=40, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=40, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=0/unknown, resp_h=fe80::dead, resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) conn_id: 
[orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=48, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=48, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=0/unknown, resp_h=fe80::dead, resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] diff --git a/testing/btest/Baseline/core.icmp.icmp6-events/output b/testing/btest/Baseline/core.icmp.icmp6-events/output index 1ff26ff889..81075b716a 100644 --- a/testing/btest/Baseline/core.icmp.icmp6-events/output +++ b/testing/btest/Baseline/core.icmp.icmp6-events/output @@ -1,55 +1,68 @@ icmp_unreachable (code=0) conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_packet_too_big (code=0) conn_id: [orig_h=fe80::dead, orig_p=2/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=2, icode=0, len=52, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=2, icode=0, len=52, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_time_exceeded (code=0) conn_id: [orig_h=fe80::dead, orig_p=3/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=3, icode=0, len=52, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=3, icode=0, len=52, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_parameter_problem (code=0) conn_id: [orig_h=fe80::dead, orig_p=4/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=4, icode=0, len=52, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=4, icode=0, len=52, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_echo_request (id=1, seq=3, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=3, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=4, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, 
resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=4, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=5, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=5, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_redirect (tgt=fe80::cafe, dest=fe80::babe) conn_id: [orig_h=fe80::dead, orig_p=137/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=137, icode=0, len=32, v6=T] -icmp_router_advertisement (hop_limit=0, managed=F, rlifetime=1800, reachable=0.000000, retrans=0.000000) + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=137, icode=0, len=32, hlim=255, v6=T] +icmp_router_advertisement + cur_hop_limit=13 + managed=T + other=F + home_agent=T + pref=3 + proxy=F + rsv=0 + router_lifetime=30.0 mins + reachable_time=3.0 secs 700.0 msecs + retrans_timer=1.0 sec 300.0 msecs conn_id: [orig_h=fe80::dead, orig_p=134/icmp, resp_h=fe80::beef, resp_p=133/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=134, icode=0, len=8, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=134, icode=0, len=8, hlim=255, v6=T] icmp_neighbor_advertisement (tgt=fe80::babe) + router=T + solicited=F + override=T conn_id: [orig_h=fe80::dead, orig_p=136/icmp, resp_h=fe80::beef, resp_p=135/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=136, icode=0, len=16, v6=T] + 
icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=136, icode=0, len=16, hlim=255, v6=T] icmp_router_solicitation conn_id: [orig_h=fe80::dead, orig_p=133/icmp, resp_h=fe80::beef, resp_p=134/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=133, icode=0, len=0, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=133, icode=0, len=0, hlim=255, v6=T] icmp_neighbor_solicitation (tgt=fe80::babe) conn_id: [orig_h=fe80::dead, orig_p=135/icmp, resp_h=fe80::beef, resp_p=136/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=135, icode=0, len=16, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=135, icode=0, len=16, hlim=255, v6=T] diff --git a/testing/btest/Traces/icmp/icmp6-router-advert.pcap b/testing/btest/Traces/icmp/icmp6-router-advert.pcap index 9c3e557a9d26e12d1e8a1a3e8b2a818612a1dded..38de434c2f5264ad6fd14f9870eaefd09ca7a8f6 100644 GIT binary patch delta 30 lcmd1Hn;^li`>D^rG@oapyd&SXv%EXlIT#rDN*EYeMF5|O2x$NS delta 30 gcmd1Hn;^md_jIejjTiGoc}Kn*tPC9N91KtZ0F(CyqyPW_ diff --git a/testing/btest/core/icmp/icmp6-events.test b/testing/btest/core/icmp/icmp6-events.test index 64c14920ff..052ba91ee6 100644 --- a/testing/btest/core/icmp/icmp6-events.test +++ b/testing/btest/core/icmp/icmp6-events.test @@ -88,9 +88,12 @@ event icmp_neighbor_solicitation(c: connection, icmp: icmp_conn, tgt: addr) print " icmp_conn: " + fmt("%s", icmp); } -event icmp_neighbor_advertisement(c: connection, icmp: icmp_conn, tgt:addr) +event icmp_neighbor_advertisement(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt: addr) { print "icmp_neighbor_advertisement (tgt=" + fmt("%s", tgt) + ")"; + print " router=" + fmt("%s", router); + print " solicited=" + fmt("%s", solicited); + print " override=" + fmt("%s", override); print " conn_id: " + fmt("%s", c$id); print " icmp_conn: " + fmt("%s", icmp); } @@ -102,9 +105,19 @@ event icmp_router_solicitation(c: connection, icmp: icmp_conn) print " icmp_conn: " + fmt("%s", icmp); } -event icmp_router_advertisement(c: connection, icmp: icmp_conn, hop_limit: count, managed: bool, router_lifetime: count, reachable_time: interval, retrans_timer: interval) +event icmp_router_advertisement(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval) { - print "icmp_router_advertisement (hop_limit=" + fmt("%d", hop_limit) + ", managed=" + fmt("%s", managed) + ", rlifetime=" + fmt("%d", router_lifetime) + ", reachable=" + fmt("%f", reachable_time) + ", retrans=" + fmt("%f", retrans_timer) + ")"; + print "icmp_router_advertisement"; + print " cur_hop_limit=" + fmt("%s", cur_hop_limit); + print " managed=" + fmt("%s", managed); + print " other=" + fmt("%s", other); + print " home_agent=" + fmt("%s", home_agent); + print " pref=" + fmt("%s", pref); + print " proxy=" + fmt("%s", proxy); + print " rsv=" + fmt("%s", rsv); + print " router_lifetime=" + fmt("%s", router_lifetime); + print " reachable_time=" + fmt("%s", reachable_time); + print " retrans_timer=" + fmt("%s", retrans_timer); print " conn_id: " + fmt("%s", c$id); print " icmp_conn: " + fmt("%s", icmp); } From 1fba55f4f3eb37ca5b46095891416ebc720b469e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 18 Apr 2012 14:59:42 -0700 Subject: [PATCH 238/651] Removing an unnecessary const cast. 
--- src/logging/writers/DataSeries.cc | 4 +--- src/logging/writers/DataSeries.h | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index f6b26dc494..3c88c65653 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -212,8 +212,6 @@ std::string DataSeries::GetDSOptionsForType(const threading::Field *field) } } -// ************************ CLASS IMPL ********************************* - DataSeries::DataSeries(WriterFrontend* frontend) : WriterBackend(frontend) { ds_compression = string((const char *)BifConst::LogDataSeries::compression->Bytes(), @@ -329,7 +327,7 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con else Warning(Fmt("%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'gz', 'bz2', 'none', 'any'. Defaulting to 'any'", ds_compression.c_str())); - log_type = const_cast(log_types.registerType(schema)); + log_type = log_types.registerType(schema); log_series.setType(*log_type); return OpenLog(path); diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h index 5faa87e1b2..bd2eb418f6 100644 --- a/src/logging/writers/DataSeries.h +++ b/src/logging/writers/DataSeries.h @@ -99,7 +99,7 @@ private: // Internal DataSeries structures we need to keep track of. vector schema_list; ExtentTypeLibrary log_types; - ExtentType *log_type; + const ExtentType *log_type; ExtentSeries log_series; ExtentMap extents; int compress_type; From 18aa41c62b943ceb949107c883e182c4ab672220 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 19 Apr 2012 10:41:01 -0700 Subject: [PATCH 239/651] Extending log post-processor call to include the name of the writer. --- scripts/base/frameworks/logging/main.bro | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index 2c36b3001e..4093a3b429 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -376,13 +376,16 @@ function run_rotation_postprocessor_cmd(info: RotationInfo, npath: string) : boo if ( pp_cmd == "" ) return T; + # Turn, e.g., Log::WRITER_ASCII into "ascii". + local writer = subst_string(to_lower(fmt("%s", info$writer)), "log::writer_", ""); + # The date format is hard-coded here to provide a standardized # script interface. - system(fmt("%s %s %s %s %s %d", + system(fmt("%s %s %s %s %s %d %s", pp_cmd, npath, info$path, strftime("%y-%m-%d_%H.%M.%S", info$open), strftime("%y-%m-%d_%H.%M.%S", info$close), - info$terminating)); + info$terminating, writer)); return T; } From 4b70adcb4b08d2c9357a734ddc30a6007ffaaf93 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 19 Apr 2012 10:41:24 -0700 Subject: [PATCH 240/651] Tweaking DataSeries support. 
--- doc/logging-dataseries.rst | 14 ------------- src/logging/writers/DataSeries.cc | 10 +++++++++- src/logging/writers/DataSeries.h | 1 + .../out | 20 +++++++++---------- .../conn.ds.txt | 12 +++++------ .../frameworks/logging/dataseries/options.bro | 1 + .../frameworks/logging/dataseries/rotate.bro | 1 + .../logging/dataseries/test-logging.bro | 1 + .../logging/dataseries/time-as-int.bro | 1 + .../logging/dataseries/wikipedia.bro | 1 + 10 files changed, 31 insertions(+), 31 deletions(-) diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst index e530ba7c0b..6eef223a90 100644 --- a/doc/logging-dataseries.rst +++ b/doc/logging-dataseries.rst @@ -109,22 +109,8 @@ TODO Warning, while packing field not_valid_after of record 11, error was > 10%: (1346460000 / 1000000 = 1346.46, round() = 1346) -* The compiler warn about a depracated method and I'm not immediately - seeing how to avoid using that. - * For testing our script-level options: - Can we get the extentsize from a ``.ds`` file? - Can we get the compressio level from a ``.ds`` file? -* ds2txt can apparently not read a file that is currently being - written. That's not good for the spool directory:: - - # ds2txt http.ds - **** Assertion failure in file - /DataSeriesSink.cpp, line 301 - **** Failed expression: tail[i] == 0xFF - **** Details: bad header for the tail of http.ds! - - Can that be worked around? - diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index 3c88c65653..aacef01f80 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -194,6 +194,8 @@ std::string DataSeries::GetDSOptionsForType(const threading::Field *field) if ( ! ds_use_integer_for_time ) s += " pack_scale=\"1000000\""; + else + s += string(" units=\"") + TIME_UNIT() + "\" epoch=\"unix\""; return s; } @@ -327,7 +329,13 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con else Warning(Fmt("%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'gz', 'bz2', 'none', 'any'. Defaulting to 'any'", ds_compression.c_str())); - log_type = log_types.registerType(schema); + const ExtentType& type = log_types.registerTypeR(schema); + + // Note: This is a bit dicey as it depends on the implementation of + // registerTypeR(), but its what the DataSeries guys recommended + // given that we function we originally used has been deprecated. + log_type = &type; + log_series.setType(*log_type); return OpenLog(path); diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h index bd2eb418f6..ab2bcec88c 100644 --- a/src/logging/writers/DataSeries.h +++ b/src/logging/writers/DataSeries.h @@ -43,6 +43,7 @@ private: static const size_t THREAD_MIN = 1; // Minimum number of compression threads that DataSeries may spawn. static const size_t THREAD_MAX = 128; // Maximum number of compression threads that DataSeries may spawn. static const size_t TIME_SCALE = 1000000; // Fixed-point multiplier for time values when converted to integers. + const char* TIME_UNIT() { return "microseconds"; } // DS name for time resolution when converted to integers. Must match TIME_SCALE. 
struct SchemaValue { diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out index b6f05003f3..a12fed36e1 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out @@ -1,13 +1,13 @@ -test.2011-03-07-03-00-05.ds test 11-03-07_03.00.05 11-03-07_04.00.05 0 -test.2011-03-07-04-00-05.ds test 11-03-07_04.00.05 11-03-07_05.00.05 0 -test.2011-03-07-05-00-05.ds test 11-03-07_05.00.05 11-03-07_06.00.05 0 -test.2011-03-07-06-00-05.ds test 11-03-07_06.00.05 11-03-07_07.00.05 0 -test.2011-03-07-07-00-05.ds test 11-03-07_07.00.05 11-03-07_08.00.05 0 -test.2011-03-07-08-00-05.ds test 11-03-07_08.00.05 11-03-07_09.00.05 0 -test.2011-03-07-09-00-05.ds test 11-03-07_09.00.05 11-03-07_10.00.05 0 -test.2011-03-07-10-00-05.ds test 11-03-07_10.00.05 11-03-07_11.00.05 0 -test.2011-03-07-11-00-05.ds test 11-03-07_11.00.05 11-03-07_12.00.05 0 -test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 +test.2011-03-07-03-00-05.ds test 11-03-07_03.00.05 11-03-07_04.00.05 0 dataseries +test.2011-03-07-04-00-05.ds test 11-03-07_04.00.05 11-03-07_05.00.05 0 dataseries +test.2011-03-07-05-00-05.ds test 11-03-07_05.00.05 11-03-07_06.00.05 0 dataseries +test.2011-03-07-06-00-05.ds test 11-03-07_06.00.05 11-03-07_07.00.05 0 dataseries +test.2011-03-07-07-00-05.ds test 11-03-07_07.00.05 11-03-07_08.00.05 0 dataseries +test.2011-03-07-08-00-05.ds test 11-03-07_08.00.05 11-03-07_09.00.05 0 dataseries +test.2011-03-07-09-00-05.ds test 11-03-07_09.00.05 11-03-07_10.00.05 0 dataseries +test.2011-03-07-10-00-05.ds test 11-03-07_10.00.05 11-03-07_11.00.05 0 dataseries +test.2011-03-07-11-00-05.ds test 11-03-07_11.00.05 11-03-07_12.00.05 0 dataseries +test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 dataseries > test.2011-03-07-03-00-05.ds # Extent Types ... 
diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt index e6294b1d71..65d4ba0a67 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt @@ -9,7 +9,7 @@ - + @@ -17,7 +17,7 @@ - + @@ -51,8 +51,8 @@ extent offset ExtentType 40 DataSeries: XmlType -636 conn -2912 DataSeries: ExtentIndex +672 conn +2948 DataSeries: ExtentIndex # Extent, type='conn' ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes 1300475167096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 @@ -92,5 +92,5 @@ ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -636 conn -2912 DataSeries: ExtentIndex +672 conn +2948 DataSeries: ExtentIndex diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro index 77ea32908a..fc3752a168 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro @@ -1,5 +1,6 @@ # # @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-GROUP: dataseries # # @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES # @TEST-EXEC: test -e ssh.ds.xml diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro index 639c7f3562..6a0cee5888 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro @@ -1,5 +1,6 @@ # # @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-GROUP: dataseries # # @TEST-EXEC: bro -b -r %DIR/../rotation.trace %INPUT 2>&1 Log::default_writer=Log::WRITER_DATASERIES | grep "test" >out # @TEST-EXEC: for i in test.*.ds; do printf '> %s\n' $i; ds2txt $i; done >>out diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro index 76f2451477..d04b0acf44 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro @@ -1,5 +1,6 @@ # # @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-GROUP: dataseries # # @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES # @TEST-EXEC: ds2txt ssh.ds | ${SCRIPTS}/diff-remove-timestamps-dataseries >ssh.ds.txt diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro index 3a072998c0..e4dd6a5431 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro @@ -1,5 +1,6 @@ # # @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-GROUP: dataseries # # @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES # @TEST-EXEC: ds2txt 
conn.ds >conn.ds.txt diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro index 4a4b70afc2..38726a8b10 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro @@ -1,5 +1,6 @@ # # @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-GROUP: dataseries # # @TEST-EXEC: bro -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES # @TEST-EXEC: ds2txt conn.ds >conn.ds.txt From faa89913dee1e6fbc09ca5feaab724c0dfb8222c Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 19 Apr 2012 13:45:20 -0500 Subject: [PATCH 241/651] Don't print the various "weird" events to stderr Fixes #805. --- src/Reporter.cc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Reporter.cc b/src/Reporter.cc index 37470cd690..18f39ce4af 100644 --- a/src/Reporter.cc +++ b/src/Reporter.cc @@ -149,7 +149,7 @@ void Reporter::WeirdHelper(EventHandlerPtr event, Val* conn_val, const char* add va_list ap; va_start(ap, fmt_name); - DoLog("weird", event, stderr, 0, vl, false, false, 0, fmt_name, ap); + DoLog("weird", event, 0, 0, vl, false, false, 0, fmt_name, ap); va_end(ap); delete vl; @@ -163,7 +163,7 @@ void Reporter::WeirdFlowHelper(const IPAddr& orig, const IPAddr& resp, const cha va_list ap; va_start(ap, fmt_name); - DoLog("weird", flow_weird, stderr, 0, vl, false, false, 0, fmt_name, ap); + DoLog("weird", flow_weird, 0, 0, vl, false, false, 0, fmt_name, ap); va_end(ap); delete vl; @@ -326,7 +326,8 @@ void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out, Conne s += buffer; s += "\n"; - fprintf(out, "%s", s.c_str()); + if ( out ) + fprintf(out, "%s", s.c_str()); if ( addl ) { From 6e2205aa686cb1c77da8d2b56ed9a1881cb72e7a Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sat, 21 Apr 2012 14:33:14 -0400 Subject: [PATCH 242/651] Fix problem with extracting FTP passwords. - Added "ftpuser" as another anonymous username. - Problem discovered by Patrik Lundin. --- scripts/base/protocols/ftp/main.bro | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/scripts/base/protocols/ftp/main.bro b/scripts/base/protocols/ftp/main.bro index e6c0131337..aa7d82469e 100644 --- a/scripts/base/protocols/ftp/main.bro +++ b/scripts/base/protocols/ftp/main.bro @@ -22,7 +22,7 @@ export { const default_capture_password = F &redef; ## User IDs that can be considered "anonymous". - const guest_ids = { "anonymous", "ftp", "guest" } &redef; + const guest_ids = { "anonymous", "ftp", "ftpuser", "guest" } &redef; type Info: record { ## Time when the command was sent. @@ -160,8 +160,12 @@ function ftp_message(s: Info) # or it's a deliberately logged command. if ( |s$tags| > 0 || (s?$cmdarg && s$cmdarg$cmd in logged_commands) ) { - if ( s?$password && to_lower(s$user) !in guest_ids ) + if ( s?$password && + !s$capture_password && + to_lower(s$user) !in guest_ids ) + { s$password = ""; + } local arg = s$cmdarg$arg; if ( s$cmdarg$cmd in file_cmds ) From bcadb67731482b26e2b3c0b7103f1f51c1ca0de3 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sat, 21 Apr 2012 14:42:20 -0400 Subject: [PATCH 243/651] First commit of binpac based AYIYA analyzer. - ayiya-analyzer.pac needs work to do something with the actual packet. - Lots more cleanup to do, but it parses the protocol at least. 
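- For reference, a rough sketch of the header layout that
  ayiya-protocol.pac assumes. The struct and helper names below are
  made up for illustration (they are not part of the analyzer code),
  but the length computations mirror the &let expressions in the
  grammar:

    #include <cstddef>
    #include <cstdint>

    // Fixed part of the AYIYA header as described by ayiya-protocol.pac;
    // it is followed by identity[identity_len], signature[signature_len]
    // and the encapsulated packet.
    struct AYIYAHeaderSketch {
        uint8_t identity_byte;   // high nibble encodes log2 of the identity length
        uint8_t signature_byte;  // high nibble encodes the signature length in 32-bit words
        uint8_t auth_and_op;     // packed authentication method / opcode nibbles
        uint8_t next_header;     // IP protocol number of the encapsulated packet
        uint32_t epoch;
    };

    inline size_t ayiya_identity_len(uint8_t identity_byte)
        { return size_t(1) << (identity_byte >> 4); }  // e.g. high nibble 4 -> 16 bytes

    inline size_t ayiya_signature_len(uint8_t signature_byte)
        { return size_t(signature_byte >> 4) * 4; }    // e.g. high nibble 5 -> 20 bytes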
--- src/AYIYA.cc | 90 ++++++++++++++++++++++++++++++++++++++++++ src/AYIYA.h | 55 ++++++++++++++++++++++++++ src/Analyzer.cc | 11 ++++++ src/AnalyzerTags.h | 8 +++- src/CMakeLists.txt | 4 ++ src/ayiya-analyzer.pac | 25 ++++++++++++ src/ayiya-protocol.pac | 14 +++++++ src/ayiya.pac | 10 +++++ 8 files changed, 215 insertions(+), 2 deletions(-) create mode 100644 src/AYIYA.cc create mode 100644 src/AYIYA.h create mode 100644 src/ayiya-analyzer.pac create mode 100644 src/ayiya-protocol.pac create mode 100644 src/ayiya.pac diff --git a/src/AYIYA.cc b/src/AYIYA.cc new file mode 100644 index 0000000000..d69db642b3 --- /dev/null +++ b/src/AYIYA.cc @@ -0,0 +1,90 @@ +#include "AYIYA.h" +#include "TCP_Reassembler.h" + +AYIYA_Analyzer::AYIYA_Analyzer(Connection* conn) +: Analyzer(AnalyzerTag::SYSLOG_BINPAC, conn) + { + interp = new binpac::AYIYA::AYIYA_Conn(this); + did_session_done = 0; + //ADD_ANALYZER_TIMER(&AYIYA_Analyzer::ExpireTimer, + // network_time + Syslog_session_timeout, 1, TIMER_Syslog_EXPIRE); + } + +AYIYA_Analyzer::~AYIYA_Analyzer() + { + delete interp; + } + +void AYIYA_Analyzer::Done() + { + Analyzer::Done(); + + if ( ! did_session_done ) + Event(udp_session_done); + } + +void AYIYA_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, int seq, const IP_Hdr* ip, int caplen) + { + Analyzer::DeliverPacket(len, data, orig, seq, ip, caplen); + interp->NewData(orig, data, data + len); + } + +//void AYIYA_Analyzer::ExpireTimer(double t) +// { +// // The - 1.0 in the following is to allow 1 second for the +// // common case of a single request followed by a single reply, +// // so we don't needlessly set the timer twice in that case. +// if ( t - Conn()->LastTime() >= Syslog_session_timeout - 1.0 || terminating ) +// { +// Event(connection_timeout); +// sessions->Remove(Conn()); +// } +// else +// ADD_ANALYZER_TIMER(&AYIYA_Analyzer::ExpireTimer, +// t + Syslog_session_timeout, 1, TIMER_Syslog_EXPIRE); +// } + +//Syslog_TCP_Analyzer_binpac::Syslog_TCP_Analyzer_binpac(Connection* conn) +//: TCP_ApplicationAnalyzer(AnalyzerTag::Syslog_TCP_BINPAC, conn) +// { +// interp = new binpac::Syslog_on_TCP::Syslog_TCP_Conn(this); +// } + +//Syslog_TCP_Analyzer_binpac::~Syslog_TCP_Analyzer_binpac() +// { +// delete interp; +// } + +//void Syslog_TCP_Analyzer_binpac::Done() +// { +// TCP_ApplicationAnalyzer::Done(); +// +// interp->FlowEOF(true); +// interp->FlowEOF(false); +// } + +//void Syslog_TCP_Analyzer_binpac::EndpointEOF(TCP_Reassembler* endp) +// { +// TCP_ApplicationAnalyzer::EndpointEOF(endp); +// interp->FlowEOF(endp->IsOrig()); +// } + +//void Syslog_TCP_Analyzer_binpac::DeliverStream(int len, const u_char* data, +// bool orig) +// { +// TCP_ApplicationAnalyzer::DeliverStream(len, data, orig); +// +// assert(TCP()); +// +// if ( TCP()->IsPartial() || TCP()->HadGap(orig) ) +// // punt-on-partial or stop-on-gap. 
+// return; +// +// interp->NewData(orig, data, data + len); +// } + +//void Syslog_TCP_Analyzer_binpac::Undelivered(int seq, int len, bool orig) +// { +// TCP_ApplicationAnalyzer::Undelivered(seq, len, orig); +// interp->NewGap(orig, len); +// } diff --git a/src/AYIYA.h b/src/AYIYA.h new file mode 100644 index 0000000000..294eeca1ea --- /dev/null +++ b/src/AYIYA.h @@ -0,0 +1,55 @@ +#ifndef AYIYA_h +#define AYIYA_h + +#include "UDP.h" +#include "TCP.h" + +#include "ayiya_pac.h" + +class AYIYA_Analyzer : public Analyzer { +public: + AYIYA_Analyzer(Connection* conn); + virtual ~AYIYA_Analyzer(); + + virtual void Done(); + virtual void DeliverPacket(int len, const u_char* data, bool orig, + int seq, const IP_Hdr* ip, int caplen); + + static Analyzer* InstantiateAnalyzer(Connection* conn) + { return new AYIYA_Analyzer(conn); } + + static bool Available() + { return true; } + +protected: + friend class AnalyzerTimer; + void ExpireTimer(double t); + + int did_session_done; + + binpac::AYIYA::AYIYA_Conn* interp; +}; + +// #include "Syslog_tcp_pac.h" +// +//class Syslog_TCP_Analyzer_binpac : public TCP_ApplicationAnalyzer { +//public: +// Syslog_TCP_Analyzer_binpac(Connection* conn); +// virtual ~Syslog_TCP_Analyzer_binpac(); +// +// virtual void Done(); +// virtual void DeliverStream(int len, const u_char* data, bool orig); +// virtual void Undelivered(int seq, int len, bool orig); +// virtual void EndpointEOF(TCP_Reassembler* endp); +// +// static Analyzer* InstantiateAnalyzer(Connection* conn) +// { return new Syslog_TCP_Analyzer_binpac(conn); } +// +// static bool Available() +// { return (Syslog_request || Syslog_full_request) && FLAGS_use_binpac; } +// +//protected: +// binpac::Syslog_on_TCP::Syslog_TCP_Conn* interp; +//}; +// +#endif diff --git a/src/Analyzer.cc b/src/Analyzer.cc index 92ca3ecc50..70bb5567cc 100644 --- a/src/Analyzer.cc +++ b/src/Analyzer.cc @@ -4,6 +4,7 @@ #include "PIA.h" #include "Event.h" +#include "AYIYA.h" #include "BackDoor.h" #include "BitTorrent.h" #include "BitTorrentTracker.h" @@ -127,6 +128,16 @@ const Analyzer::Config Analyzer::analyzer_configs[] = { Syslog_Analyzer_binpac::InstantiateAnalyzer, Syslog_Analyzer_binpac::Available, 0, false }, + //{ AnalyzerTag::6to4, "6to4", + // 6to4_Analyzer::InstantiateAnalyzer, + // 6to4_Anylzer::Available, 0, false }, + { AnalyzerTag::AYIYA, "AYIYA", + AYIYA_Analyzer::InstantiateAnalyzer, + AYIYA_Analyzer::Available, 0, false }, + //{ AnalyzerTag::Teredo, "Teredo", + // Teredo_Analyzer::InstantiateAnalyzer, + // Teredo_Analyzer::Available, 0, false }, + { AnalyzerTag::File, "FILE", File_Analyzer::InstantiateAnalyzer, File_Analyzer::Available, 0, false }, { AnalyzerTag::Backdoor, "BACKDOOR", diff --git a/src/AnalyzerTags.h b/src/AnalyzerTags.h index dc10a55f22..0f9794527e 100644 --- a/src/AnalyzerTags.h +++ b/src/AnalyzerTags.h @@ -33,11 +33,15 @@ namespace AnalyzerTag { DHCP_BINPAC, DNS_TCP_BINPAC, DNS_UDP_BINPAC, HTTP_BINPAC, SSL, SYSLOG_BINPAC, + // Decapsulation Analyzers + //6to4, + AYIYA, + //Teredo, + // Other File, Backdoor, InterConn, SteppingStone, TCPStats, ConnSize, - - + // Support-analyzers Contents, ContentLine, NVT, Zip, Contents_DNS, Contents_NCP, Contents_NetbiosSSN, Contents_Rlogin, Contents_Rsh, diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a755fde64e..6cca13de16 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -186,6 +186,9 @@ endmacro(BINPAC_TARGET) binpac_target(binpac-lib.pac) binpac_target(binpac_bro-lib.pac) + +binpac_target(ayiya.pac + ayiya-protocol.pac ayiya-analyzer.pac) 
binpac_target(bittorrent.pac bittorrent-protocol.pac bittorrent-analyzer.pac) binpac_target(dce_rpc.pac @@ -277,6 +280,7 @@ set(bro_SRCS Anon.cc ARP.cc Attr.cc + AYIYA.cc BackDoor.cc Base64.cc BitTorrent.cc diff --git a/src/ayiya-analyzer.pac b/src/ayiya-analyzer.pac new file mode 100644 index 0000000000..888cc575a5 --- /dev/null +++ b/src/ayiya-analyzer.pac @@ -0,0 +1,25 @@ + +connection AYIYA_Conn(bro_analyzer: BroAnalyzer) + { + upflow = AYIYA_Flow; + downflow = AYIYA_Flow; + }; + +flow AYIYA_Flow + { + datagram = PDU withcontext(connection, this); + + function process_ayiya(pdu: PDU): bool + %{ + connection()->bro_analyzer()->ProtocolConfirmation(); + + // Not sure what to do here. + printf("packet: %s\n", ${pdu.packet}.data()); + return true; + %} + + }; + +refine typeattr PDU += &let { + proc_ayiya = $context.flow.process_ayiya(this); +}; diff --git a/src/ayiya-protocol.pac b/src/ayiya-protocol.pac new file mode 100644 index 0000000000..25aca23fb9 --- /dev/null +++ b/src/ayiya-protocol.pac @@ -0,0 +1,14 @@ + +type PDU = record { + identity_byte: uint8; + signature_byte: uint8; + auth_and_op_crap: uint8; + next_header: uint8; + epoch: uint32; + identity: bytestring &length=identity_len; + signature: bytestring &length=signature_len; + packet: bytestring &restofdata; +} &let { + identity_len = (1 << (identity_byte >> 4)); + signature_len = (signature_byte >> 4) * 4; +} &byteorder = littleendian; \ No newline at end of file diff --git a/src/ayiya.pac b/src/ayiya.pac new file mode 100644 index 0000000000..58fa196c15 --- /dev/null +++ b/src/ayiya.pac @@ -0,0 +1,10 @@ +%include binpac.pac +%include bro.pac + +analyzer AYIYA withcontext { + connection: AYIYA_Conn; + flow: AYIYA_Flow; +}; + +%include ayiya-protocol.pac +%include ayiya-analyzer.pac From 69ab13c88ff8598e29692d7fc166a56f7a807f05 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sat, 21 Apr 2012 15:10:30 -0400 Subject: [PATCH 244/651] Added some scripts for a tunnels framework. - The AYIYA analyzer is now enabled on it's default port. --- scripts/base/frameworks/tunnels/__load__.bro | 1 + scripts/base/frameworks/tunnels/main.bro | 8 ++++++++ scripts/base/init-default.bro | 1 + 3 files changed, 10 insertions(+) create mode 100644 scripts/base/frameworks/tunnels/__load__.bro create mode 100644 scripts/base/frameworks/tunnels/main.bro diff --git a/scripts/base/frameworks/tunnels/__load__.bro b/scripts/base/frameworks/tunnels/__load__.bro new file mode 100644 index 0000000000..d551be57d3 --- /dev/null +++ b/scripts/base/frameworks/tunnels/__load__.bro @@ -0,0 +1 @@ +@load ./main \ No newline at end of file diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro new file mode 100644 index 0000000000..901bee9a75 --- /dev/null +++ b/scripts/base/frameworks/tunnels/main.bro @@ -0,0 +1,8 @@ +module Tunnels; + +export { + +} + +const ports = { 5072/udp } &redef; +redef dpd_config += { [ANALYZER_AYIYA] = [$ports = ports] }; diff --git a/scripts/base/init-default.bro b/scripts/base/init-default.bro index 1cf125c3ab..ecaa19132c 100644 --- a/scripts/base/init-default.bro +++ b/scripts/base/init-default.bro @@ -29,6 +29,7 @@ @load base/frameworks/metrics @load base/frameworks/intel @load base/frameworks/reporter +@load base/frameworks/tunnels @load base/protocols/conn @load base/protocols/dns From dff3fabcea13d83caf68c985cac5ca8c049c657a Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sat, 21 Apr 2012 15:25:19 -0400 Subject: [PATCH 245/651] Added a DPD signature for AYIYA, but it's crashing Bro. 
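The payload pattern anchors on the third and fourth bytes of the AYIYA
header introduced in the previous commit: the third byte is the packed
auth/opcode field and the fourth is next_header, where 0x29 (41) means
an IPv6 packet is encapsulated. A rough C++ equivalent of the heuristic
the signature encodes (the helper name is made up for illustration):

    #include <cstdint>

    // Mirrors the commented-out dpd_ayiya payload regex /^..\x11\x29/:
    // skip the identity/signature length bytes, then require the
    // auth/opcode byte the signature expects and an IPv6 next_header.
    inline bool looks_like_ayiya(const uint8_t* data, int len)
        {
        return len >= 4 && data[2] == 0x11 && data[3] == 0x29;
        }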
--- scripts/base/frameworks/dpd/dpd.sig | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/base/frameworks/dpd/dpd.sig b/scripts/base/frameworks/dpd/dpd.sig index adda0ce54e..8408d7617a 100644 --- a/scripts/base/frameworks/dpd/dpd.sig +++ b/scripts/base/frameworks/dpd/dpd.sig @@ -149,3 +149,9 @@ signature dpd_ssl_client { payload /^(\x16\x03[\x00\x01\x02]..\x01...\x03[\x00\x01\x02]|...?\x01[\x00\x01\x02][\x02\x03]).*/ tcp-state originator } + +#signature dpd_ayiya { +# ip-proto = udp +# payload /^..\x11\x29/ +# enable "ayiya" +#} From e2da96941530a475e4573af1ddb96e23de05b658 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sat, 21 Apr 2012 23:50:09 -0400 Subject: [PATCH 246/651] Return of Robin's old SOCKS analyzer/decapsulator and tunnel code checkpoint. - More discussion is needed to figure out how to integrate the SOCKS analyzer best. - Tunnels framework now logs for the SOCKS analyzer. --- scripts/base/frameworks/dpd/dpd.sig | 31 +++++ scripts/base/frameworks/tunnels/__load__.bro | 5 +- scripts/base/frameworks/tunnels/main.bro | 51 +++++++- scripts/base/init-default.bro | 1 + scripts/base/protocols/socks/__load__.bro | 1 + scripts/base/protocols/socks/main.bro | 116 +++++++++++++++++++ src/Analyzer.cc | 4 + src/AnalyzerTags.h | 1 + src/CMakeLists.txt | 3 + src/SOCKS.cc | 79 +++++++++++++ src/SOCKS.h | 45 +++++++ src/event.bif | 20 ++++ src/socks-analyzer.pac | 57 +++++++++ src/socks-protocol.pac | 34 ++++++ src/socks.pac | 24 ++++ 15 files changed, 468 insertions(+), 4 deletions(-) create mode 100644 scripts/base/protocols/socks/__load__.bro create mode 100644 scripts/base/protocols/socks/main.bro create mode 100644 src/SOCKS.cc create mode 100644 src/SOCKS.h create mode 100644 src/socks-analyzer.pac create mode 100644 src/socks-protocol.pac create mode 100644 src/socks.pac diff --git a/scripts/base/frameworks/dpd/dpd.sig b/scripts/base/frameworks/dpd/dpd.sig index 8408d7617a..f5d3651104 100644 --- a/scripts/base/frameworks/dpd/dpd.sig +++ b/scripts/base/frameworks/dpd/dpd.sig @@ -155,3 +155,34 @@ signature dpd_ssl_client { # payload /^..\x11\x29/ # enable "ayiya" #} + +signature dpd_socks_client { + ip-proto == tcp + # '32' is a rather arbitrary max length for the user name. + payload /^\x04[\x01\x02].{0,32}\x00/ + tcp-state originator +} + +signature dpd_socks_server { + ip-proto == tcp + requires-reverse-signature dpd_socks_client + payload /^\x00[\x5a\x5b\x5c\x5d]/ + tcp-state responder + enable "socks" +} + +signature dpd_socks_reverse_client { + ip-proto == tcp + # '32' is a rather arbitrary max length for the user name. 
+ payload /^\x04[\x01\x02].{0,32}\x00/ + tcp-state responder +} + +signature dpd_socks_reverse_server { + ip-proto == tcp + requires-reverse-signature dpd_socks_client + payload /^\x00[\x5a\x5b\x5c\x5d]/ + tcp-state originator + enable "socks" +} + diff --git a/scripts/base/frameworks/tunnels/__load__.bro b/scripts/base/frameworks/tunnels/__load__.bro index d551be57d3..3def3511f5 100644 --- a/scripts/base/frameworks/tunnels/__load__.bro +++ b/scripts/base/frameworks/tunnels/__load__.bro @@ -1 +1,4 @@ -@load ./main \ No newline at end of file +@load ./main + +const ports = { 5072/udp } &redef; +redef dpd_config += { [ANALYZER_AYIYA] = [$ports = ports] }; diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro index 901bee9a75..987939eb6e 100644 --- a/scripts/base/frameworks/tunnels/main.bro +++ b/scripts/base/frameworks/tunnels/main.bro @@ -1,8 +1,53 @@ module Tunnels; export { - + redef enum Log::ID += { LOG }; + + type Action: enum { + DISCOVER, + CLOSE, + }; + + type Info: record { + ts: time &log; + uid: string &log; + id: conn_id &log; + action: Action &log; + tunnel_type: string &log; + user: string &log &optional; + }; + + global register: function(c: connection, tunnel_type: string); + + global active: table[conn_id] of Tunnels::Info = table(); } -const ports = { 5072/udp } &redef; -redef dpd_config += { [ANALYZER_AYIYA] = [$ports = ports] }; +event bro_init() &priority=5 + { + Log::create_stream(Tunnels::LOG, [$columns=Info]); + } + +function register(c: connection, tunnel_type: string) + { + local tunnel: Info; + tunnel$ts = network_time(); + tunnel$uid = c$uid; + tunnel$id = c$id; + tunnel$action = DISCOVER; + tunnel$tunnel_type = tunnel_type; + + active[c$id] = tunnel; + Log::write(LOG, tunnel); + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c$id in active ) + { + local tunnel = active[c$id]; + tunnel$action=CLOSE; + Log::write(LOG, tunnel); + + delete active[c$id]; + } + } \ No newline at end of file diff --git a/scripts/base/init-default.bro b/scripts/base/init-default.bro index ecaa19132c..91011738d1 100644 --- a/scripts/base/init-default.bro +++ b/scripts/base/init-default.bro @@ -37,6 +37,7 @@ @load base/protocols/http @load base/protocols/irc @load base/protocols/smtp +@load base/protocols/socks @load base/protocols/ssh @load base/protocols/ssl @load base/protocols/syslog diff --git a/scripts/base/protocols/socks/__load__.bro b/scripts/base/protocols/socks/__load__.bro new file mode 100644 index 0000000000..d551be57d3 --- /dev/null +++ b/scripts/base/protocols/socks/__load__.bro @@ -0,0 +1 @@ +@load ./main \ No newline at end of file diff --git a/scripts/base/protocols/socks/main.bro b/scripts/base/protocols/socks/main.bro new file mode 100644 index 0000000000..61f569d56c --- /dev/null +++ b/scripts/base/protocols/socks/main.bro @@ -0,0 +1,116 @@ +@load base/frameworks/tunnels + +module SOCKS; + +export { + type RequestType: enum { + CONNECTION = 1, + PORT = 2, + }; +} + +event socks_request(c: connection, request_type: count, dstaddr: addr, dstname: string, p: port, user: string) + { + Tunnels::register(c, "SOCKS"); + } + +# +#global output = open_log_file("socks"); +# +#type socks_conn: record { +# id: conn_id; +# t: time; +# req: socks_request_type &optional; +# dstaddr: addr &optional; +# dstname: string &optional; +# p: port &optional; +# user: string &optional; +# service: string &optional; +# variant: string &default = "SOCKS v4"; +# granted: string &default = "no-reply"; +#}; +# +# +#global 
conns: table[conn_id] of socks_conn; +#global proxies: set[addr] &read_expire = 24hrs; +# +#event socks_request(c: connection, t: socks_request_type, dstaddr: addr, dstname: string, p: port, user: string) +# { +# local id = c$id; +# +# local sc: socks_conn; +# sc$id = id; +# sc$t = c$start_time; +# sc$req = t; +# +# if ( dstaddr != 0.0.0.0 ) +# sc$dstaddr = dstaddr; +# +# if ( dstname != "" ) +# sc$dstname = dstname; +# +# if ( p != 0/tcp ) +# sc$p = p; +# +# if ( user != "" ) +# sc$user = user; +# +# conns[id] = sc; +# } +# +#event socks_reply(c: connection, granted: bool, dst: addr, p: port) +# { +# local id = c$id; +# local sc: socks_conn; +# +# if ( id in conns ) +# sc = conns[id]; +# else +# { +# sc$id = id; +# sc$t = c$start_time; +# conns[id] = sc; +# } +# +# sc$granted = granted ? "ok" : "denied"; +# +# local proxy = c$id$resp_h; +# +# if ( proxy !in proxies ) +# { +# NOTICE([$note=SOCKSProxy, $src=proxy, $sub=sc$variant, +# $msg=fmt("SOCKS proxy seen at %s (%s)", proxy, sc$variant)]); +# add proxies[proxy]; +# } +# } +# +#function print_conn(sc: socks_conn) +# { +# local req = ""; +# if ( sc?$req ) +# { +# if ( sc$req == SOCKS_CONNECTION ) +# req = "relay-to"; +# if ( sc$req == SOCKS_PORT ) +# req = "bind-port"; +# } +# +# local p = sc?$p ? fmt("%s", sc$p) : ""; +# +# local dest = sc?$dstaddr +# ? (fmt("%s:%s%s", sc$dstaddr, p, (sc?$dstname ? fmt(" (%s)", sc$dstname) : ""))) +# : (sc?$dstname ? fmt("%s:%s", sc$dstname, p) : ""); +# local user = sc?$user ? fmt(" (user %s)", sc?$user) : ""; +# +# local service = sc?$service ? fmt(" [%s]", sc$service) : ""; +# +# print output, fmt("%.6f %s %s %s %s-> %s%s", sc$t, id_string(sc$id), req, +# dest, user, sc$granted, service); +# } +# +#event connection_state_remove(c: connection) +# { +# if ( c$id in conns ) +# print_conn(conns[c$id]); +# } +# diff --git a/src/Analyzer.cc b/src/Analyzer.cc index 70bb5567cc..f731b36a70 100644 --- a/src/Analyzer.cc +++ b/src/Analyzer.cc @@ -34,6 +34,7 @@ #include "NFS.h" #include "Portmap.h" #include "POP3.h" +#include "SOCKS.h" #include "SSH.h" #include "SSL-binpac.h" #include "Syslog-binpac.h" @@ -134,6 +135,9 @@ const Analyzer::Config Analyzer::analyzer_configs[] = { { AnalyzerTag::AYIYA, "AYIYA", AYIYA_Analyzer::InstantiateAnalyzer, AYIYA_Analyzer::Available, 0, false }, + { AnalyzerTag::SOCKS, "SOCKS", + SOCKS_Analyzer::InstantiateAnalyzer, + SOCKS_Analyzer::Available, 0, false }, //{ AnalyzerTag::Teredo, "Teredo", // Teredo_Analyzer::InstantiateAnalyzer, // Teredo_Analyzer::Available, 0, false }, diff --git a/src/AnalyzerTags.h b/src/AnalyzerTags.h index 0f9794527e..1b65d5219e 100644 --- a/src/AnalyzerTags.h +++ b/src/AnalyzerTags.h @@ -36,6 +36,7 @@ namespace AnalyzerTag { // Decapsulation Analyzers //6to4, AYIYA, + SOCKS, //Teredo, // Other diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6cca13de16..0481dd1bcd 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -208,6 +208,8 @@ binpac_target(netflow.pac netflow-protocol.pac netflow-analyzer.pac) binpac_target(smb.pac smb-protocol.pac smb-pipe.pac smb-mailslot.pac) +binpac_target(socks.pac + socks-protocol.pac socks-analyzer.pac) binpac_target(ssl.pac ssl-defs.pac ssl-protocol.pac ssl-analyzer.pac) binpac_target(syslog.pac @@ -379,6 +381,7 @@ set(bro_SRCS SmithWaterman.cc SMB.cc SMTP.cc + SOCKS.cc SSH.cc SSL-binpac.cc Scope.cc diff --git a/src/SOCKS.cc b/src/SOCKS.cc new file mode 100644 index 0000000000..880f4032e9 --- /dev/null +++ b/src/SOCKS.cc @@ -0,0 +1,79 @@ +#include "SOCKS.h" +#include "socks_pac.h" +#include 
"TCP_Reassembler.h" + +SOCKS_Analyzer::SOCKS_Analyzer(Connection* conn) +: TCP_ApplicationAnalyzer(AnalyzerTag::SOCKS, conn) + { + interp = new binpac::SOCKS::SOCKS_Conn(this); + orig_done = resp_done = false; + pia = 0; + } + +SOCKS_Analyzer::~SOCKS_Analyzer() + { + delete interp; + } + +void SOCKS_Analyzer::EndpointDone(bool orig) + { + if ( orig ) + orig_done = true; + else + resp_done = true; + } + +void SOCKS_Analyzer::Done() + { + TCP_ApplicationAnalyzer::Done(); + + interp->FlowEOF(true); + interp->FlowEOF(false); + } + +void SOCKS_Analyzer::EndpointEOF(TCP_Reassembler* endp) + { + TCP_ApplicationAnalyzer::EndpointEOF(endp); + interp->FlowEOF(endp->IsOrig()); + } + +void SOCKS_Analyzer::DeliverStream(int len, const u_char* data, bool orig) + { + TCP_ApplicationAnalyzer::DeliverStream(len, data, orig); + + assert(TCP()); + + if ( TCP()->IsPartial() ) + // punt on partial. + return; + + if ( orig_done && resp_done ) + { + // Finished decapsulating tunnel layer. Now do standard processing + // with the rest of the conneciton. + // + // Note that we assume that no payload data arrives before both endpoints + // are done with there part of the SOCKS protocol. + + if ( ! pia ) + { + pia = new PIA_TCP(Conn()); + AddChildAnalyzer(pia); + pia->FirstPacket(true, 0); + pia->FirstPacket(false, 0); + } + + ForwardStream(len, data, orig); + } + else + { + interp->NewData(orig, data, data + len); + } + } + +void SOCKS_Analyzer::Undelivered(int seq, int len, bool orig) + { + TCP_ApplicationAnalyzer::Undelivered(seq, len, orig); + interp->NewGap(orig, len); + } + diff --git a/src/SOCKS.h b/src/SOCKS.h new file mode 100644 index 0000000000..4e18e59e76 --- /dev/null +++ b/src/SOCKS.h @@ -0,0 +1,45 @@ +#ifndef socks_h +#define socks_h + +// SOCKS v4 analyzer. + +#include "TCP.h" +#include "PIA.h" + +namespace binpac { + namespace SOCKS { + class SOCKS_Conn; + } +} + + +class SOCKS_Analyzer : public TCP_ApplicationAnalyzer { +public: + SOCKS_Analyzer(Connection* conn); + ~SOCKS_Analyzer(); + + void EndpointDone(bool orig); + + virtual void Done(); + virtual void DeliverStream(int len, const u_char* data, bool orig); + virtual void Undelivered(int seq, int len, bool orig); + virtual void EndpointEOF(TCP_Reassembler* endp); + + static Analyzer* InstantiateAnalyzer(Connection* conn) + { return new SOCKS_Analyzer(conn); } + + static bool Available() + { + return socks_request || socks_reply; + } + +protected: + + bool orig_done; + bool resp_done; + + PIA_TCP *pia; + binpac::SOCKS::SOCKS_Conn* interp; +}; + +#endif diff --git a/src/event.bif b/src/event.bif index 1ce8907f0b..296a910478 100644 --- a/src/event.bif +++ b/src/event.bif @@ -5976,6 +5976,26 @@ event syslog_message%(c: connection, facility: count, severity: count, msg: stri ## to the event. event signature_match%(state: signature_state, msg: string, data: string%); +## Generated when a SOCKS request is analyzed. +## +## c: The parent connection of the proxy. +## +## t: The type of the request. +## +## dstaddr: Address that the tunneled traffic should be sent to. +## +## dstname: DNS name of the host that the tunneled traffic should be sent to. +## +## p: The destination port for the proxied traffic. +## +## user: Username given for the SOCKS connection. +event socks_request%(c: connection, request_type: count, dstaddr: addr, dstname: string, p: port, user: string%); + +## Generated when a SOCKS reply is analyzed. 
+## +## +event socks_reply%(c: connection, granted: bool, dst: addr, p: port%); + ## Generated when a protocol analyzer finds an identification of a software ## used on a system. This is a protocol-independent event that is fed by ## different analyzers. For example, the HTTP analyzer reports user-agent and diff --git a/src/socks-analyzer.pac b/src/socks-analyzer.pac new file mode 100644 index 0000000000..4c7b6e7a1d --- /dev/null +++ b/src/socks-analyzer.pac @@ -0,0 +1,57 @@ + +%header{ +StringVal* array_to_string(vector *a); +%} + +%code{ +StringVal* array_to_string(vector *a) + { + int len = a->size(); + char tmp[len]; + char *s = tmp; + for ( vector::iterator i = a->begin(); i != a->end(); *s++ = *i++ ); + + while ( len > 0 && tmp[len-1] == '\0' ) + --len; + + return new StringVal(len, tmp); + } +%} + +refine connection SOCKS_Conn += { + function socks_request(cmd: uint8, dstaddr: uint32, dstname: uint8[], p: uint16, user: uint8[]): bool + %{ + BifEvent::generate_socks_request(bro_analyzer(), + bro_analyzer()->Conn(), + cmd, + new AddrVal(htonl(dstaddr)), + array_to_string(dstname), + new PortVal(p | TCP_PORT_MASK), + array_to_string(user)); + + static_cast(bro_analyzer())->EndpointDone(true); + + return true; + %} + + function socks_reply(granted: bool, dst: uint32, p: uint16): bool + %{ + BifEvent::generate_socks_reply(bro_analyzer(), + bro_analyzer()->Conn(), + granted, + new AddrVal(htonl(dst)), + new PortVal(p | TCP_PORT_MASK)); + + bro_analyzer()->ProtocolConfirmation(); + static_cast(bro_analyzer())->EndpointDone(false); + return true; + %} +}; + +refine typeattr SOCKS_Request += &let { + proc: bool = $context.connection.socks_request(command, addr, empty, port, user); +}; + +refine typeattr SOCKS_Reply += &let { + proc: bool = $context.connection.socks_reply((status == 0x5a), addr, port); +}; diff --git a/src/socks-protocol.pac b/src/socks-protocol.pac new file mode 100644 index 0000000000..677daeb175 --- /dev/null +++ b/src/socks-protocol.pac @@ -0,0 +1,34 @@ +type SOCKS_Message(is_orig: bool) = case is_orig of { + true -> request: SOCKS_Request; + false -> reply: SOCKS_Reply; +}; + +type SOCKS_Request = record { + version: uint8; + command: uint8; + port: uint16; + addr: uint32; + user: uint8[] &until($element == 0); + + host: case v4a of { + true -> name: uint8[] &until($element == 0); # v4a + false -> empty: uint8[] &length=0; + } &requires(v4a); + + # FIXME: Can this be non-zero? If so we need to keep it for the + # next analyzer. + rest: bytestring &restofdata; +} &byteorder = bigendian &let { + v4a: bool = (addr <= 0x000000ff); +}; + +type SOCKS_Reply = record { + zero: uint8; + status: uint8; + port: uint16; + addr: uint32; + + # FIXME: Can this be non-zero? If so we need to keep it for the + # next analyzer. 
+ rest: bytestring &restofdata; +} &byteorder = bigendian; \ No newline at end of file diff --git a/src/socks.pac b/src/socks.pac new file mode 100644 index 0000000000..4f16582690 --- /dev/null +++ b/src/socks.pac @@ -0,0 +1,24 @@ +%include binpac.pac +%include bro.pac + +%extern{ +#include "SOCKS.h" +%} + +analyzer SOCKS withcontext { + connection: SOCKS_Conn; + flow: SOCKS_Flow; +}; + +connection SOCKS_Conn(bro_analyzer: BroAnalyzer) { + upflow = SOCKS_Flow(true); + downflow = SOCKS_Flow(false); +}; + +%include socks-protocol.pac + +flow SOCKS_Flow(is_orig: bool) { + datagram = SOCKS_Message(is_orig) withcontext(connection, this); +}; + +%include socks-analyzer.pac \ No newline at end of file From 65eb974f5db90a6c52820899dcd54a2514db37bb Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Mon, 23 Apr 2012 11:17:13 -0500 Subject: [PATCH 247/651] Added an option to specify the 'etc' directory Addresses #801. --- configure | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/configure b/configure index 3c1cca8c9d..64b0090204 100755 --- a/configure +++ b/configure @@ -24,6 +24,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --prefix=PREFIX installation directory [/usr/local/bro] --scriptdir=PATH root installation directory for Bro scripts [PREFIX/share/bro] + --conf-files-dir=PATH config files installation directory [PREFIX/etc] Optional Features: --enable-debug compile in debugging mode @@ -91,6 +92,7 @@ append_cache_entry CMAKE_INSTALL_PREFIX PATH /usr/local/bro append_cache_entry BRO_ROOT_DIR PATH /usr/local/bro append_cache_entry PY_MOD_INSTALL_DIR PATH /usr/local/bro/lib/broctl append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING /usr/local/bro/share/bro +append_cache_entry BRO_ETC_INSTALL_DIR PATH /usr/local/bro/etc append_cache_entry ENABLE_DEBUG BOOL false append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false append_cache_entry BinPAC_SKIP_INSTALL BOOL true @@ -126,11 +128,18 @@ while [ $# -ne 0 ]; do if [ "$user_set_scriptdir" != "true" ]; then append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg/share/bro fi + if [ "$user_set_conffilesdir" != "true" ]; then + append_cache_entry BRO_ETC_INSTALL_DIR PATH $optarg/etc + fi ;; --scriptdir=*) append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg user_set_scriptdir="true" ;; + --conf-files-dir=*) + append_cache_entry BRO_ETC_INSTALL_DIR PATH $optarg + user_set_conffilesdir="true" + ;; --enable-debug) append_cache_entry ENABLE_DEBUG BOOL true ;; From b51dd191d78d64bea0db2d6503c1a6350137ae62 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 23 Apr 2012 13:15:29 -0500 Subject: [PATCH 248/651] Refactor IP-in-IP tunnel support. UDP tunnel support removed for now, to be re-added in specific analyzers later, but IP-in-IP is now decapsulated recursively so nested tunnels can be seen and the inner packets get sent through the IP fragment reassembler if necessary. 
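Roughly speaking, the Encapsulation value that the refactored code
passes around (see the Conn.cc and Sessions.cc hunks below) is an
ordered stack of encapsulating layers with value equality:
Sessions::DoNextPacket() appends an EncapsulatingConn per decapsulated
layer and recurses, Connection keeps a copy for BuildConnVal(), and
CheckEncapsulation() compares the stored value against the one seen on
the current packet and raises tunnel_changed on a mismatch. Purely as
an illustration of the semantics those call sites rely on (not the
actual TunnelHandler.h implementation), a comparable type could look
like this:

    #include <cstddef>
    #include <vector>

    // Illustrative stand-ins only; the real tunnel types come from
    // BifEnum::Tunnel (IP4_IN_IP4, IP4_IN_IP6, IP6_IN_IP4, IP6_IN_IP6).
    enum TunnelTypeSketch { IP4_IN_IP4, IP4_IN_IP6, IP6_IN_IP4, IP6_IN_IP6 };

    struct EncapLayerSketch {
        // The real EncapsulatingConn is built from the outer IP header's
        // src/dst addresses plus a tunnel type, and surfaces in scripts as
        // a Tunnel::EncapsulatingConn record.
        unsigned int src, dst;
        TunnelTypeSketch type;

        bool operator==(const EncapLayerSketch& o) const
            { return src == o.src && dst == o.dst && type == o.type; }
    };

    class EncapsulationSketch {
    public:
        void Add(const EncapLayerSketch& l)  { layers.push_back(l); }
        size_t Depth() const                 { return layers.size(); }

        bool operator==(const EncapsulationSketch& o) const
            { return layers == o.layers; }
        bool operator!=(const EncapsulationSketch& o) const
            { return ! (*this == o); }

    private:
        std::vector<EncapLayerSketch> layers;  // outermost layer first
    };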
--- scripts/base/init-bare.bro | 36 ++-- scripts/policy/frameworks/tunnel.bro | 5 +- src/Conn.cc | 19 +- src/Conn.h | 8 +- src/NetVar.cc | 7 - src/NetVar.h | 3 - src/Sessions.cc | 102 +++++---- src/Sessions.h | 10 +- src/TunnelHandler.cc | 202 ++++-------------- src/TunnelHandler.h | 156 +++++++------- src/const.bif | 2 +- src/event.bif | 11 + src/types.bif | 10 +- .../Baseline/core.tunnels.ip-in-ip/output | 22 ++ testing/btest/Traces/tunnels/4in4.pcap | Bin 0 -> 106 bytes testing/btest/Traces/tunnels/4in6.pcap | Bin 0 -> 134 bytes testing/btest/Traces/tunnels/6in4.pcap | Bin 0 -> 126 bytes .../Traces/tunnels/6in6-tunnel-change.pcap | Bin 0 -> 268 bytes testing/btest/Traces/tunnels/6in6.pcap | Bin 0 -> 146 bytes testing/btest/Traces/tunnels/6in6in6.pcap | Bin 0 -> 186 bytes testing/btest/core/tunnels/ip-in-ip.test | 30 +++ 21 files changed, 300 insertions(+), 323 deletions(-) create mode 100644 testing/btest/Baseline/core.tunnels.ip-in-ip/output create mode 100644 testing/btest/Traces/tunnels/4in4.pcap create mode 100644 testing/btest/Traces/tunnels/4in6.pcap create mode 100644 testing/btest/Traces/tunnels/6in4.pcap create mode 100644 testing/btest/Traces/tunnels/6in6-tunnel-change.pcap create mode 100644 testing/btest/Traces/tunnels/6in6.pcap create mode 100644 testing/btest/Traces/tunnels/6in6in6.pcap create mode 100644 testing/btest/core/tunnels/ip-in-ip.test diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 7129d8eb68..88f0910d1c 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -179,19 +179,27 @@ type AnalyzerID: count; module Tunnel; export { - ## Records the identity of a the parent of a tunneled connection. - type Parent: record { - ## The 4-tuple of the tunnel "connection". In case of an IP-in-IP + ## Records the identity of an encapsulating parent of a tunneled connection. + type EncapsulatingConn: record { + ## The 4-tuple of the encapsulating "connection". In case of an IP-in-IP ## tunnel the ports will be set to 0. The direction (i.e., orig and - ## resp) of the parent are set according to the tunneled connection + ## resp) are set according to the first tunneled packet seen ## and not according to the side that established the tunnel. cid: conn_id; ## The type of tunnel. - tunnel_type: Tunneltype; + tunnel_type: Tunnel::Type; } &log; } # end export module GLOBAL; +## A type alias for a vector of encapsulating "connections", i.e for when +## there are tunnels within tunnels. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type encapsulating_conns: vector of Tunnel::EncapsulatingConn; + ## Statistics about an endpoint. ## ## todo::Where is this used? @@ -239,8 +247,11 @@ type connection: record { ## used to tag and locate information associated with that connection. uid: string; ## If the connection is tunneled, this field contains information about - ## the encapsulating "connection". - tunnel_parent: Tunnel::Parent &optional; + ## the encapsulating "connection(s)" with the outermost one starting + ## at index zero. It's also always the first such enapsulation seen + ## for the connection unless the :bro:id:`tunnel_changed` event is handled + ## and re-assigns this field to the new encapsulation. + tunnel: encapsulating_conns &optional; }; ## Fields of a SYN packet. @@ -2616,16 +2627,10 @@ const record_all_packets = F &redef; ## .. 
bro:see:: conn_stats const ignore_keep_alive_rexmit = F &redef; -## Whether the analysis engine parses IP packets encapsulated in -## UDP tunnels. -## -## .. bro:see:: tunnel_port -const parse_udp_tunnels = F &redef; - module Tunnel; export { ## Whether to decapsulate IP tunnels (IPinIP, 6in4, 6to4) - const decapsulate_ip = F &redef; + const decapsulate_ip = T &redef; ## Whether to decapsulate UDP tunnels (e.g., Teredo, IPv4 in UDP) const decapsulate_udp = F &redef; @@ -2640,6 +2645,9 @@ export { ## If udp_tunnel_allports is T :bro:id:`udp_tunnel_ports` is ignored and we ## check every UDP packet for tunnels. const udp_tunnel_allports = F &redef; + + ## The maximum depth of a tunnel to decapsulate until giving up. + const max_depth: count = 2 &redef; } # end export module GLOBAL; diff --git a/scripts/policy/frameworks/tunnel.bro b/scripts/policy/frameworks/tunnel.bro index 98a860653c..fb9bf2f3f6 100644 --- a/scripts/policy/frameworks/tunnel.bro +++ b/scripts/policy/frameworks/tunnel.bro @@ -24,9 +24,8 @@ ##! is stored as the ``tunnel_parent`` member of :bro:type:`connection`, ##! which is of type :bro:type:`Tunnel::Parent`. ##! -##! *Limitation:* The decapsulated packets are not fed through the -##! defragmenter again and decapsulation happens only on the primary -##! path, i.e., it's not available for the secondary path. +##! *Limitation:* decapsulation happens only on the primary path, i.e. +##! it's not available for the secondary path. @load base/protocols/conn diff --git a/src/Conn.cc b/src/Conn.cc index c2008b1faa..80c026e781 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -112,7 +112,8 @@ unsigned int Connection::external_connections = 0; IMPLEMENT_SERIAL(Connection, SER_CONNECTION); -Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, TunnelParent* arg_tunnel_parent) +Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, + const Encapsulation& arg_encap) { sessions = s; key = k; @@ -156,7 +157,7 @@ Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, T uid = 0; // Will set later. 
- tunnel_parent = arg_tunnel_parent; + encapsulation = arg_encap; if ( conn_timer_mgr ) { @@ -182,7 +183,6 @@ Connection::~Connection() Unref(conn_val); } - delete tunnel_parent; delete key; delete root_analyzer; delete conn_timer_mgr; @@ -192,6 +192,15 @@ Connection::~Connection() --external_connections; } +void Connection::CheckEncapsulation(const Encapsulation& arg_encap) + { + if ( encapsulation != arg_encap ) + { + Event(tunnel_changed, 0, arg_encap.GetVectorVal()); + encapsulation = arg_encap; + } + } + void Connection::Done() { finished = 1; @@ -346,8 +355,8 @@ RecordVal* Connection::BuildConnVal() char tmp[20]; conn_val->Assign(9, new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62))); - if ( tunnel_parent ) - conn_val->Assign(10, tunnel_parent->GetRecordVal()); + if ( encapsulation.Depth() > 0 ) + conn_val->Assign(10, encapsulation.GetVectorVal()); } if ( root_analyzer ) diff --git a/src/Conn.h b/src/Conn.h index 99af34a07a..9cdb746b7c 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -13,6 +13,7 @@ #include "RuleMatcher.h" #include "AnalyzerTags.h" #include "IPAddr.h" +#include "TunnelHandler.h" class Connection; class ConnectionTimer; @@ -51,9 +52,12 @@ class Analyzer; class Connection : public BroObj { public: - Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, TunnelParent *arg_tunnel_parent); + Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, + const Encapsulation& arg_encap); virtual ~Connection(); + void CheckEncapsulation(const Encapsulation& arg_encap); + // Invoked when connection is about to be removed. Use Ref(this) // inside Done to keep the connection object around (though it'll // no longer be accessible from the dictionary of active @@ -276,7 +280,7 @@ protected: double inactivity_timeout; RecordVal* conn_val; LoginConn* login_conn; // either nil, or this - TunnelParent* tunnel_parent; // nil if not tunneled + Encapsulation encapsulation; // tunnels int suppress_event; // suppress certain events to once per conn. 
unsigned int installed_status_timer:1; diff --git a/src/NetVar.cc b/src/NetVar.cc index 59cc1cc633..390598bb99 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -30,9 +30,6 @@ int partial_connection_ok; int tcp_SYN_ack_ok; int tcp_match_undelivered; -int encap_hdr_size; -int udp_tunnel_port; - double frag_timeout; double tcp_SYN_timeout; @@ -322,10 +319,6 @@ void init_net_var() tcp_SYN_ack_ok = opt_internal_int("tcp_SYN_ack_ok"); tcp_match_undelivered = opt_internal_int("tcp_match_undelivered"); - encap_hdr_size = opt_internal_int("encap_hdr_size"); - - udp_tunnel_port = opt_internal_int("udp_tunnel_port") & ~UDP_PORT_MASK; - frag_timeout = opt_internal_double("frag_timeout"); tcp_SYN_timeout = opt_internal_double("tcp_SYN_timeout"); diff --git a/src/NetVar.h b/src/NetVar.h index 425ea93e09..f5c17f64aa 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -33,9 +33,6 @@ extern int partial_connection_ok; extern int tcp_SYN_ack_ok; extern int tcp_match_undelivered; -extern int encap_hdr_size; -extern int udp_tunnel_port; - extern double frag_timeout; extern double tcp_SYN_timeout; diff --git a/src/Sessions.cc b/src/Sessions.cc index 7d829b602b..769bd68f52 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -126,12 +126,6 @@ NetSessions::NetSessions() arp_analyzer = new ARP_Analyzer(); else arp_analyzer = 0; - - - if ( BifConst::Tunnel::decapsulate_ip || BifConst::Tunnel::decapsulate_udp ) - tunnel_handler = new TunnelHandler(this); - else - tunnel_handler = 0; } NetSessions::~NetSessions() @@ -185,6 +179,8 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, if ( record_all_packets ) DumpPacket(hdr, pkt); + Encapsulation encapsulation; + if ( pkt_elem && pkt_elem->IPHdr() ) // Fast path for "normal" IP packets if an IP_Hdr is // already extracted when doing PacketSort. Otherwise @@ -192,7 +188,7 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, // difference here is that header extraction in // PacketSort does not generate Weird events. 
- DoNextPacket(t, hdr, pkt_elem->IPHdr(), pkt, hdr_size); + DoNextPacket(t, hdr, pkt_elem->IPHdr(), pkt, hdr_size, encapsulation); else { @@ -217,7 +213,7 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, if ( ip->ip_v == 4 ) { IP_Hdr ip_hdr(ip, false); - DoNextPacket(t, hdr, &ip_hdr, pkt, hdr_size); + DoNextPacket(t, hdr, &ip_hdr, pkt, hdr_size, encapsulation); } else if ( ip->ip_v == 6 ) @@ -229,7 +225,7 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, } IP_Hdr ip_hdr((const struct ip6_hdr*) (pkt + hdr_size), false, caplen); - DoNextPacket(t, hdr, &ip_hdr, pkt, hdr_size); + DoNextPacket(t, hdr, &ip_hdr, pkt, hdr_size, encapsulation); } else if ( ARP_Analyzer::IsARP(pkt, hdr_size) ) @@ -351,7 +347,7 @@ int NetSessions::CheckConnectionTag(Connection* conn) void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, const IP_Hdr* ip_hdr, const u_char* const pkt, - int hdr_size) + int hdr_size, Encapsulation& encapsulation) { uint32 caplen = hdr->caplen - hdr_size; const struct ip* ip4 = ip_hdr->IP4_Hdr(); @@ -458,24 +454,10 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } #endif - TunnelInfo *tunnel_info = 0; - if ( tunnel_handler ) - { - tunnel_info = tunnel_handler->DecapsulateTunnel(ip_hdr, len, caplen, hdr, pkt); - if (tunnel_info) - { - ip4 = tunnel_info->child->IP4_Hdr(); - ip_hdr = tunnel_info->child; - len -= tunnel_info->hdr_len; - caplen -= tunnel_info->hdr_len; - } - } - int proto = ip_hdr->NextProto(); if ( CheckHeaderTrunc(proto, len, caplen, hdr, pkt) ) { - delete tunnel_info; Remove(f); return; } @@ -540,9 +522,51 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, break; } + case IPPROTO_IP: + case IPPROTO_IPV4: + case IPPROTO_IPV6: + { + if ( ! BifConst::Tunnel::decapsulate_ip ) + { + reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), "ip_tunnel"); + Remove(f); + return; + } + + if ( encapsulation.Depth() >= BifConst::Tunnel::max_depth ) + { + reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), "tunnel_depth"); + Remove(f); + return; + } + + IP_Hdr* inner_ip; + if ( proto == IPPROTO_IPV6 ) + inner_ip = new IP_Hdr((const struct ip6_hdr*) data, false, caplen); + else + inner_ip = new IP_Hdr((const struct ip*) data, false); + + struct pcap_pkthdr fake_hdr; + fake_hdr.caplen = fake_hdr.len = caplen; + fake_hdr.ts = hdr->ts; + + EncapsulatingConn ec(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), + ip_hdr->IP4_Hdr() ? + ( proto == IPPROTO_IPV6 ? + BifEnum::Tunnel::IP6_IN_IP4 : BifEnum::Tunnel::IP4_IN_IP4 ) : + ( proto == IPPROTO_IPV6 ? + BifEnum::Tunnel::IP6_IN_IP6 : BifEnum::Tunnel::IP4_IN_IP6 )); + encapsulation.Add(ec); + + DoNextPacket(t, &fake_hdr, inner_ip, data, 0, encapsulation); + + delete inner_ip; + Remove(f); + return; + } + default: Weird(fmt("unknown_protocol_%d", proto), hdr, pkt); - delete tunnel_info; Remove(f); return; } @@ -558,7 +582,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, conn = (Connection*) d->Lookup(h); if ( ! 
conn ) { - conn = NewConn(h, t, &id, data, proto, tunnel_info); + conn = NewConn(h, t, &id, data, proto, encapsulation); if ( conn ) d->Insert(h, conn); } @@ -569,7 +593,6 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( consistent < 0 ) { delete h; - delete tunnel_info; Remove(f); return; } @@ -580,18 +603,20 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, conn->Event(connection_reused, 0); Remove(conn); - conn = NewConn(h, t, &id, data, proto, tunnel_info); + conn = NewConn(h, t, &id, data, proto, encapsulation); if ( conn ) d->Insert(h, conn); } else + { delete h; + conn->CheckEncapsulation(encapsulation); + } } if ( ! conn ) { delete h; - delete tunnel_info; Remove(f); return; } @@ -618,8 +643,6 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, record_packet, record_content, hdr, pkt, hdr_size); - delete tunnel_info; - if ( f ) { // Above we already recorded the fragment in its entirety. @@ -651,11 +674,19 @@ bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, case IPPROTO_UDP: min_hdr_len = sizeof(struct udphdr); break; + case IPPROTO_IP: + case IPPROTO_IPV4: + min_hdr_len = sizeof(struct ip); + break; + case IPPROTO_IPV6: + min_hdr_len = sizeof(struct ip6_hdr); + break; case IPPROTO_ICMP: case IPPROTO_ICMPV6: default: // Use for all other packets. min_hdr_len = ICMP_MINLEN; + break; } if ( len < min_hdr_len ) @@ -962,14 +993,14 @@ void NetSessions::GetStats(SessionStats& s) const } Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, - const u_char* data, int proto, TunnelInfo* tunnel_info) + const u_char* data, int proto, + const Encapsulation& encapsulation) { // FIXME: This should be cleaned up a bit, it's too protocol-specific. // But I'm not yet sure what the right abstraction for these things is. int src_h = ntohs(id->src_port); int dst_h = ntohs(id->dst_port); int flags = 0; - TunnelParent *tunnel_parent = 0; // Hmm... This is not great. TransportProto tproto = TRANSPORT_UNKNOWN; @@ -1019,10 +1050,7 @@ Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, id = &flip_id; } - if ( tunnel_info ) - tunnel_parent = new TunnelParent(&(tunnel_info->parent)); - - Connection* conn = new Connection(this, k, t, id, tunnel_parent); + Connection* conn = new Connection(this, k, t, id, encapsulation); conn->SetTransport(tproto); dpm->BuildInitialAnalyzerTree(tproto, conn, data); diff --git a/src/Sessions.h b/src/Sessions.h index edbb7a8ffd..e1afbeec5a 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -14,6 +14,7 @@ struct pcap_pkthdr; +class Encapsulation; class Connection; class ConnID; class OSFingerprint; @@ -26,9 +27,6 @@ class Discarder; class SteppingStoneManager; class PacketFilter; -class TunnelHandler; -class TunnelInfo; - class PacketSortElement; struct SessionStats { @@ -145,7 +143,7 @@ protected: friend class TimerMgrExpireTimer; Connection* NewConn(HashKey* k, double t, const ConnID* id, - const u_char* data, int proto, TunnelInfo *tunnel_info); + const u_char* data, int proto, const Encapsulation& encapsulation); // Check whether the tag of the current packet is consistent with // the given connection. 
Returns: @@ -178,7 +176,7 @@ protected: void DoNextPacket(double t, const struct pcap_pkthdr* hdr, const IP_Hdr* ip_hdr, const u_char* const pkt, - int hdr_size); + int hdr_size, Encapsulation& encapsulation); void NextPacketSecondary(double t, const struct pcap_pkthdr* hdr, const u_char* const pkt, int hdr_size, @@ -216,8 +214,6 @@ protected: int num_packets_processed; PacketProfiler* pkt_profiler; - TunnelHandler *tunnel_handler; - // We may use independent timer managers for different sets of related // activity. The managers are identified by an unique tag. typedef std::map TimerMgrMap; diff --git a/src/TunnelHandler.cc b/src/TunnelHandler.cc index 9debf79d9c..4923b36f3d 100644 --- a/src/TunnelHandler.cc +++ b/src/TunnelHandler.cc @@ -1,178 +1,52 @@ -// $Id: Sessions.cc 7075 2010-09-13 02:39:38Z vern $ -// // See the file "COPYING" in the main distribution directory for copyright. - -#include "config.h" - -#include - -#include -#include - #include "TunnelHandler.h" -#include "Conn.h" -#include "Sessions.h" - -TunnelHandler::TunnelHandler(NetSessions *arg_s) +RecordVal* EncapsulatingConn::GetRecordVal() const { - s = arg_s; - PortVal *pv = 0; - TableVal *udp_tunnel_ports = BifConst::Tunnel::udp_tunnel_ports->AsTableVal(); - // Find UDP ports we want to analyze. Store them in an array for faster - // lookup. - for ( int i = 0; i< 65536; i++ ) - { - if ( pv ) - Unref(pv); - pv = new PortVal(i, TRANSPORT_UDP); - if ( udp_tunnel_ports->Lookup(pv, false) ) - udp_ports[i] = 1; - else - udp_ports[i] = 0; - } - Unref(pv); - } - -TunnelInfo* TunnelHandler::DecapsulateTunnel(const IP_Hdr *ip_hdr, int len, int caplen, - const struct pcap_pkthdr* hdr, const u_char* const pkt) - { - TunnelInfo *tunnel_info = 0; - - switch ( ip_hdr->NextProto() ) { - case IPPROTO_IPV6: /* 6in4 and 6to4 */ - if ( BifConst::Tunnel::decapsulate_ip ) - { - if ( len < (int)sizeof(struct ip6_hdr) || - caplen < (int)sizeof(struct ip6_hdr) ) - { - s->Weird("truncated_header", hdr, pkt); - return 0; - } - // TODO: check if IP6 header makes sense - tunnel_info = new TunnelInfo(); - tunnel_info->child = new IP_Hdr((const struct ip6_hdr*)ip_hdr->Payload(), false, caplen); - tunnel_info->parent.tunneltype = BifEnum::Tunnel::IP6_IN_IP; - tunnel_info->hdr_len = tunnel_info->child->HdrLen(); - tunnel_info->SetParentIPs(ip_hdr); - return tunnel_info; - } - break; - // TODO: IP in IP. Find test traces first. IP proto 0 and/or 4 - case IPPROTO_UDP: - if ( BifConst::Tunnel::decapsulate_udp ) - { - if ( len < (int)sizeof(struct udphdr) || - caplen < (int)sizeof(struct udphdr) ) - // No weird here. Main packet processing will raise it. 
- return 0; - return HandleUDP(ip_hdr, len, caplen); - } - + RecordVal *rv = + new RecordVal(BifType::Record::Tunnel::EncapsulatingConn); + TransportProto tproto; + switch ( type ) { + case BifEnum::Tunnel::IP6_IN_IP4: + case BifEnum::Tunnel::IP4_IN_IP4: + case BifEnum::Tunnel::IP6_IN_IP6: + case BifEnum::Tunnel::IP4_IN_IP6: + tproto = TRANSPORT_UNKNOWN; break; default: + tproto = TRANSPORT_UDP; break; - } /* end switch */ - return 0; + } // end switch + + RecordVal* id_val = new RecordVal(conn_id); + id_val->Assign(0, new AddrVal(src_addr)); + id_val->Assign(1, new PortVal(ntohs(src_port), tproto)); + id_val->Assign(2, new AddrVal(dst_addr)); + id_val->Assign(3, new PortVal(ntohs(dst_port), tproto)); + rv->Assign(0, id_val); + rv->Assign(1, new EnumVal(type, BifType::Enum::Tunnel::Type)); + return rv; } -TunnelInfo* TunnelHandler::HandleUDP(const IP_Hdr *ip_hdr, int len, int caplen) +bool operator==(const Encapsulation& e1, const Encapsulation& e2) { - // We already know we that we have a valid UDP header - const u_char *data = ip_hdr->Payload(); - const struct udphdr* uh = (const struct udphdr*)data; - IP_Hdr *cand_ip_hdr = 0; - BifEnum::Tunnel::Tunneltype tunneltype = BifEnum::Tunnel::NONE; - - int hdr_len = sizeof(struct udphdr); - data += hdr_len; - - int datalen = (int)ntohs(uh->uh_ulen); - datalen = min(datalen, min(len, caplen)); - datalen -= hdr_len; - - if ( BifConst::Tunnel::udp_tunnel_allports || - udp_ports[ntohs(uh->uh_sport)] || - udp_ports[ntohs(uh->uh_dport)] ) - { - cand_ip_hdr = LookForIPHdr(data, datalen); - if ( cand_ip_hdr ) + if ( e1.conns ) + if ( e2.conns ) { - // Found and IP hdr directly in the UDP payload - tunneltype = (cand_ip_hdr->IP4_Hdr()) ? - BifEnum::Tunnel::IP4_IN_UDP : BifEnum::Tunnel::IP6_IN_UDP; + if ( e1.conns->size() != e2.conns->size() ) + return false; + else + for ( size_t i = 0; i < e1.conns->size(); ++i ) + if ( (*e1.conns)[i] != (*e2.conns)[i] ) + return false; + return true; } - else if ( datalen >= 8 ) - { - // Look for AYIAY tunnels - u_char id_byte = data[0]; - u_char sig_byte = data[1]; - u_char next_hdr = data[3]; - - // identity length field is high bits of id_byte. - // length in octets is 2 to the power of length field - int id_len = (1 << (id_byte>>4)); - - // signature length field is high bits of sig_byte - // length in octets 4 * length field - int sig_len = 4*(sig_byte>>4); - - datalen -= 8 + id_len + sig_len; - data += 8 + id_len + sig_len; - if ( datalen <= 0 ) - return 0; - cand_ip_hdr = LookForIPHdr(data, datalen); - if ( cand_ip_hdr ) - { - hdr_len += 8 + id_len + sig_len; - tunneltype = (cand_ip_hdr->IP4_Hdr()) ? 
- BifEnum::Tunnel::IP4_IN_AYIAY : BifEnum::Tunnel::IP6_IN_AYIAY; - } - } - if ( cand_ip_hdr ) - { - TunnelInfo *tunnel_info = new TunnelInfo(); - tunnel_info->child = cand_ip_hdr; - tunnel_info->parent.tunneltype = tunneltype; - tunnel_info->SetParentIPs(ip_hdr); - tunnel_info->SetParentPorts(uh); - tunnel_info->hdr_len = hdr_len + cand_ip_hdr->HdrLen(); - return tunnel_info; - }; - } - return 0; - } - -IP_Hdr* TunnelHandler::LookForIPHdr(const u_char *data, int datalen) - { - IP_Hdr *cand_ip_hdr = 0; - if (datalen < (int)sizeof(struct ip)) - return 0; - - const struct ip *ip4 = (const struct ip*)(data); - if ( ip4->ip_v == 4 ) - cand_ip_hdr = new IP_Hdr((const struct ip*)ip4, false); - else if ( ip4->ip_v == 6 && (datalen > (int)sizeof(struct ip6_hdr)) ) - cand_ip_hdr = new IP_Hdr((const struct ip6_hdr*)data, false, datalen); - - if ( cand_ip_hdr ) - { - switch ( cand_ip_hdr->NextProto() ) { - case IPPROTO_UDP: - case IPPROTO_TCP: - case IPPROTO_ICMP: - if ( (int)cand_ip_hdr->TotalLen() != datalen ) - { - delete cand_ip_hdr; - cand_ip_hdr = 0; - } - break; - default: - delete cand_ip_hdr; - cand_ip_hdr = 0; - break; - } // end switch - } - return cand_ip_hdr; + else + return false; + else + if ( e2.conns ) + return false; + else + return true; } diff --git a/src/TunnelHandler.h b/src/TunnelHandler.h index 9f3f3614e7..21e491cdc9 100644 --- a/src/TunnelHandler.h +++ b/src/TunnelHandler.h @@ -3,105 +3,109 @@ #ifndef tunnelhandler_h #define tunnelhandler_h -#include -#include "net_util.h" -#include "IP.h" +#include "config.h" +#include "NetVar.h" #include "IPAddr.h" -#include "Conn.h" -#include "Sessions.h" #include "Val.h" +#include -class TunnelParent { +class EncapsulatingConn { public: - TunnelParent() + EncapsulatingConn() + : src_port(0), dst_port(0), type(BifEnum::Tunnel::NONE) {} + + EncapsulatingConn(const IPAddr& s, const IPAddr& d, + BifEnum::Tunnel::Type t) + : src_addr(s), dst_addr(d), src_port(0), dst_port(0), type(t) {} + + EncapsulatingConn(const IPAddr& s, const IPAddr& d, uint16 sp, uint16 dp, + BifEnum::Tunnel::Type t) + : src_addr(s), dst_addr(d), src_port(sp), dst_port(dp), type(t) {} + + EncapsulatingConn(const EncapsulatingConn& other) + : src_addr(other.src_addr), dst_addr(other.dst_addr), + src_port(other.src_port), dst_port(other.dst_port), + type(other.type) {} + + ~EncapsulatingConn() {} + + RecordVal* GetRecordVal() const; + + friend bool operator==(const EncapsulatingConn& ec1, + const EncapsulatingConn& ec2) { - tunneltype = BifEnum::Tunnel::NONE; - src_port = dst_port = 0; + return ec1.type == ec2.type && ec1.src_addr == ec2.src_addr && + ec1.src_port == ec2.src_port && ec1.dst_port == ec2.dst_port; } - TunnelParent(TunnelParent *other) + friend bool operator!=(const EncapsulatingConn& ec1, + const EncapsulatingConn& ec2) { - tunneltype = other->tunneltype; - src_addr = other->src_addr; - dst_addr = other->dst_addr; - src_port = other->src_port; - dst_port = other->dst_port; - } - - RecordVal* GetRecordVal() const - { - RecordVal *rv = new RecordVal(BifType::Record::Tunnel::Parent); - TransportProto tproto; - switch ( tunneltype ) { - case BifEnum::Tunnel::IP6_IN_IP: - case BifEnum::Tunnel::IP4_IN_IP: - tproto = TRANSPORT_UNKNOWN; - break; - default: - tproto = TRANSPORT_UDP; - } // end switch - - RecordVal* id_val = new RecordVal(conn_id); - id_val->Assign(0, new AddrVal(src_addr)); - id_val->Assign(1, new PortVal(ntohs(src_port), tproto)); - id_val->Assign(2, new AddrVal(dst_addr)); - id_val->Assign(3, new PortVal(ntohs(dst_port), tproto)); - rv->Assign(0, 
id_val); - rv->Assign(1, new EnumVal(tunneltype, BifType::Enum::Tunnel::Tunneltype)); - return rv; + return ! ( ec1 == ec2 ); } IPAddr src_addr; IPAddr dst_addr; uint16 src_port; uint16 dst_port; - BifEnum::Tunnel::Tunneltype tunneltype; + BifEnum::Tunnel::Type type; }; -class TunnelInfo { +class Encapsulation { public: - TunnelInfo() + Encapsulation() : conns(0) {} + + Encapsulation(const Encapsulation& other) { - child = 0; - hdr_len = 0; - } - ~TunnelInfo() - { - if (child) delete child; + if ( other.conns ) + conns = new vector(*(other.conns)); + else + conns = 0; } - void SetParentIPs(const IP_Hdr *ip_hdr) + Encapsulation& operator=(const Encapsulation& other) { - parent.src_addr = ip_hdr->SrcAddr(); - parent.dst_addr = ip_hdr->DstAddr(); - } - void SetParentPorts(const struct udphdr *uh) - { - parent.src_port = uh->uh_sport; - parent.dst_port = uh->uh_dport; + if ( this == &other ) return *this; + delete conns; + if ( other.conns ) + conns = new vector(*(other.conns)); + else + conns = 0; + return *this; } - IP_Hdr *child; - TunnelParent parent; - int hdr_len; + ~Encapsulation() { delete conns; } + + void Add(const EncapsulatingConn& c) + { + if ( ! conns ) + conns = new vector(); + conns->push_back(c); + } + + size_t Depth() const + { + return conns ? conns->size() : 0; + } + + VectorVal* GetVectorVal() const + { + VectorVal* vv = new VectorVal(new VectorType( + BifType::Record::Tunnel::EncapsulatingConn->Ref())); + if ( conns ) + for ( size_t i = 0; i < conns->size(); ++i ) + vv->Assign(i, (*conns)[i].GetRecordVal(), 0); + return vv; + } + + friend bool operator==(const Encapsulation& e1, const Encapsulation& e2); + + friend bool operator!=(const Encapsulation& e1, const Encapsulation& e2) + { + return ! ( e1 == e2 ); + } + + vector* conns; }; -class TunnelHandler { -public: - TunnelHandler(NetSessions *arg_s); - ~TunnelHandler(); - - // Main entry point. Returns a nil if not tunneled. - TunnelInfo* DecapsulateTunnel(const IP_Hdr* ip_hdr, int len, int caplen, - // need those for passing them back to NetSessions::Weird() - const struct pcap_pkthdr* hdr, const u_char* const pkt); - -protected: - NetSessions *s; - short udp_ports[65536]; // which UDP ports to decapsulate - IP_Hdr* LookForIPHdr(const u_char *data, int datalen); - TunnelInfo* HandleUDP(const IP_Hdr *ip_hdr, int len, int caplen); -}; - - #endif diff --git a/src/const.bif b/src/const.bif index 0ebd210a95..b622d52ff3 100644 --- a/src/const.bif +++ b/src/const.bif @@ -4,7 +4,6 @@ const ignore_keep_alive_rexmit: bool; const skip_http_data: bool; -const parse_udp_tunnels: bool; const use_conn_size_analyzer: bool; const report_gaps_for_partial: bool; @@ -16,3 +15,4 @@ const Tunnel::decapsulate_ip: bool; const Tunnel::decapsulate_udp: bool; const Tunnel::udp_tunnel_ports: any; const Tunnel::udp_tunnel_allports: bool; +const Tunnel::max_depth: count; diff --git a/src/event.bif b/src/event.bif index 1ce8907f0b..b92354e632 100644 --- a/src/event.bif +++ b/src/event.bif @@ -141,6 +141,17 @@ event dns_mapping_altered%(dm: dns_mapping, old_addrs: addr_set, new_addrs: addr ## event. event new_connection%(c: connection%); +## Generated for a connection whose tunneling has changed. This could +## be from a previously seen connection now being encapsulated in a tunnel, +## or from the outer encapsulation changing. Note that the connection's +## *tunnel* field is NOT automatically assigned to the new encapsulation value +## internally after this event is raised. +## +## c: The connection whose tunnel/encapsulation changed. 
+## +## e: The new encapsulation. +event tunnel_changed%(c: connection, e: encapsulating_conns%); + ## Generated when reassembly starts for a TCP connection. The event is raised ## at the moment when Bro's TCP analyzer enables stream reassembly for a ## connection. diff --git a/src/types.bif b/src/types.bif index 0017c4b6ff..60f8631a23 100644 --- a/src/types.bif +++ b/src/types.bif @@ -170,16 +170,18 @@ enum ID %{ module Tunnel; -enum Tunneltype %{ +enum Type %{ NONE, - IP6_IN_IP, - IP4_IN_IP, + IP6_IN_IP4, + IP4_IN_IP4, + IP6_IN_IP6, + IP4_IN_IP6, IP6_IN_UDP, IP4_IN_UDP, IP6_IN_AYIAY, IP4_IN_AYIAY, %} -type Parent: record; +type EncapsulatingConn: record; module GLOBAL; diff --git a/testing/btest/Baseline/core.tunnels.ip-in-ip/output b/testing/btest/Baseline/core.tunnels.ip-in-ip/output new file mode 100644 index 0000000000..7ed712aec8 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.ip-in-ip/output @@ -0,0 +1,22 @@ +new_connection: tunnel + conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] + encap: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6]] +new_connection: tunnel + conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] + encap: [[cid=[orig_h=feed::beef, orig_p=0/unknown, resp_h=feed::cafe, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6], [cid=[orig_h=babe::beef, orig_p=0/unknown, resp_h=dead::babe, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6]] +new_connection: tunnel + conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] + encap: [[cid=[orig_h=1.2.3.4, orig_p=0/unknown, resp_h=5.6.7.8, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP4]] +new_connection: tunnel + conn_id: [orig_h=70.55.213.211, orig_p=31337/tcp, resp_h=192.88.99.1, resp_p=80/tcp] + encap: [[cid=[orig_h=2002:4637:d5d3::4637:d5d3, orig_p=0/unknown, resp_h=2001:4860:0:2001::68, resp_p=0/unknown], tunnel_type=Tunnel::IP4_IN_IP6]] +new_connection: tunnel + conn_id: [orig_h=10.0.0.1, orig_p=30000/udp, resp_h=10.0.0.2, resp_p=13000/udp] + encap: [[cid=[orig_h=1.2.3.4, orig_p=0/unknown, resp_h=5.6.7.8, resp_p=0/unknown], tunnel_type=Tunnel::IP4_IN_IP4]] +new_connection: tunnel + conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] + encap: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6]] +tunnel_changed: + conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] + old: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6]] + new: [[cid=[orig_h=feed::beef, orig_p=0/unknown, resp_h=feed::cafe, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6]] diff --git a/testing/btest/Traces/tunnels/4in4.pcap b/testing/btest/Traces/tunnels/4in4.pcap new file mode 100644 index 0000000000000000000000000000000000000000..b0d89eeddac2cc9c5ef25313c7d7d1f7f5f0c383 GIT binary patch literal 106 zcmca|c+)~A1{MYw_+QV!zzF0-O`GapvWA<%3CIRv27%ihm)@V)b7I=11sn{n3=Ae9 qWezM^n;4mxSysJNrB<& H5e5bT)BYSs literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/tunnels/6in4.pcap b/testing/btest/Traces/tunnels/6in4.pcap new file mode 100644 index 0000000000000000000000000000000000000000..2d0cd5c8c79476a337a9ea4e00b0c09989b370a7 GIT binary patch literal 126 
zcmca|c+)~A1{MYw_+QV!zzF1sOq=Teq@9f+49Esy27%ihm)@V)b7I=11sn{n3=AG1 yWe%EIm5fZxEUawo90?#*Jc17Q)`A!?uzET6D6AV1fmm+`wWOGd;A)Eib XMK)pIdsP0Ze+eLIqHS)x5D5eTP+LeQ literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/tunnels/6in6.pcap b/testing/btest/Traces/tunnels/6in6.pcap new file mode 100644 index 0000000000000000000000000000000000000000..ff8aa607bb022bd189b553aad153cb3f32fe5b57 GIT binary patch literal 146 zcmca|c+)~A1{MYw_+QV!zzF2rubAksRLa7T1!RNpe=vZkYP*{NVwh+;C@`}80BUAu wdeHd)U(o;msQg*kV0Ao#4)@lAG{C^V_ox2B*dTh>zET6D6AV1fmm+`w0Hb6myZ`_I literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/tunnels/6in6in6.pcap b/testing/btest/Traces/tunnels/6in6in6.pcap new file mode 100644 index 0000000000000000000000000000000000000000..192524aa788e7e099648f6dc2fc7b452c97525ed GIT binary patch literal 186 zcmca|c+)~A1{MYw_+QV!zzF2b?3wD{{g0Dj5|9nT|G@yFs_kw9h!Lad@b4`|1VZk6 zkIFywF99TBqUo?}A4CO&-1q+8T4esNeF-2gkDvpp_`dh2{-LTXH848Cz|(vw0tf)T C4=c6+ literal 0 HcmV?d00001 diff --git a/testing/btest/core/tunnels/ip-in-ip.test b/testing/btest/core/tunnels/ip-in-ip.test new file mode 100644 index 0000000000..f526575d48 --- /dev/null +++ b/testing/btest/core/tunnels/ip-in-ip.test @@ -0,0 +1,30 @@ +# @TEST-EXEC: bro -b -r $TRACES/tunnels/6in6.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/tunnels/6in6in6.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/tunnels/6in4.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/tunnels/4in6.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/tunnels/4in4.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/tunnels/6in6-tunnel-change.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: btest-diff output + +event new_connection(c: connection) + { + if ( c?$tunnel ) + { + print "new_connection: tunnel"; + print fmt(" conn_id: %s", c$id); + print fmt(" encap: %s", c$tunnel); + } + else + { + print "new_connection: no tunnel"; + } + } + +event tunnel_changed(c: connection, e: encapsulating_conns) + { + print "tunnel_changed:"; + print fmt(" conn_id: %s", c$id); + if ( c?$tunnel ) + print fmt(" old: %s", c$tunnel); + print fmt(" new: %s", e); + } From 5ce00bda8a5ee2233e25b3d2a445527e4de31b2e Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 23 Apr 2012 13:24:02 -0500 Subject: [PATCH 249/651] Rename TunnelHandler.{cc,h} to Tunnels.{cc,h}. 
--- src/CMakeLists.txt | 2 +- src/Conn.cc | 2 +- src/Conn.h | 2 +- src/Sessions.cc | 2 +- src/{TunnelHandler.cc => Tunnels.cc} | 2 +- src/{TunnelHandler.h => Tunnels.h} | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) rename src/{TunnelHandler.cc => Tunnels.cc} (97%) rename src/{TunnelHandler.h => Tunnels.h} (98%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a755fde64e..503bd0298e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -394,7 +394,7 @@ set(bro_SRCS Timer.cc Traverse.cc Trigger.cc - TunnelHandler.cc + Tunnels.cc Type.cc UDP.cc Val.cc diff --git a/src/Conn.cc b/src/Conn.cc index 80c026e781..55cfb3b3cb 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -13,7 +13,7 @@ #include "Timer.h" #include "PIA.h" #include "binpac.h" -#include "TunnelHandler.h" +#include "Tunnels.h" void ConnectionTimer::Init(Connection* arg_conn, timer_func arg_timer, int arg_do_expire) diff --git a/src/Conn.h b/src/Conn.h index 9cdb746b7c..67300601ce 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -13,7 +13,7 @@ #include "RuleMatcher.h" #include "AnalyzerTags.h" #include "IPAddr.h" -#include "TunnelHandler.h" +#include "Tunnels.h" class Connection; class ConnectionTimer; diff --git a/src/Sessions.cc b/src/Sessions.cc index 769bd68f52..326306c2fd 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -30,7 +30,7 @@ #include "DPM.h" #include "PacketSort.h" -#include "TunnelHandler.h" +#include "Tunnels.h" // These represent NetBIOS services on ephemeral ports. They're numbered // so that we can use a single int to hold either an actual TCP/UDP server diff --git a/src/TunnelHandler.cc b/src/Tunnels.cc similarity index 97% rename from src/TunnelHandler.cc rename to src/Tunnels.cc index 4923b36f3d..950e94628e 100644 --- a/src/TunnelHandler.cc +++ b/src/Tunnels.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "TunnelHandler.h" +#include "Tunnels.h" RecordVal* EncapsulatingConn::GetRecordVal() const { diff --git a/src/TunnelHandler.h b/src/Tunnels.h similarity index 98% rename from src/TunnelHandler.h rename to src/Tunnels.h index 21e491cdc9..d4dfee8f97 100644 --- a/src/TunnelHandler.h +++ b/src/Tunnels.h @@ -1,7 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. -#ifndef tunnelhandler_h -#define tunnelhandler_h +#ifndef TUNNELS_H +#define TUNNELS_H #include "config.h" #include "NetVar.h" From 2a79fe95ec1b7514382e0de8b94fb0f5c4ea2af1 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Tue, 24 Apr 2012 01:05:35 -0400 Subject: [PATCH 250/651] Another tunneling checkpoint. - AYIYA works. - AYIYA analyzed connections are still labelled wrong in conn.log (logged as syslog) - Some clean up for left over code. - Small refactoring to pass packets back from analyzers to core. - $uid is now optional in conn logs since ip-in-ip tunnel parent's won't have an actual connection. 
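[Editor's note -- not part of the commit above.] Because ``uid`` becomes ``&optional`` in the tunnels framework's ``Info`` record here, script-level consumers have to test for its presence before dereferencing it. A minimal, hypothetical sketch of that pattern follows; ``describe_tunnel`` is illustrative only and not a function in the Bro tree, and it assumes the ``Tunnels::Info`` record as defined at this point in the series with ``base/frameworks/tunnels`` loaded:

    function describe_tunnel(t: Tunnels::Info): string
        {
        # IP-in-IP tunnel entries have no parent connection, hence no uid.
        if ( t?$uid )
            return fmt("tunnel %s between %s and %s", t$uid, t$id$orig_h, t$id$resp_h);
        else
            return fmt("IP-in-IP tunnel between %s and %s", t$id$orig_h, t$id$resp_h);
        }
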
--- scripts/base/frameworks/tunnels/main.bro | 2 +- scripts/base/init-bare.bro | 16 +----- scripts/base/protocols/conn/main.bro | 12 +++++ src/AYIYA.cc | 63 ------------------------ src/AYIYA.h | 27 +--------- src/Analyzer.cc | 3 -- src/AnalyzerTags.h | 1 - src/Conn.h | 4 +- src/Sessions.h | 8 +-- src/ayiya-analyzer.pac | 33 +++++++++++-- src/const.bif | 3 -- src/types.bif | 3 +- 12 files changed, 53 insertions(+), 122 deletions(-) diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro index 987939eb6e..952f382e56 100644 --- a/scripts/base/frameworks/tunnels/main.bro +++ b/scripts/base/frameworks/tunnels/main.bro @@ -10,7 +10,7 @@ export { type Info: record { ts: time &log; - uid: string &log; + uid: string &log &optional; id: conn_id &log; action: Action &log; tunnel_type: string &log; diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 88f0910d1c..45ec94baa8 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -2631,21 +2631,7 @@ module Tunnel; export { ## Whether to decapsulate IP tunnels (IPinIP, 6in4, 6to4) const decapsulate_ip = T &redef; - - ## Whether to decapsulate UDP tunnels (e.g., Teredo, IPv4 in UDP) - const decapsulate_udp = F &redef; - - ## If decapsulating UDP: the set of ports for which to do so. - ## Can be overridden by :bro:id:`Tunnel::udp_tunnel_allports` - const udp_tunnel_ports: set[port] = { - 3544/udp, # Teredo - 5072/udp, # AYIAY - } &redef; - - ## If udp_tunnel_allports is T :bro:id:`udp_tunnel_ports` is ignored and we - ## check every UDP packet for tunnels. - const udp_tunnel_allports = F &redef; - + ## The maximum depth of a tunnel to decapsulate until giving up. const max_depth: count = 2 &redef; } # end export diff --git a/scripts/base/protocols/conn/main.bro b/scripts/base/protocols/conn/main.bro index 34ec12fa56..e4dbb28790 100644 --- a/scripts/base/protocols/conn/main.bro +++ b/scripts/base/protocols/conn/main.bro @@ -101,6 +101,9 @@ export { resp_pkts: count &log &optional; ## Number IP level bytes the responder sent. See ``orig_pkts``. resp_ip_bytes: count &log &optional; + ## If this connection was over a tunnel, indicate the + ## `uid` value for the parent connection or connections. + parents: vector of string &log &optional; }; ## Event that can be handled to access the :bro:type:`Conn::Info` @@ -190,6 +193,15 @@ function set_conn(c: connection, eoc: bool) c$conn$ts=c$start_time; c$conn$uid=c$uid; c$conn$id=c$id; + if ( ! c$conn?$parents && c?$tunnel ) + { + c$conn$parents = vector(); + for ( i in c$tunnel ) + { + # TODO: maybe we should be storing uid's in the $tunnel field? 
+ #c$conn$parents[|c$conn$parents|] = lookup_connection(c$tunnel[i]$cid)$uid; + } + } c$conn$proto=get_port_transport_proto(c$id$resp_p); if( |Site::local_nets| > 0 ) c$conn$local_orig=Site::is_local_addr(c$id$orig_h); diff --git a/src/AYIYA.cc b/src/AYIYA.cc index d69db642b3..c70af87fa8 100644 --- a/src/AYIYA.cc +++ b/src/AYIYA.cc @@ -1,13 +1,10 @@ #include "AYIYA.h" -#include "TCP_Reassembler.h" AYIYA_Analyzer::AYIYA_Analyzer(Connection* conn) : Analyzer(AnalyzerTag::SYSLOG_BINPAC, conn) { interp = new binpac::AYIYA::AYIYA_Conn(this); did_session_done = 0; - //ADD_ANALYZER_TIMER(&AYIYA_Analyzer::ExpireTimer, - // network_time + Syslog_session_timeout, 1, TIMER_Syslog_EXPIRE); } AYIYA_Analyzer::~AYIYA_Analyzer() @@ -28,63 +25,3 @@ void AYIYA_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, int s Analyzer::DeliverPacket(len, data, orig, seq, ip, caplen); interp->NewData(orig, data, data + len); } - -//void AYIYA_Analyzer::ExpireTimer(double t) -// { -// // The - 1.0 in the following is to allow 1 second for the -// // common case of a single request followed by a single reply, -// // so we don't needlessly set the timer twice in that case. -// if ( t - Conn()->LastTime() >= Syslog_session_timeout - 1.0 || terminating ) -// { -// Event(connection_timeout); -// sessions->Remove(Conn()); -// } -// else -// ADD_ANALYZER_TIMER(&AYIYA_Analyzer::ExpireTimer, -// t + Syslog_session_timeout, 1, TIMER_Syslog_EXPIRE); -// } - -//Syslog_TCP_Analyzer_binpac::Syslog_TCP_Analyzer_binpac(Connection* conn) -//: TCP_ApplicationAnalyzer(AnalyzerTag::Syslog_TCP_BINPAC, conn) -// { -// interp = new binpac::Syslog_on_TCP::Syslog_TCP_Conn(this); -// } - -//Syslog_TCP_Analyzer_binpac::~Syslog_TCP_Analyzer_binpac() -// { -// delete interp; -// } - -//void Syslog_TCP_Analyzer_binpac::Done() -// { -// TCP_ApplicationAnalyzer::Done(); -// -// interp->FlowEOF(true); -// interp->FlowEOF(false); -// } - -//void Syslog_TCP_Analyzer_binpac::EndpointEOF(TCP_Reassembler* endp) -// { -// TCP_ApplicationAnalyzer::EndpointEOF(endp); -// interp->FlowEOF(endp->IsOrig()); -// } - -//void Syslog_TCP_Analyzer_binpac::DeliverStream(int len, const u_char* data, -// bool orig) -// { -// TCP_ApplicationAnalyzer::DeliverStream(len, data, orig); -// -// assert(TCP()); -// -// if ( TCP()->IsPartial() || TCP()->HadGap(orig) ) -// // punt-on-partial or stop-on-gap. 
-// return; -// -// interp->NewData(orig, data, data + len); -// } - -//void Syslog_TCP_Analyzer_binpac::Undelivered(int seq, int len, bool orig) -// { -// TCP_ApplicationAnalyzer::Undelivered(seq, len, orig); -// interp->NewGap(orig, len); -// } diff --git a/src/AYIYA.h b/src/AYIYA.h index 294eeca1ea..f7c67ced5b 100644 --- a/src/AYIYA.h +++ b/src/AYIYA.h @@ -1,9 +1,6 @@ #ifndef AYIYA_h #define AYIYA_h -#include "UDP.h" -#include "TCP.h" - #include "ayiya_pac.h" class AYIYA_Analyzer : public Analyzer { @@ -19,7 +16,7 @@ public: { return new AYIYA_Analyzer(conn); } static bool Available() - { return true; } + { return BifConst::Tunnel::decapsulate_ip; } protected: friend class AnalyzerTimer; @@ -30,26 +27,4 @@ protected: binpac::AYIYA::AYIYA_Conn* interp; }; -// #include "Syslog_tcp_pac.h" -// -//class Syslog_TCP_Analyzer_binpac : public TCP_ApplicationAnalyzer { -//public: -// Syslog_TCP_Analyzer_binpac(Connection* conn); -// virtual ~Syslog_TCP_Analyzer_binpac(); -// -// virtual void Done(); -// virtual void DeliverStream(int len, const u_char* data, bool orig); -// virtual void Undelivered(int seq, int len, bool orig); -// virtual void EndpointEOF(TCP_Reassembler* endp); -// -// static Analyzer* InstantiateAnalyzer(Connection* conn) -// { return new Syslog_TCP_Analyzer_binpac(conn); } -// -// static bool Available() -// { return (Syslog_request || Syslog_full_request) && FLAGS_use_binpac; } -// -//protected: -// binpac::Syslog_on_TCP::Syslog_TCP_Conn* interp; -//}; -// #endif diff --git a/src/Analyzer.cc b/src/Analyzer.cc index f731b36a70..f9570d707f 100644 --- a/src/Analyzer.cc +++ b/src/Analyzer.cc @@ -129,9 +129,6 @@ const Analyzer::Config Analyzer::analyzer_configs[] = { Syslog_Analyzer_binpac::InstantiateAnalyzer, Syslog_Analyzer_binpac::Available, 0, false }, - //{ AnalyzerTag::6to4, "6to4", - // 6to4_Analyzer::InstantiateAnalyzer, - // 6to4_Anylzer::Available, 0, false }, { AnalyzerTag::AYIYA, "AYIYA", AYIYA_Analyzer::InstantiateAnalyzer, AYIYA_Analyzer::Available, 0, false }, diff --git a/src/AnalyzerTags.h b/src/AnalyzerTags.h index 1b65d5219e..05de68f2b3 100644 --- a/src/AnalyzerTags.h +++ b/src/AnalyzerTags.h @@ -34,7 +34,6 @@ namespace AnalyzerTag { HTTP_BINPAC, SSL, SYSLOG_BINPAC, // Decapsulation Analyzers - //6to4, AYIYA, SOCKS, //Teredo, diff --git a/src/Conn.h b/src/Conn.h index 67300601ce..7f998c9d9a 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -245,7 +245,9 @@ public: void SetTransport(TransportProto arg_proto) { proto = arg_proto; } void SetUID(uint64 arg_uid) { uid = arg_uid; } - + + Encapsulation* GetEncapsulation() { return &encapsulation; } + protected: Connection() { persistent = 0; } diff --git a/src/Sessions.h b/src/Sessions.h index e1afbeec5a..fb76d29831 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -131,6 +131,10 @@ public: return tcp_conns.Length() + udp_conns.Length() + icmp_conns.Length(); } + + void DoNextPacket(double t, const struct pcap_pkthdr* hdr, + const IP_Hdr* ip_hdr, const u_char* const pkt, + int hdr_size, Encapsulation& encapsulation); unsigned int ConnectionMemoryUsage(); unsigned int ConnectionMemoryUsageConnVals(); @@ -174,10 +178,6 @@ protected: const u_char* const pkt, int hdr_size, PacketSortElement* pkt_elem); - void DoNextPacket(double t, const struct pcap_pkthdr* hdr, - const IP_Hdr* ip_hdr, const u_char* const pkt, - int hdr_size, Encapsulation& encapsulation); - void NextPacketSecondary(double t, const struct pcap_pkthdr* hdr, const u_char* const pkt, int hdr_size, const PktSrc* src_ps); diff --git a/src/ayiya-analyzer.pac 
b/src/ayiya-analyzer.pac index 888cc575a5..3834ccc3b6 100644 --- a/src/ayiya-analyzer.pac +++ b/src/ayiya-analyzer.pac @@ -11,10 +11,37 @@ flow AYIYA_Flow function process_ayiya(pdu: PDU): bool %{ - connection()->bro_analyzer()->ProtocolConfirmation(); + Connection *c = connection()->bro_analyzer()->Conn(); - // Not sure what to do here. - printf("packet: %s\n", ${pdu.packet}.data()); + if ( c->GetEncapsulation()->Depth() >= BifConst::Tunnel::max_depth ) + { + reporter->Weird(c->OrigAddr(), c->RespAddr(), "tunnel_depth"); + // TODO: this should stop this analyzer instance + return false; + } + + IP_Hdr* inner_ip; + if ( ${pdu.next_header} == IPPROTO_IPV6 ) + inner_ip = new IP_Hdr((const struct ip6_hdr*) ${pdu.packet}.data(), false, ${pdu.packet}.length()); + else + inner_ip = new IP_Hdr((const struct ip*) ${pdu.packet}.data(), false); + + if ( inner_ip != 0) + connection()->bro_analyzer()->ProtocolConfirmation(); + else + connection()->bro_analyzer()->ProtocolViolation("ayiya_tunnel_non_ip"); + + struct pcap_pkthdr fake_hdr; + fake_hdr.caplen = fake_hdr.len = ${pdu.packet}.length(); + // Not sure what to do with this timestamp. + //fake_hdr.ts = network_time(); + + EncapsulatingConn ec(c->OrigAddr(), c->RespAddr(), BifEnum::Tunnel::AYIYA); + c->GetEncapsulation()->Add(ec); + + sessions->DoNextPacket(network_time(), &fake_hdr, inner_ip, ${pdu.packet}.data(), 0, *c->GetEncapsulation()); + + delete inner_ip; return true; %} diff --git a/src/const.bif b/src/const.bif index b622d52ff3..db5fbbd2cb 100644 --- a/src/const.bif +++ b/src/const.bif @@ -12,7 +12,4 @@ const NFS3::return_data_max: count; const NFS3::return_data_first_only: bool; const Tunnel::decapsulate_ip: bool; -const Tunnel::decapsulate_udp: bool; -const Tunnel::udp_tunnel_ports: any; -const Tunnel::udp_tunnel_allports: bool; const Tunnel::max_depth: count; diff --git a/src/types.bif b/src/types.bif index 60f8631a23..526198b2f5 100644 --- a/src/types.bif +++ b/src/types.bif @@ -178,8 +178,7 @@ enum Type %{ IP4_IN_IP6, IP6_IN_UDP, IP4_IN_UDP, - IP6_IN_AYIAY, - IP4_IN_AYIAY, + AYIYA, %} type EncapsulatingConn: record; From a4af69461066e4353c3a06ff0cb5a52e3dcd7745 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Tue, 24 Apr 2012 01:17:45 -0400 Subject: [PATCH 251/651] AYIYA analyzer ignores non-packet forwarding packets now. --- src/ayiya-analyzer.pac | 6 ++++++ src/ayiya-protocol.pac | 18 ++++++++++-------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/ayiya-analyzer.pac b/src/ayiya-analyzer.pac index 3834ccc3b6..763077c6a8 100644 --- a/src/ayiya-analyzer.pac +++ b/src/ayiya-analyzer.pac @@ -20,6 +20,12 @@ flow AYIYA_Flow return false; } + if ( ${pdu.op} != 1 ) + { + // 1 is the "forward" command. 
+ return false; + } + IP_Hdr* inner_ip; if ( ${pdu.next_header} == IPPROTO_IPV6 ) inner_ip = new IP_Hdr((const struct ip6_hdr*) ${pdu.packet}.data(), false, ${pdu.packet}.length()); diff --git a/src/ayiya-protocol.pac b/src/ayiya-protocol.pac index 25aca23fb9..7801708c7d 100644 --- a/src/ayiya-protocol.pac +++ b/src/ayiya-protocol.pac @@ -1,14 +1,16 @@ type PDU = record { - identity_byte: uint8; - signature_byte: uint8; - auth_and_op_crap: uint8; - next_header: uint8; - epoch: uint32; - identity: bytestring &length=identity_len; - signature: bytestring &length=signature_len; - packet: bytestring &restofdata; + identity_byte: uint8; + signature_byte: uint8; + auth_and_op: uint8; + next_header: uint8; + epoch: uint32; + identity: bytestring &length=identity_len; + signature: bytestring &length=signature_len; + packet: bytestring &restofdata; } &let { identity_len = (1 << (identity_byte >> 4)); signature_len = (signature_byte >> 4) * 4; + auth = auth_and_op >> 4; + op = auth_and_op & 0xF; } &byteorder = littleendian; \ No newline at end of file From 85bb5deb9207395273598aad6840b8eb4bf93d7b Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 24 Apr 2012 11:40:05 -0500 Subject: [PATCH 252/651] Fix AYIYA analyzer from modifying parent connection's encapsulation. --- src/Conn.h | 3 ++- src/ayiya-analyzer.pac | 11 +++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/Conn.h b/src/Conn.h index 7f998c9d9a..9abc75ff68 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -246,7 +246,8 @@ public: void SetUID(uint64 arg_uid) { uid = arg_uid; } - Encapsulation* GetEncapsulation() { return &encapsulation; } + const Encapsulation& GetEncapsulation() const + { return encapsulation; } protected: diff --git a/src/ayiya-analyzer.pac b/src/ayiya-analyzer.pac index 763077c6a8..0530207c20 100644 --- a/src/ayiya-analyzer.pac +++ b/src/ayiya-analyzer.pac @@ -13,7 +13,7 @@ flow AYIYA_Flow %{ Connection *c = connection()->bro_analyzer()->Conn(); - if ( c->GetEncapsulation()->Depth() >= BifConst::Tunnel::max_depth ) + if ( c->GetEncapsulation().Depth() >= BifConst::Tunnel::max_depth ) { reporter->Weird(c->OrigAddr(), c->RespAddr(), "tunnel_depth"); // TODO: this should stop this analyzer instance @@ -42,10 +42,13 @@ flow AYIYA_Flow // Not sure what to do with this timestamp. //fake_hdr.ts = network_time(); - EncapsulatingConn ec(c->OrigAddr(), c->RespAddr(), BifEnum::Tunnel::AYIYA); - c->GetEncapsulation()->Add(ec); + Encapsulation encap(c->GetEncapsulation()); + EncapsulatingConn ec(c->OrigAddr(), c->RespAddr(), + c->OrigPort(), c->RespPort(), + BifEnum::Tunnel::AYIYA); + encap.Add(ec); - sessions->DoNextPacket(network_time(), &fake_hdr, inner_ip, ${pdu.packet}.data(), 0, *c->GetEncapsulation()); + sessions->DoNextPacket(network_time(), &fake_hdr, inner_ip, ${pdu.packet}.data(), 0, encap); delete inner_ip; return true; From 4d86f38be0d76ef812fb5a40d66984a75227dfbf Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 24 Apr 2012 14:12:42 -0500 Subject: [PATCH 253/651] Remove invalid IP-in-IP encapsulated protocol value. 
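[Editor's note -- not part of the commit above.] For context: IPPROTO_IP is protocol number 0, which in an IPv6 header's next-header field denotes hop-by-hop options rather than an encapsulated IPv4 packet, so only the values 4 and 41 legitimately select the IP-in-IP branch. A hypothetical script-level sketch of that check; ``is_ip_in_ip`` is illustrative only and not part of Bro:

    function is_ip_in_ip(p: count): bool
        {
        # 4 marks an inner IPv4 packet, 41 an inner IPv6 packet.
        return p == 4 || p == 41;
        }
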
--- src/Sessions.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Sessions.cc b/src/Sessions.cc index 326306c2fd..dbf6f1c22a 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -522,7 +522,6 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, break; } - case IPPROTO_IP: case IPPROTO_IPV4: case IPPROTO_IPV6: { @@ -674,7 +673,6 @@ bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, case IPPROTO_UDP: min_hdr_len = sizeof(struct udphdr); break; - case IPPROTO_IP: case IPPROTO_IPV4: min_hdr_len = sizeof(struct ip); break; From bd01525a86b9bfbbc0dc5475439b7acfdb09ae10 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 24 Apr 2012 14:25:47 -0500 Subject: [PATCH 254/651] Remove Tunnel::decapsulate_ip option. Setting Tunnel::max_depth to zero effectively disables tunnel decapsulation. --- scripts/base/init-bare.bro | 4 +--- src/AYIYA.h | 2 +- src/Sessions.cc | 7 ------- src/const.bif | 1 - 4 files changed, 2 insertions(+), 12 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 45ec94baa8..a04063b55a 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -2629,10 +2629,8 @@ const ignore_keep_alive_rexmit = F &redef; module Tunnel; export { - ## Whether to decapsulate IP tunnels (IPinIP, 6in4, 6to4) - const decapsulate_ip = T &redef; - ## The maximum depth of a tunnel to decapsulate until giving up. + ## Setting this to zero will disable tunnel decapsulation. const max_depth: count = 2 &redef; } # end export module GLOBAL; diff --git a/src/AYIYA.h b/src/AYIYA.h index f7c67ced5b..bf1fb0bf2c 100644 --- a/src/AYIYA.h +++ b/src/AYIYA.h @@ -16,7 +16,7 @@ public: { return new AYIYA_Analyzer(conn); } static bool Available() - { return BifConst::Tunnel::decapsulate_ip; } + { return BifConst::Tunnel::max_depth > 0; } protected: friend class AnalyzerTimer; diff --git a/src/Sessions.cc b/src/Sessions.cc index dbf6f1c22a..cb2b5633ca 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -525,13 +525,6 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, case IPPROTO_IPV4: case IPPROTO_IPV6: { - if ( ! BifConst::Tunnel::decapsulate_ip ) - { - reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), "ip_tunnel"); - Remove(f); - return; - } - if ( encapsulation.Depth() >= BifConst::Tunnel::max_depth ) { reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), "tunnel_depth"); diff --git a/src/const.bif b/src/const.bif index db5fbbd2cb..09ce769261 100644 --- a/src/const.bif +++ b/src/const.bif @@ -11,5 +11,4 @@ const NFS3::return_data: bool; const NFS3::return_data_max: count; const NFS3::return_data_first_only: bool; -const Tunnel::decapsulate_ip: bool; const Tunnel::max_depth: count; From c10ff6fd69dc0c912f5137d205be0490d1f8fa1b Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Tue, 24 Apr 2012 16:58:03 -0400 Subject: [PATCH 255/651] Add some extra TLS extension values. - extended_random is an expired draft rfc, but we see it in live traffic. - http://tools.ietf.org/html/draft-rescorla-tls-extended-random-01 - heartbeat RFC was ratified in Feb. 2012. 
- http://tools.ietf.org/html/rfc6520 --- scripts/base/protocols/ssl/consts.bro | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/base/protocols/ssl/consts.bro b/scripts/base/protocols/ssl/consts.bro index ab130c4318..6c33e6e438 100644 --- a/scripts/base/protocols/ssl/consts.bro +++ b/scripts/base/protocols/ssl/consts.bro @@ -77,7 +77,9 @@ export { [12] = "srp", [13] = "signature_algorithms", [14] = "use_srtp", + [15] = "heartbeat", [35] = "SessionTicket TLS", + [40] = "extended_random", [13172] = "next_protocol_negotiation", [65281] = "renegotiation_info" } &default=function(i: count):string { return fmt("unknown-%d", i); }; From 2235647ab7af0aafcf1ee8c54000e4485dee3ab1 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Tue, 24 Apr 2012 17:30:37 -0400 Subject: [PATCH 256/651] Some improvements to the AYIYA analyzer. - Reenabled AYIYA dpd sigs. --- scripts/base/frameworks/dpd/dpd.sig | 10 +++++----- src/ayiya-analyzer.pac | 16 +++++++++------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/scripts/base/frameworks/dpd/dpd.sig b/scripts/base/frameworks/dpd/dpd.sig index f5d3651104..b1fb9e6f19 100644 --- a/scripts/base/frameworks/dpd/dpd.sig +++ b/scripts/base/frameworks/dpd/dpd.sig @@ -150,11 +150,11 @@ signature dpd_ssl_client { tcp-state originator } -#signature dpd_ayiya { -# ip-proto = udp -# payload /^..\x11\x29/ -# enable "ayiya" -#} +signature dpd_ayiya { + ip-proto = udp + payload /^..\x11\x29/ + enable "ayiya" +} signature dpd_socks_client { ip-proto == tcp diff --git a/src/ayiya-analyzer.pac b/src/ayiya-analyzer.pac index 0530207c20..856d22c417 100644 --- a/src/ayiya-analyzer.pac +++ b/src/ayiya-analyzer.pac @@ -15,8 +15,7 @@ flow AYIYA_Flow if ( c->GetEncapsulation().Depth() >= BifConst::Tunnel::max_depth ) { - reporter->Weird(c->OrigAddr(), c->RespAddr(), "tunnel_depth"); - // TODO: this should stop this analyzer instance + reporter->Weird(c, "tunnel_depth"); return false; } @@ -29,13 +28,16 @@ flow AYIYA_Flow IP_Hdr* inner_ip; if ( ${pdu.next_header} == IPPROTO_IPV6 ) inner_ip = new IP_Hdr((const struct ip6_hdr*) ${pdu.packet}.data(), false, ${pdu.packet}.length()); - else + else if ( ${pdu.next_header} == IPPROTO_IPV4 ) inner_ip = new IP_Hdr((const struct ip*) ${pdu.packet}.data(), false); - - if ( inner_ip != 0) - connection()->bro_analyzer()->ProtocolConfirmation(); else - connection()->bro_analyzer()->ProtocolViolation("ayiya_tunnel_non_ip"); + { + reporter->Weird(c, "ayiya_tunnel_non_ip"); + return false; + } + + if ( inner_ip != 0 ) + connection()->bro_analyzer()->ProtocolConfirmation(); struct pcap_pkthdr fake_hdr; fake_hdr.caplen = fake_hdr.len = ${pdu.packet}.length(); From bdbb6d8068f70b03b8839c608587d97ff469a4e5 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 24 Apr 2012 14:52:09 -0700 Subject: [PATCH 257/651] Updating submodule(s). 
[nomail] --- aux/broccoli | 2 +- aux/broctl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aux/broccoli b/aux/broccoli index bead1168ae..827a2e76a5 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit bead1168ae9c2d2ae216dd58522fbc05498ff2c8 +Subproject commit 827a2e76a527f17e15faf3be5eb8849f1045e887 diff --git a/aux/broctl b/aux/broctl index 19d7956c89..55c317607b 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 19d7956c89ddd8d74d2759dee8cf46983fed3c9b +Subproject commit 55c317607bf89753ddd790c9350556a7ca46578e From 8766a2e2fc5fa4636ac5127d313ff215660194ef Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 24 Apr 2012 15:04:39 -0700 Subject: [PATCH 258/651] Updating submodule(s). [nomail] --- aux/broccoli | 2 +- aux/broctl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aux/broccoli b/aux/broccoli index 827a2e76a5..55f368b0ad 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 827a2e76a527f17e15faf3be5eb8849f1045e887 +Subproject commit 55f368b0ad283b2e7d68ef72922b5d9683e2a880 diff --git a/aux/broctl b/aux/broctl index 55c317607b..ff35c3c144 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 55c317607bf89753ddd790c9350556a7ca46578e +Subproject commit ff35c3c144885902c898bf8b47e351c7b8d55e10 From c91563fe7590d88e1609609668b71a070ed00768 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 24 Apr 2012 17:57:05 -0700 Subject: [PATCH 259/651] DataSeries tuning. - Now using the new DS interface from git to remove warning. - New leak tests, not yet tried, --- doc/logging-dataseries.rst | 9 +++-- src/logging/writers/DataSeries.cc | 8 +---- src/logging/writers/DataSeries.h | 2 +- .../btest/core/leaks/dataseries-rotate.bro | 34 +++++++++++++++++++ testing/btest/core/leaks/dataseries.bro | 9 +++++ 5 files changed, 52 insertions(+), 10 deletions(-) create mode 100644 testing/btest/core/leaks/dataseries-rotate.bro create mode 100644 testing/btest/core/leaks/dataseries.bro diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst index 6eef223a90..67f95ecf3b 100644 --- a/doc/logging-dataseries.rst +++ b/doc/logging-dataseries.rst @@ -24,8 +24,8 @@ distributed on `HP Labs' web site to use recent developments of both packages with Bro, which you can download from github like this:: - git clone http://github.com/eric-anderson/Lintel - git clone http://github.com/eric-anderson/DataSeries + git clone http://github.com/dataseries/Lintel + git clone http://github.com/dataseries/DataSeries To then build and install the two into ````, do:: @@ -109,8 +109,13 @@ TODO Warning, while packing field not_valid_after of record 11, error was > 10%: (1346460000 / 1000000 = 1346.46, round() = 1346) + See Eric's mail. + * For testing our script-level options: - Can we get the extentsize from a ``.ds`` file? - Can we get the compressio level from a ``.ds`` file? + See Eric's mail. + +* Do we have a leak? diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index aacef01f80..a3d193be97 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -329,13 +329,7 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con else Warning(Fmt("%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'gz', 'bz2', 'none', 'any'. 
Defaulting to 'any'", ds_compression.c_str())); - const ExtentType& type = log_types.registerTypeR(schema); - - // Note: This is a bit dicey as it depends on the implementation of - // registerTypeR(), but its what the DataSeries guys recommended - // given that we function we originally used has been deprecated. - log_type = &type; - + log_type = log_types.registerTypePtr(schema); log_series.setType(*log_type); return OpenLog(path); diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h index ab2bcec88c..0d9ab67e95 100644 --- a/src/logging/writers/DataSeries.h +++ b/src/logging/writers/DataSeries.h @@ -100,7 +100,7 @@ private: // Internal DataSeries structures we need to keep track of. vector schema_list; ExtentTypeLibrary log_types; - const ExtentType *log_type; + ExtentType::Ptr log_type; ExtentSeries log_series; ExtentMap extents; int compress_type; diff --git a/testing/btest/core/leaks/dataseries-rotate.bro b/testing/btest/core/leaks/dataseries-rotate.bro new file mode 100644 index 0000000000..188de9717b --- /dev/null +++ b/testing/btest/core/leaks/dataseries-rotate.bro @@ -0,0 +1,34 @@ +# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r %DIR/../rotation.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES + +module Test; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. + } &log; +} + +redef Log::default_rotation_interval = 1hr; +redef Log::default_rotation_postprocessor_cmd = "echo"; + +event bro_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); +} + +event new_connection(c: connection) + { + Log::write(Test::LOG, [$t=network_time(), $id=c$id]); + } diff --git a/testing/btest/core/leaks/dataseries.bro b/testing/btest/core/leaks/dataseries.bro new file mode 100644 index 0000000000..886ee54dd9 --- /dev/null +++ b/testing/btest/core/leaks/dataseries.bro @@ -0,0 +1,9 @@ +# Needs perftools support. +# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES From 8c14b5a911edff7b1ad8dfe1b33fd2c6766aec6d Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 25 Apr 2012 14:38:11 -0400 Subject: [PATCH 260/651] Added Carrier Grade NAT CIDR and link local IPv6 to "private_address_space" --- scripts/base/utils/site.bro | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/base/utils/site.bro b/scripts/base/utils/site.bro index 4aeb70fe3f..e8db91f3d1 100644 --- a/scripts/base/utils/site.bro +++ b/scripts/base/utils/site.bro @@ -10,8 +10,10 @@ export { const private_address_space: set[subnet] = { 10.0.0.0/8, 192.168.0.0/16, + 172.16.0.0/12, + 100.64.0.0/10, # RFC6598 Carrier Grade NAT 127.0.0.0/8, - 172.16.0.0/12 + [fe80::]/16, } &redef; ## Networks that are considered "local". 
From c561a44326f696826011f5212501ca09251856fc Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 26 Apr 2012 10:45:28 -0400 Subject: [PATCH 261/651] Fixed a problem where cluster workers were still processing notices in some cases. --- scripts/base/frameworks/notice/cluster.bro | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/base/frameworks/notice/cluster.bro b/scripts/base/frameworks/notice/cluster.bro index 281901cf31..087c3ead51 100644 --- a/scripts/base/frameworks/notice/cluster.bro +++ b/scripts/base/frameworks/notice/cluster.bro @@ -23,7 +23,10 @@ redef Cluster::worker2manager_events += /Notice::cluster_notice/; @if ( Cluster::local_node_type() != Cluster::MANAGER ) # The notice policy is completely handled by the manager and shouldn't be # done by workers or proxies to save time for packet processing. -redef policy = {}; +event bro_init() &priority=-11 + { + Notice::policy = table(); + } event Notice::begin_suppression(n: Notice::Info) { From b8e1604ab571bd84a598fb57f6ad450731a64a56 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 26 Apr 2012 12:29:59 -0500 Subject: [PATCH 262/651] Make tunnels always identifiable by UID, tunnel.log now gets populated. conn.log now sets a field indicating all the parent tunnel UIDs over which a connection operated and cross reference the UIDs found in the tunnel.log. Also some renaming of tunnel related types at the scripting layer. --- doc/scripts/DocSourcesList.cmake | 3 +- scripts/base/frameworks/tunnels/main.bro | 134 ++++++++++++++---- scripts/base/init-bare.bro | 7 +- scripts/base/protocols/conn/main.bro | 24 ++-- scripts/base/protocols/socks/main.bro | 2 +- scripts/policy/frameworks/tunnel.bro | 83 ----------- src/Conn.cc | 10 -- src/Conn.h | 12 +- src/Sessions.cc | 6 +- src/Tunnels.cc | 32 ++++- src/Tunnels.h | 27 ++-- src/ayiya-analyzer.pac | 10 +- src/event.bif | 2 +- src/types.bif | 9 +- .../Baseline/core.print-bpf-filters/conn.log | 6 +- .../Baseline/core.print-bpf-filters/output | 8 +- .../Baseline/core.tunnels.ip-in-ip/output | 16 +-- .../btest/Baseline/core.vlan-mpls/conn.log | 10 +- .../canonified_loaded_scripts.log | 4 + .../conn.log | 14 +- .../conn.log | 16 +-- testing/btest/core/tunnels/ip-in-ip.test | 2 +- 22 files changed, 224 insertions(+), 213 deletions(-) delete mode 100644 scripts/policy/frameworks/tunnel.bro diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake index e6ed45502a..a35d6894d1 100644 --- a/doc/scripts/DocSourcesList.cmake +++ b/doc/scripts/DocSourcesList.cmake @@ -53,6 +53,7 @@ rest_target(${psd} base/frameworks/packet-filter/netstats.bro) rest_target(${psd} base/frameworks/reporter/main.bro) rest_target(${psd} base/frameworks/signatures/main.bro) rest_target(${psd} base/frameworks/software/main.bro) +rest_target(${psd} base/frameworks/tunnels/main.bro) rest_target(${psd} base/protocols/conn/contents.bro) rest_target(${psd} base/protocols/conn/inactivity.bro) rest_target(${psd} base/protocols/conn/main.bro) @@ -71,6 +72,7 @@ rest_target(${psd} base/protocols/irc/main.bro) rest_target(${psd} base/protocols/smtp/entities-excerpt.bro) rest_target(${psd} base/protocols/smtp/entities.bro) rest_target(${psd} base/protocols/smtp/main.bro) +rest_target(${psd} base/protocols/socks/main.bro) rest_target(${psd} base/protocols/ssh/main.bro) rest_target(${psd} base/protocols/ssl/consts.bro) rest_target(${psd} base/protocols/ssl/main.bro) @@ -97,7 +99,6 @@ rest_target(${psd} policy/frameworks/metrics/http-example.bro) rest_target(${psd} 
policy/frameworks/metrics/ssl-example.bro) rest_target(${psd} policy/frameworks/software/version-changes.bro) rest_target(${psd} policy/frameworks/software/vulnerable.bro) -rest_target(${psd} policy/frameworks/tunnel.bro) rest_target(${psd} policy/integration/barnyard2/main.bro) rest_target(${psd} policy/integration/barnyard2/types.bro) rest_target(${psd} policy/misc/analysis-groups.bro) diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro index 952f382e56..df978d9510 100644 --- a/scripts/base/frameworks/tunnels/main.bro +++ b/scripts/base/frameworks/tunnels/main.bro @@ -1,53 +1,127 @@ -module Tunnels; +module Tunnel; export { + ## The tunnel logging stream identifier. redef enum Log::ID += { LOG }; - + + ## Types of interesting activity that can occur with a tunnel. type Action: enum { + ## A new tunnel (encapsulating "connection") has been seen. DISCOVER, + ## A tunnel connection has closed. CLOSE, + ## No new connections over a tunnel happened in the past day. + EXPIRE, }; - + + ## The record type which contains column fields of the tunnel log. type Info: record { - ts: time &log; - uid: string &log &optional; - id: conn_id &log; - action: Action &log; - tunnel_type: string &log; - user: string &log &optional; + ## Time at which some tunnel activity occurred. + ts: time &log; + ## The unique identifier for the tunnel, which may correspond + ## to a :bro:type:`connection`'s *uid* field for non-IP-in-IP tunnels. + uid: string &log &optional; + ## The tunnel "connection" 4-tuple of endpoint addresses/ports. + ## For an IP tunnel, the ports will be 0. + id: conn_id &log; + ## The type of activity that occurred. + action: Action &log; + ## The type of tunnel. + tunnel_type: Tunnel::Type &log; + user: string &log &optional; }; - - global register: function(c: connection, tunnel_type: string); - - global active: table[conn_id] of Tunnels::Info = table(); + + ## Logs all tunnels in an ecapsulation chain with action + ## :bro:see:`Tunnel::DISCOVER` that aren't already in the + ## :bro:id:`Tunnel::active` table and adds them if not. + global register_all: function(ecv: EncapsulatingConnVector); + + ## Logs a single tunnel "connection" with action + ## :bro:see:`Tunnel::DISCOVER` if it's not already in the + ## :bro:id:`Tunnel::active` table and adds it if not. + global register: function(ec: EncapsulatingConn); + + ## Logs a single tunnel "connection" with action + ## :bro:see:`Tunnel::EXPIRE` and removes it from the + ## :bro:id:`Tunnel::active` table. + ## + ## t: A table of tunnels. + ## + ## idx: The index of the tunnel table corresponding to the tunnel to expire. + ## + ## Returns: 0secs, which when this function is used as an + ## :bro:attr:`&expire_func`, indicates to remove the element at + ## *idx* immediately. + global expire: function(t: table[conn_id] of Info, idx: conn_id): interval; + + ## Removes a single tunnel from the :bro:id:`Tunnel::active` table + ## and logs the closing/expiration of the tunnel. + ## + ## tunnel: The tunnel which has closed or expired. + ## + ## action: The specific reason for the tunnel ending. + global close: function(tunnel: Info, action: Action); + + ## Currently active tunnels. That is, tunnels for which new, encapsulated + ## connections have been seen in the last day. 
+ global active: table[conn_id] of Info = table() &synchronized &read_expire=24hrs &expire_func=expire; } event bro_init() &priority=5 { - Log::create_stream(Tunnels::LOG, [$columns=Info]); + Log::create_stream(Tunnel::LOG, [$columns=Info]); } -function register(c: connection, tunnel_type: string) +function register_all(ecv: EncapsulatingConnVector) { - local tunnel: Info; + for ( i in ecv ) + register(ecv[i]); + } + +function register(ec: EncapsulatingConn) + { + if ( ec$cid !in active ) + { + local tunnel: Info; + tunnel$ts = network_time(); + tunnel$uid = ec$uid; + tunnel$id = ec$cid; + tunnel$action = DISCOVER; + tunnel$tunnel_type = ec$tunnel_type; + active[ec$cid] = tunnel; + Log::write(LOG, tunnel); + } + } + +function close(tunnel: Info, action: Action) + { + tunnel$action = action; tunnel$ts = network_time(); - tunnel$uid = c$uid; - tunnel$id = c$id; - tunnel$action = DISCOVER; - tunnel$tunnel_type = tunnel_type; - - active[c$id] = tunnel; Log::write(LOG, tunnel); + delete active[tunnel$id]; + } + +function expire(t: table[conn_id] of Info, idx: conn_id): interval + { + close(t[idx], EXPIRE); + return 0secs; + } + +event new_connection(c: connection) &priority=5 + { + if ( c?$tunnel ) + register_all(c$tunnel); + } + +event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5 + { + if ( c?$tunnel ) + register_all(c$tunnel); + register_all(e); } event connection_state_remove(c: connection) &priority=-5 { if ( c$id in active ) - { - local tunnel = active[c$id]; - tunnel$action=CLOSE; - Log::write(LOG, tunnel); - - delete active[c$id]; - } - } \ No newline at end of file + close(active[c$id], CLOSE); + } diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index a04063b55a..59075de439 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -188,6 +188,9 @@ export { cid: conn_id; ## The type of tunnel. tunnel_type: Tunnel::Type; + ## A globally unique identifier that, for non-IP-in-IP tunnels, + ## cross-references the *uid* field of :bro:type:`connection`. + uid: string &optional; } &log; } # end export module GLOBAL; @@ -198,7 +201,7 @@ module GLOBAL; ## .. todo:: We need this type definition only for declaring builtin functions ## via ``bifcl``. We should extend ``bifcl`` to understand composite types ## directly and then remove this alias. -type encapsulating_conns: vector of Tunnel::EncapsulatingConn; +type EncapsulatingConnVector: vector of Tunnel::EncapsulatingConn; ## Statistics about an endpoint. ## @@ -251,7 +254,7 @@ type connection: record { ## at index zero. It's also always the first such enapsulation seen ## for the connection unless the :bro:id:`tunnel_changed` event is handled ## and re-assigns this field to the new encapsulation. - tunnel: encapsulating_conns &optional; + tunnel: EncapsulatingConnVector &optional; }; ## Fields of a SYN packet. diff --git a/scripts/base/protocols/conn/main.bro b/scripts/base/protocols/conn/main.bro index e4dbb28790..03e2cfaf71 100644 --- a/scripts/base/protocols/conn/main.bro +++ b/scripts/base/protocols/conn/main.bro @@ -102,8 +102,9 @@ export { ## Number IP level bytes the responder sent. See ``orig_pkts``. resp_ip_bytes: count &log &optional; ## If this connection was over a tunnel, indicate the - ## `uid` value for the parent connection or connections. - parents: vector of string &log &optional; + ## *uid* values for any encapsulating parent connections + ## used over the lifetime of this inner connection. 
+ parents: set[string] &log; }; ## Event that can be handled to access the :bro:type:`Conn::Info` @@ -193,15 +194,8 @@ function set_conn(c: connection, eoc: bool) c$conn$ts=c$start_time; c$conn$uid=c$uid; c$conn$id=c$id; - if ( ! c$conn?$parents && c?$tunnel ) - { - c$conn$parents = vector(); - for ( i in c$tunnel ) - { - # TODO: maybe we should be storing uid's in the $tunnel field? - #c$conn$parents[|c$conn$parents|] = lookup_connection(c$tunnel[i]$cid)$uid; - } - } + if ( c?$tunnel && |c$tunnel| > 0 ) + add c$conn$parents[c$tunnel[|c$tunnel|-1]$uid]; c$conn$proto=get_port_transport_proto(c$id$resp_p); if( |Site::local_nets| > 0 ) c$conn$local_orig=Site::is_local_addr(c$id$orig_h); @@ -239,6 +233,14 @@ event content_gap(c: connection, is_orig: bool, seq: count, length: count) &prio c$conn$missed_bytes = c$conn$missed_bytes + length; } + +event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5 + { + set_conn(c, F); + if ( |e| > 0 ) + add c$conn$parents[e[|e|-1]$uid]; + c$tunnel = e; + } event connection_state_remove(c: connection) &priority=5 { diff --git a/scripts/base/protocols/socks/main.bro b/scripts/base/protocols/socks/main.bro index 61f569d56c..bd27f4fb85 100644 --- a/scripts/base/protocols/socks/main.bro +++ b/scripts/base/protocols/socks/main.bro @@ -11,7 +11,7 @@ export { event socks_request(c: connection, request_type: count, dstaddr: addr, dstname: string, p: port, user: string) { - Tunnels::register(c, "SOCKS"); + Tunnel::register([$cid=c$id, $tunnel_type=Tunnel::SOCKS, $uid=c$uid]); } # diff --git a/scripts/policy/frameworks/tunnel.bro b/scripts/policy/frameworks/tunnel.bro deleted file mode 100644 index fb9bf2f3f6..0000000000 --- a/scripts/policy/frameworks/tunnel.bro +++ /dev/null @@ -1,83 +0,0 @@ -##! Handle tunneled connections. -##! -##! Bro can decapsulate IPinIP and IPinUDP tunnels, were "IP" can be either -##! IPv4 or IPv6. The most common case will be decapsulating Teredo, 6to4, -##! 6in4, and AYIAY. When this script is loaded, decapsulation will be -##! enabled. "tunnel.log" will log the "parent" for each tunneled -##! connection. The identity (and existence) of the tunnel connection -##! is otherwise lost. -##! -##! Currently handles: -##! -##! * IP6 in IP{4,6}. (IP4 in IP is easy to add, but omitted due to lack -##! of test cases. -##! * IP{4,6} in UDP. This decapsulates e.g., standard *Teredo* packets -##! (without authentication or origin indicator) -##! * IP{4,6} in AYIAY -##! * Only checks for UDP tunnels on Teredo's and AYIAY's default -##! ports. See :bro:id:`udp_tunnel_ports` and -##! :bro:id:`udp_tunnel_allports` -##! -##! Decapsulation happens early in a packets processing, right after IP -##! defragmentation but before there is a connection context. The tunnel -##! headers are stripped from packet and the identity of the parent is -##! is stored as the ``tunnel_parent`` member of :bro:type:`connection`, -##! which is of type :bro:type:`Tunnel::Parent`. -##! -##! *Limitation:* decapsulation happens only on the primary path, i.e. -##! it's not available for the secondary path. - -@load base/protocols/conn - -module Tunnel; - -redef Tunnel::decapsulate_ip = T; -redef Tunnel::decapsulate_udp = T; -redef Tunnel::udp_tunnel_allports = T; - -export { - ## The Tunnel logging stream identifier. - redef enum Log::ID += { LOG }; - - ## This record describing a tunneled connection will be logged. - type Info : record { - ## This is the time of the first record - ts: time &log; - ## The uid of the child connection, i.e. 
the connection in the tunnel - uid: string &log; - ## The connection id of the child - id: conn_id &log; - ## The child's transport protocol - proto: transport_proto &log; - ## The parent connection of IP-pair - parent: Parent &log; - }; - - ## Event that can be handled to access the :bro:type:`Tunnel::Info` - ## record as it is sent on to the logging framework. - global log_tunnel: event(rec: Info); - - redef record Conn::Info += { - ## If the connection is tunneled, the type of tunnel. - tunnel_type: Tunneltype &log &optional; - }; -} - -event bro_init() - { - Log::create_stream(Tunnel::LOG, [$columns=Info, $ev=log_tunnel]); - } - -event new_connection(c: connection) - { - if (c?$tunnel_parent) - { - local info: Info; - info$ts = c$start_time; - info$uid = c$uid; - info$id = c$id; - info$proto = get_port_transport_proto(c$id$resp_p); - info$parent = c$tunnel_parent; - Log::write(Tunnel::LOG, info); - } - } diff --git a/src/Conn.cc b/src/Conn.cc index 55cfb3b3cb..6333f98f3e 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -13,7 +13,6 @@ #include "Timer.h" #include "PIA.h" #include "binpac.h" -#include "Tunnels.h" void ConnectionTimer::Init(Connection* arg_conn, timer_func arg_timer, int arg_do_expire) @@ -192,15 +191,6 @@ Connection::~Connection() --external_connections; } -void Connection::CheckEncapsulation(const Encapsulation& arg_encap) - { - if ( encapsulation != arg_encap ) - { - Event(tunnel_changed, 0, arg_encap.GetVectorVal()); - encapsulation = arg_encap; - } - } - void Connection::Done() { finished = 1; diff --git a/src/Conn.h b/src/Conn.h index 9abc75ff68..d5622efe03 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -23,7 +23,6 @@ class RuleHdrTest; class Specific_RE_Matcher; class TransportLayerAnalyzer; class RuleEndpointState; -class TunnelParent; typedef enum { NUL_IN_LINE, @@ -56,7 +55,14 @@ public: const Encapsulation& arg_encap); virtual ~Connection(); - void CheckEncapsulation(const Encapsulation& arg_encap); + void CheckEncapsulation(const Encapsulation& arg_encap) + { + if ( encapsulation != arg_encap ) + { + Event(tunnel_changed, 0, arg_encap.GetVectorVal()); + encapsulation = arg_encap; + } + } // Invoked when connection is about to be removed. Use Ref(this) // inside Done to keep the connection object around (though it'll @@ -245,6 +251,8 @@ public: void SetTransport(TransportProto arg_proto) { proto = arg_proto; } void SetUID(uint64 arg_uid) { uid = arg_uid; } + + uint64 GetUID() const { return uid; } const Encapsulation& GetEncapsulation() const { return encapsulation; } diff --git a/src/Sessions.cc b/src/Sessions.cc index cb2b5633ca..fa6faba85c 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -543,11 +543,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, fake_hdr.ts = hdr->ts; EncapsulatingConn ec(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), - ip_hdr->IP4_Hdr() ? - ( proto == IPPROTO_IPV6 ? - BifEnum::Tunnel::IP6_IN_IP4 : BifEnum::Tunnel::IP4_IN_IP4 ) : - ( proto == IPPROTO_IPV6 ? - BifEnum::Tunnel::IP6_IN_IP6 : BifEnum::Tunnel::IP4_IN_IP6 )); + BifEnum::Tunnel::IP); encapsulation.Add(ec); DoNextPacket(t, &fake_hdr, inner_ip, data, 0, encapsulation); diff --git a/src/Tunnels.cc b/src/Tunnels.cc index 950e94628e..7ae87912d7 100644 --- a/src/Tunnels.cc +++ b/src/Tunnels.cc @@ -1,6 +1,20 @@ // See the file "COPYING" in the main distribution directory for copyright. 
#include "Tunnels.h" +#include "util.h" +#include "Conn.h" + +EncapsulatingConn::EncapsulatingConn(Connection* c, BifEnum::Tunnel::Type t) + : src_addr(c->OrigAddr()), dst_addr(c->RespAddr()), + src_port(c->OrigPort()), dst_port(c->RespPort()), + type(t), uid(c->GetUID()) + { + if ( ! uid ) + { + uid = calculate_unique_id(); + c->SetUID(uid); + } + } RecordVal* EncapsulatingConn::GetRecordVal() const { @@ -8,15 +22,17 @@ RecordVal* EncapsulatingConn::GetRecordVal() const new RecordVal(BifType::Record::Tunnel::EncapsulatingConn); TransportProto tproto; switch ( type ) { - case BifEnum::Tunnel::IP6_IN_IP4: - case BifEnum::Tunnel::IP4_IN_IP4: - case BifEnum::Tunnel::IP6_IN_IP6: - case BifEnum::Tunnel::IP4_IN_IP6: - tproto = TRANSPORT_UNKNOWN; - break; - default: + case BifEnum::Tunnel::AYIYA: + case BifEnum::Tunnel::TEREDO: tproto = TRANSPORT_UDP; break; + case BifEnum::Tunnel::SOCKS: + tproto = TRANSPORT_TCP; + break; + case BifEnum::Tunnel::IP: + default: + tproto = TRANSPORT_UNKNOWN; + break; } // end switch RecordVal* id_val = new RecordVal(conn_id); @@ -26,6 +42,8 @@ RecordVal* EncapsulatingConn::GetRecordVal() const id_val->Assign(3, new PortVal(ntohs(dst_port), tproto)); rv->Assign(0, id_val); rv->Assign(1, new EnumVal(type, BifType::Enum::Tunnel::Type)); + char tmp[20]; + rv->Assign(2, new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62))); return rv; } diff --git a/src/Tunnels.h b/src/Tunnels.h index d4dfee8f97..e6e3de6d76 100644 --- a/src/Tunnels.h +++ b/src/Tunnels.h @@ -9,25 +9,27 @@ #include "Val.h" #include +class Connection; + class EncapsulatingConn { public: - EncapsulatingConn() - : src_port(0), dst_port(0), type(BifEnum::Tunnel::NONE) {} - EncapsulatingConn(const IPAddr& s, const IPAddr& d, BifEnum::Tunnel::Type t) - : src_addr(s), dst_addr(d), src_port(0), dst_port(0), type(t) {} + : src_addr(s), dst_addr(d), src_port(0), dst_port(0), type(t) + { + uid = calculate_unique_id(); + } - EncapsulatingConn(const IPAddr& s, const IPAddr& d, uint16 sp, uint16 dp, - BifEnum::Tunnel::Type t) - : src_addr(s), dst_addr(d), src_port(sp), dst_port(dp), type(t) {} + EncapsulatingConn(Connection* c, BifEnum::Tunnel::Type t); EncapsulatingConn(const EncapsulatingConn& other) : src_addr(other.src_addr), dst_addr(other.dst_addr), src_port(other.src_port), dst_port(other.dst_port), - type(other.type) {} + type(other.type), uid(other.uid) + {} - ~EncapsulatingConn() {} + ~EncapsulatingConn() + {} RecordVal* GetRecordVal() const; @@ -35,7 +37,8 @@ public: const EncapsulatingConn& ec2) { return ec1.type == ec2.type && ec1.src_addr == ec2.src_addr && - ec1.src_port == ec2.src_port && ec1.dst_port == ec2.dst_port; + ec1.src_port == ec2.src_port && ec1.dst_port == ec2.dst_port && + ec1.uid == ec2.uid; } friend bool operator!=(const EncapsulatingConn& ec1, @@ -49,11 +52,13 @@ public: uint16 src_port; uint16 dst_port; BifEnum::Tunnel::Type type; + uint64 uid; }; class Encapsulation { public: - Encapsulation() : conns(0) {} + Encapsulation() : conns(0) + {} Encapsulation(const Encapsulation& other) { diff --git a/src/ayiya-analyzer.pac b/src/ayiya-analyzer.pac index 856d22c417..1a91cb1229 100644 --- a/src/ayiya-analyzer.pac +++ b/src/ayiya-analyzer.pac @@ -36,18 +36,14 @@ flow AYIYA_Flow return false; } - if ( inner_ip != 0 ) - connection()->bro_analyzer()->ProtocolConfirmation(); + connection()->bro_analyzer()->ProtocolConfirmation(); struct pcap_pkthdr fake_hdr; fake_hdr.caplen = fake_hdr.len = ${pdu.packet}.length(); - // Not sure what to do with this timestamp. 
- //fake_hdr.ts = network_time(); + fake_hdr.ts.tv_sec = fake_hdr.ts.tv_usec = 0; Encapsulation encap(c->GetEncapsulation()); - EncapsulatingConn ec(c->OrigAddr(), c->RespAddr(), - c->OrigPort(), c->RespPort(), - BifEnum::Tunnel::AYIYA); + EncapsulatingConn ec(c, BifEnum::Tunnel::AYIYA); encap.Add(ec); sessions->DoNextPacket(network_time(), &fake_hdr, inner_ip, ${pdu.packet}.data(), 0, encap); diff --git a/src/event.bif b/src/event.bif index 07fde79cfa..4f586c4f79 100644 --- a/src/event.bif +++ b/src/event.bif @@ -150,7 +150,7 @@ event new_connection%(c: connection%); ## c: The connection whose tunnel/encapsulation changed. ## ## e: The new encapsulation. -event tunnel_changed%(c: connection, e: encapsulating_conns%); +event tunnel_changed%(c: connection, e: EncapsulatingConnVector%); ## Generated when reassembly starts for a TCP connection. The event is raised ## at the moment when Bro's TCP analyzer enables stream reassembly for a diff --git a/src/types.bif b/src/types.bif index 526198b2f5..444c33eee9 100644 --- a/src/types.bif +++ b/src/types.bif @@ -172,13 +172,10 @@ enum ID %{ module Tunnel; enum Type %{ NONE, - IP6_IN_IP4, - IP4_IN_IP4, - IP6_IN_IP6, - IP4_IN_IP6, - IP6_IN_UDP, - IP4_IN_UDP, + IP, AYIYA, + TEREDO, + SOCKS, %} type EncapsulatingConn: record; diff --git a/testing/btest/Baseline/core.print-bpf-filters/conn.log b/testing/btest/Baseline/core.print-bpf-filters/conn.log index 5ce968d5e6..ca81844a4a 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/conn.log +++ b/testing/btest/Baseline/core.print-bpf-filters/conn.log @@ -3,6 +3,6 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes -#types time string addr port addr port enum string interval count count string bool count string count count count count -1128727435.450898 UWkUyAuUGXf 141.42.64.125 56730 125.190.109.199 80 tcp http 1.733303 98 9417 SF - 0 ShADdFaf 12 730 10 9945 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1128727435.450898 UWkUyAuUGXf 141.42.64.125 56730 125.190.109.199 80 tcp http 1.733303 98 9417 SF - 0 ShADdFaf 12 730 10 9945 (empty) diff --git a/testing/btest/Baseline/core.print-bpf-filters/output b/testing/btest/Baseline/core.print-bpf-filters/output index a2bf430fb4..b4a52965cb 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/output +++ b/testing/btest/Baseline/core.print-bpf-filters/output @@ -5,7 +5,7 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1328294052.330721 - ip or not ip T T +1335456050.312960 - ip or not ip T T #separator \x09 #set_separator , #empty_field (empty) @@ -13,7 +13,7 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1328294052.542418 - ((((((((((((((((((((((((port 53) or (tcp port 989)) or (tcp port 443)) or (port 6669)) or (udp and port 5353)) or (port 6668)) or (udp and port 5355)) or (tcp port 22)) or (tcp port 995)) or (port 21)) or (tcp port 25 or tcp port 587)) or (port 6667)) or (tcp port 614)) or (tcp port 990)) or (udp port 137)) or (tcp port 993)) or (tcp port 5223)) or (port 514)) or 
(tcp port 585)) or (tcp port 992)) or (tcp port 563)) or (tcp port 994)) or (tcp port 636)) or (tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888))) or (port 6666) T T +1335456050.557822 - ((((((((((((((((((((((((port 53) or (tcp port 989)) or (tcp port 443)) or (port 6669)) or (udp and port 5353)) or (port 6668)) or (udp and port 5355)) or (tcp port 22)) or (tcp port 995)) or (port 21)) or (tcp port 25 or tcp port 587)) or (port 6667)) or (tcp port 614)) or (tcp port 990)) or (udp port 137)) or (tcp port 993)) or (tcp port 5223)) or (port 514)) or (tcp port 585)) or (tcp port 992)) or (tcp port 563)) or (tcp port 994)) or (tcp port 636)) or (tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888))) or (port 6666) T T #separator \x09 #set_separator , #empty_field (empty) @@ -21,7 +21,7 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1328294052.748480 - port 42 T T +1335456050.805695 - port 42 T T #separator \x09 #set_separator , #empty_field (empty) @@ -29,4 +29,4 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1328294052.952845 - port 56730 T T +1335456051.042953 - port 56730 T T diff --git a/testing/btest/Baseline/core.tunnels.ip-in-ip/output b/testing/btest/Baseline/core.tunnels.ip-in-ip/output index 7ed712aec8..4c8738290f 100644 --- a/testing/btest/Baseline/core.tunnels.ip-in-ip/output +++ b/testing/btest/Baseline/core.tunnels.ip-in-ip/output @@ -1,22 +1,22 @@ new_connection: tunnel conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] - encap: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6]] + encap: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] new_connection: tunnel conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] - encap: [[cid=[orig_h=feed::beef, orig_p=0/unknown, resp_h=feed::cafe, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6], [cid=[orig_h=babe::beef, orig_p=0/unknown, resp_h=dead::babe, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6]] + encap: [[cid=[orig_h=feed::beef, orig_p=0/unknown, resp_h=feed::cafe, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf], [cid=[orig_h=babe::beef, orig_p=0/unknown, resp_h=dead::babe, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=arKYeMETxOg]] new_connection: tunnel conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] - encap: [[cid=[orig_h=1.2.3.4, orig_p=0/unknown, resp_h=5.6.7.8, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP4]] + encap: [[cid=[orig_h=1.2.3.4, orig_p=0/unknown, resp_h=5.6.7.8, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] new_connection: tunnel conn_id: [orig_h=70.55.213.211, orig_p=31337/tcp, resp_h=192.88.99.1, resp_p=80/tcp] - encap: [[cid=[orig_h=2002:4637:d5d3::4637:d5d3, orig_p=0/unknown, resp_h=2001:4860:0:2001::68, resp_p=0/unknown], tunnel_type=Tunnel::IP4_IN_IP6]] + encap: [[cid=[orig_h=2002:4637:d5d3::4637:d5d3, orig_p=0/unknown, resp_h=2001:4860:0:2001::68, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] new_connection: tunnel conn_id: [orig_h=10.0.0.1, orig_p=30000/udp, resp_h=10.0.0.2, resp_p=13000/udp] - encap: [[cid=[orig_h=1.2.3.4, orig_p=0/unknown, resp_h=5.6.7.8, resp_p=0/unknown], tunnel_type=Tunnel::IP4_IN_IP4]] 
+ encap: [[cid=[orig_h=1.2.3.4, orig_p=0/unknown, resp_h=5.6.7.8, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] new_connection: tunnel conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] - encap: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6]] + encap: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] tunnel_changed: conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] - old: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6]] - new: [[cid=[orig_h=feed::beef, orig_p=0/unknown, resp_h=feed::cafe, resp_p=0/unknown], tunnel_type=Tunnel::IP6_IN_IP6]] + old: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] + new: [[cid=[orig_h=feed::beef, orig_p=0/unknown, resp_h=feed::cafe, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=k6kgXLOoSKl]] diff --git a/testing/btest/Baseline/core.vlan-mpls/conn.log b/testing/btest/Baseline/core.vlan-mpls/conn.log index f3c958ea99..20903d1db8 100644 --- a/testing/btest/Baseline/core.vlan-mpls/conn.log +++ b/testing/btest/Baseline/core.vlan-mpls/conn.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes -#types time string addr port addr port enum string interval count count string bool count string count count count count -952109346.874907 UWkUyAuUGXf 10.1.2.1 11001 10.34.0.1 23 tcp - 2.102560 26 0 SH - 0 SADF 11 470 0 0 -1128727435.450898 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 tcp http 1.733303 98 9417 SF - 0 ShADdFaf 12 730 10 9945 -1278600802.069419 k6kgXLOoSKl 10.20.80.1 50343 10.0.0.15 80 tcp - 0.004152 9 3429 SF - 0 ShADadfF 7 381 7 3801 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +952109346.874907 UWkUyAuUGXf 10.1.2.1 11001 10.34.0.1 23 tcp - 2.102560 26 0 SH - 0 SADF 11 470 0 0 (empty) +1128727435.450898 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 tcp http 1.733303 98 9417 SF - 0 ShADdFaf 12 730 10 9945 (empty) +1278600802.069419 k6kgXLOoSKl 10.20.80.1 50343 10.0.0.15 80 tcp - 0.004152 9 3429 SF - 0 ShADadfF 7 381 7 3801 (empty) diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index 92deb62edb..88a85fc827 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -61,6 +61,8 @@ scripts/base/init-default.bro scripts/base/frameworks/intel/./main.bro scripts/base/frameworks/reporter/__load__.bro scripts/base/frameworks/reporter/./main.bro + 
scripts/base/frameworks/tunnels/__load__.bro + scripts/base/frameworks/tunnels/./main.bro scripts/base/protocols/conn/__load__.bro scripts/base/protocols/conn/./main.bro scripts/base/protocols/conn/./contents.bro @@ -85,6 +87,8 @@ scripts/base/init-default.bro scripts/base/protocols/smtp/./main.bro scripts/base/protocols/smtp/./entities.bro scripts/base/protocols/smtp/./entities-excerpt.bro + scripts/base/protocols/socks/__load__.bro + scripts/base/protocols/socks/./main.bro scripts/base/protocols/ssh/__load__.bro scripts/base/protocols/ssh/./main.bro scripts/base/protocols/ssl/__load__.bro diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log index bcb05ef415..5704153b07 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes -#types time string addr port addr port enum string interval count count string bool count string count count count count -1329843175.736107 arKYeMETxOg 141.142.220.235 37604 199.233.217.249 56666 tcp ftp-data 0.112432 0 342 SF - 0 ShAdfFa 4 216 4 562 -1329843179.871641 k6kgXLOoSKl 141.142.220.235 59378 199.233.217.249 56667 tcp ftp-data 0.111218 0 77 SF - 0 ShAdfFa 4 216 4 297 -1329843194.151526 nQcgTWjvg4c 199.233.217.249 61920 141.142.220.235 33582 tcp ftp-data 0.056211 342 0 SF - 0 ShADaFf 5 614 3 164 -1329843197.783443 j4u32Pc5bif 199.233.217.249 61918 141.142.220.235 37835 tcp ftp-data 0.056005 77 0 SF - 0 ShADaFf 5 349 3 164 -1329843161.968492 UWkUyAuUGXf 141.142.220.235 50003 199.233.217.249 21 tcp ftp 38.055625 180 3146 SF - 0 ShAdDfFa 38 2164 25 4458 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1329843175.736107 arKYeMETxOg 141.142.220.235 37604 199.233.217.249 56666 tcp ftp-data 0.112432 0 342 SF - 0 ShAdfFa 4 216 4 562 (empty) +1329843179.871641 k6kgXLOoSKl 141.142.220.235 59378 199.233.217.249 56667 tcp ftp-data 0.111218 0 77 SF - 0 ShAdfFa 4 216 4 297 (empty) +1329843194.151526 nQcgTWjvg4c 199.233.217.249 61920 141.142.220.235 33582 tcp ftp-data 0.056211 342 0 SF - 0 ShADaFf 5 614 3 164 (empty) +1329843197.783443 j4u32Pc5bif 199.233.217.249 61918 141.142.220.235 37835 tcp ftp-data 0.056005 77 0 SF - 0 ShADaFf 5 349 3 164 (empty) +1329843161.968492 UWkUyAuUGXf 141.142.220.235 50003 199.233.217.249 21 tcp ftp 38.055625 180 3146 SF - 0 ShAdDfFa 38 2164 25 4458 (empty) diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log index c4a515710d..e3d458bae7 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes 
resp_pkts resp_ip_bytes -#types time string addr port addr port enum string interval count count string bool count string count count count count -1329327783.316897 arKYeMETxOg 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49186 2001:470:4867:99::21 57086 tcp ftp-data 0.219721 0 342 SF - 0 ShAdfFa 5 372 4 642 -1329327786.524332 k6kgXLOoSKl 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49187 2001:470:4867:99::21 57087 tcp ftp-data 0.217501 0 43 SF - 0 ShAdfFa 5 372 4 343 -1329327787.289095 nQcgTWjvg4c 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49188 2001:470:4867:99::21 57088 tcp ftp-data 0.217941 0 77 SF - 0 ShAdfFa 5 372 4 377 -1329327795.571921 j4u32Pc5bif 2001:470:4867:99::21 55785 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49189 tcp ftp-data 0.109813 77 0 SF - 0 ShADFaf 5 449 4 300 -1329327777.822004 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 tcp ftp 26.658219 310 3448 SF - 0 ShAdDfFa 57 4426 34 5908 -1329327800.017649 TEfuqmmG4bh 2001:470:4867:99::21 55647 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49190 tcp ftp-data 0.109181 342 0 SF - 0 ShADFaf 5 714 4 300 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1329327783.316897 arKYeMETxOg 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49186 2001:470:4867:99::21 57086 tcp ftp-data 0.219721 0 342 SF - 0 ShAdfFa 5 372 4 642 (empty) +1329327786.524332 k6kgXLOoSKl 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49187 2001:470:4867:99::21 57087 tcp ftp-data 0.217501 0 43 SF - 0 ShAdfFa 5 372 4 343 (empty) +1329327787.289095 nQcgTWjvg4c 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49188 2001:470:4867:99::21 57088 tcp ftp-data 0.217941 0 77 SF - 0 ShAdfFa 5 372 4 377 (empty) +1329327795.571921 j4u32Pc5bif 2001:470:4867:99::21 55785 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49189 tcp ftp-data 0.109813 77 0 SF - 0 ShADFaf 5 449 4 300 (empty) +1329327777.822004 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 tcp ftp 26.658219 310 3448 SF - 0 ShAdDfFa 57 4426 34 5908 (empty) +1329327800.017649 TEfuqmmG4bh 2001:470:4867:99::21 55647 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49190 tcp ftp-data 0.109181 342 0 SF - 0 ShADFaf 5 714 4 300 (empty) diff --git a/testing/btest/core/tunnels/ip-in-ip.test b/testing/btest/core/tunnels/ip-in-ip.test index f526575d48..38f4610445 100644 --- a/testing/btest/core/tunnels/ip-in-ip.test +++ b/testing/btest/core/tunnels/ip-in-ip.test @@ -20,7 +20,7 @@ event new_connection(c: connection) } } -event tunnel_changed(c: connection, e: encapsulating_conns) +event tunnel_changed(c: connection, e: EncapsulatingConnVector) { print "tunnel_changed:"; print fmt(" conn_id: %s", c$id); From 44c4d41d0d16017d8dae51c66954f2528a882fc8 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 26 Apr 2012 12:53:20 -0500 Subject: [PATCH 263/651] Add summary documentation to tunnels/main.bro. --- scripts/base/frameworks/tunnels/main.bro | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro index df978d9510..b40aa519f9 100644 --- a/scripts/base/frameworks/tunnels/main.bro +++ b/scripts/base/frameworks/tunnels/main.bro @@ -1,3 +1,10 @@ +##! This script handles the tracking/logging of tunnels (e.g. Teredo, +##! AYIYA, or IP-in-IP such as 6to4 where "IP" is either IPv4 or IPv6). +##! +##! 
For any connection that occurs over a tunnel, information about its
+##! encapsulating tunnels is also found in the *tunnel* field of
+##! :bro:type:`connection`.
+
 module Tunnel;
 
 export {

From 8791ac7337d729d0296eeacb136822e83bec6df7 Mon Sep 17 00:00:00 2001
From: Jon Siwek
Date: Thu, 26 Apr 2012 13:05:53 -0500
Subject: [PATCH 264/651] Fix AYIYA analyzer tag.

---
 src/AYIYA.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/AYIYA.cc b/src/AYIYA.cc
index c70af87fa8..ef845a5368 100644
--- a/src/AYIYA.cc
+++ b/src/AYIYA.cc
@@ -1,7 +1,7 @@
 #include "AYIYA.h"
 
 AYIYA_Analyzer::AYIYA_Analyzer(Connection* conn)
-: Analyzer(AnalyzerTag::SYSLOG_BINPAC, conn)
+: Analyzer(AnalyzerTag::AYIYA, conn)
 {
 	interp = new binpac::AYIYA::AYIYA_Conn(this);
 	did_session_done = 0;

From 8f91ecee7197329ba7ddc0dbf4cf01831b86e17a Mon Sep 17 00:00:00 2001
From: Seth Hall
Date: Fri, 27 Apr 2012 01:24:41 -0400
Subject: [PATCH 265/651] Fixed IPv6 link local unicast CIDR and added IPv6 loopback to private address space.

---
 scripts/base/utils/site.bro | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/scripts/base/utils/site.bro b/scripts/base/utils/site.bro
index e8db91f3d1..e6afd1c6a5 100644
--- a/scripts/base/utils/site.bro
+++ b/scripts/base/utils/site.bro
@@ -13,7 +13,8 @@ export {
 	172.16.0.0/12,
 	100.64.0.0/10, # RFC6598 Carrier Grade NAT
 	127.0.0.0/8,
-	[fe80::]/16,
+	[fe80::]/10,
+	[::1]/128,
 	} &redef;
 
 	## Networks that are considered "local".

From 064c5dddb82528ed24a4e38fde2865f739539a56 Mon Sep 17 00:00:00 2001
From: Jon Siwek
Date: Fri, 27 Apr 2012 10:28:46 -0500
Subject: [PATCH 266/651] Fix for IP tunnel UID persistence.

---
 scripts/base/frameworks/tunnels/main.bro | 2 +-
 scripts/base/init-bare.bro | 2 +-
 src/Conn.cc | 1 +
 src/Sessions.cc | 20 +++++++++--
 src/Sessions.h | 5 +++
 src/Tunnels.h | 12 ++++++-
 .../core.tunnels.ip-tunnel-uid/output | 33 ++++++++++++++++++
 .../btest/Traces/tunnels/ping6-in-ipv4.pcap | Bin 0 -> 1524 bytes
 testing/btest/core/tunnels/ip-tunnel-uid.test | 33 ++++++++++++++++++
 9 files changed, 102 insertions(+), 6 deletions(-)
 create mode 100644 testing/btest/Baseline/core.tunnels.ip-tunnel-uid/output
 create mode 100644 testing/btest/Traces/tunnels/ping6-in-ipv4.pcap
 create mode 100644 testing/btest/core/tunnels/ip-tunnel-uid.test

diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro
index b40aa519f9..743098cd6d 100644
--- a/scripts/base/frameworks/tunnels/main.bro
+++ b/scripts/base/frameworks/tunnels/main.bro
@@ -27,7 +27,7 @@ export {
 	ts: time &log;
 	## The unique identifier for the tunnel, which may correspond
 	## to a :bro:type:`connection`'s *uid* field for non-IP-in-IP tunnels.
-	uid: string &log &optional;
+	uid: string &log;
 	## The tunnel "connection" 4-tuple of endpoint addresses/ports.
 	## For an IP tunnel, the ports will be 0.
 	id: conn_id &log;
diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro
index 59075de439..cc798ecdc5 100644
--- a/scripts/base/init-bare.bro
+++ b/scripts/base/init-bare.bro
@@ -190,7 +190,7 @@ export {
 	tunnel_type: Tunnel::Type;
 	## A globally unique identifier that, for non-IP-in-IP tunnels,
 	## cross-references the *uid* field of :bro:type:`connection`.
- uid: string &optional; + uid: string; } &log; } # end export module GLOBAL; diff --git a/src/Conn.cc b/src/Conn.cc index 6333f98f3e..0e34903bed 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -13,6 +13,7 @@ #include "Timer.h" #include "PIA.h" #include "binpac.h" +#include "Tunnels.h" void ConnectionTimer::Init(Connection* arg_conn, timer_func arg_timer, int arg_do_expire) diff --git a/src/Sessions.cc b/src/Sessions.cc index fa6faba85c..d3d5d294bc 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -542,9 +542,23 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, fake_hdr.caplen = fake_hdr.len = caplen; fake_hdr.ts = hdr->ts; - EncapsulatingConn ec(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), - BifEnum::Tunnel::IP); - encapsulation.Add(ec); + IPPair tunnel_idx; + if ( ip_hdr->SrcAddr() < ip_hdr->DstAddr() ) + tunnel_idx = IPPair(ip_hdr->SrcAddr(), ip_hdr->DstAddr()); + else + tunnel_idx = IPPair(ip_hdr->DstAddr(), ip_hdr->SrcAddr()); + + IPTunnelMap::const_iterator it = ip_tunnels.find(tunnel_idx); + + if ( it == ip_tunnels.end() ) + { + EncapsulatingConn ec(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), + BifEnum::Tunnel::IP); + ip_tunnels[tunnel_idx] = ec; + encapsulation.Add(ec); + } + else + encapsulation.Add(it->second); DoNextPacket(t, &fake_hdr, inner_ip, data, 0, encapsulation); diff --git a/src/Sessions.h b/src/Sessions.h index fb76d29831..54ff74ded9 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -11,6 +11,8 @@ #include "PacketFilter.h" #include "Stats.h" #include "NetVar.h" +#include "Tunnels.h" +#include struct pcap_pkthdr; @@ -202,6 +204,9 @@ protected: PDict(Connection) udp_conns; PDict(Connection) icmp_conns; PDict(FragReassembler) fragments; + typedef pair IPPair; + typedef std::map IPTunnelMap; + IPTunnelMap ip_tunnels; ARP_Analyzer* arp_analyzer; diff --git a/src/Tunnels.h b/src/Tunnels.h index e6e3de6d76..b8d693ea59 100644 --- a/src/Tunnels.h +++ b/src/Tunnels.h @@ -13,6 +13,10 @@ class Connection; class EncapsulatingConn { public: + EncapsulatingConn() + : src_port(0), dst_port(0), type(BifEnum::Tunnel::NONE), uid(0) + {} + EncapsulatingConn(const IPAddr& s, const IPAddr& d, BifEnum::Tunnel::Type t) : src_addr(s), dst_addr(d), src_port(0), dst_port(0), type(t) @@ -36,7 +40,13 @@ public: friend bool operator==(const EncapsulatingConn& ec1, const EncapsulatingConn& ec2) { - return ec1.type == ec2.type && ec1.src_addr == ec2.src_addr && + if ( ec1.type != ec2.type ) + return false; + if ( ec1.type == BifEnum::Tunnel::IP ) + return ec1.uid == ec2.uid && + ((ec1.src_addr == ec2.src_addr && ec1.dst_addr == ec2.dst_addr) || + (ec1.src_addr == ec2.dst_addr && ec1.dst_addr == ec2.src_addr)); + return ec1.src_addr == ec2.src_addr && ec1.dst_addr == ec2.dst_addr && ec1.src_port == ec2.src_port && ec1.dst_port == ec2.dst_port && ec1.uid == ec2.uid; } diff --git a/testing/btest/Baseline/core.tunnels.ip-tunnel-uid/output b/testing/btest/Baseline/core.tunnels.ip-tunnel-uid/output new file mode 100644 index 0000000000..afb5837b23 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.ip-tunnel-uid/output @@ -0,0 +1,33 @@ +new_connection: tunnel + conn_id: [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + encap: [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] +NEW_PACKET: + [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, 
uid=UWkUyAuUGXf]] +NEW_PACKET: + [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] +NEW_PACKET: + [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] +NEW_PACKET: + [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] +NEW_PACKET: + [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] +NEW_PACKET: + [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] +NEW_PACKET: + [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] +NEW_PACKET: + [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] +NEW_PACKET: + [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] +NEW_PACKET: + [orig_h=2001:db8:0:1::1, orig_p=128/icmp, resp_h=2001:db8:0:1::2, resp_p=129/icmp] + [[cid=[orig_h=10.0.0.1, orig_p=0/unknown, resp_h=10.0.0.2, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] diff --git a/testing/btest/Traces/tunnels/ping6-in-ipv4.pcap b/testing/btest/Traces/tunnels/ping6-in-ipv4.pcap new file mode 100644 index 0000000000000000000000000000000000000000..5e0995f80e0fd05ca0d03a58b6d99abd524097a5 GIT binary patch literal 1524 zcmca|c+)~A1{MZ5P+(wS1ai6`CU`u}XJ=>wvO)L|qZ1R5WB`#I46Y0e6%3pV4F5Hk z2XX;505MYnP!)rXm4gB!?+zdfWC#c_qVbs;80z{Z?to;Nm|0la*f}`4xOsT__yq)o zghfQf#3dx9q-A8~8Z-Z6ckn#JRFGBju}=6SCJT2o)oz%LmSCe zm|?{O4l6&>!^*%87*@g>output 2>&1 +# @TEST-EXEC: btest-diff output + +event new_connection(c: connection) + { + if ( c?$tunnel ) + { + print "new_connection: tunnel"; + print fmt(" conn_id: %s", c$id); + print fmt(" encap: %s", c$tunnel); + } + else + { + print "new_connection: no tunnel"; + } + } + +event tunnel_changed(c: connection, e: EncapsulatingConnVector) + { + print "tunnel_changed:"; + print fmt(" conn_id: %s", c$id); + if ( c?$tunnel ) + print fmt(" old: %s", c$tunnel); + print fmt(" new: %s", e); + } + +event new_packet(c: connection, p: pkt_hdr) + { + print "NEW_PACKET:"; + print fmt(" %s", c$id); + if ( c?$tunnel ) + print fmt(" %s", c$tunnel); + } From 88807df269d2fab91777b44a3e63e7e8ba0bd8ce Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 27 Apr 2012 11:32:29 -0400 Subject: [PATCH 267/651] Fixed parsing of TLS server extensions. 
---
 src/ssl-protocol.pac | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/ssl-protocol.pac b/src/ssl-protocol.pac
index 627645e4da..5bfa2c51f1 100644
--- a/src/ssl-protocol.pac
+++ b/src/ssl-protocol.pac
@@ -425,6 +425,10 @@ type ServerHello(rec: SSLRecord) = record {
 	session_id : uint8[session_len];
 	cipher_suite : uint16[1];
 	compression_method : uint8;
+	# This weirdness is to deal with the possible existence or absence
+	# of the following fields.
+	ext_len: uint16[] &until($element == 0 || $element != 0);
+	extensions : SSLExtension(rec)[] &until($input.length() == 0);
 } &let {
 	state_changed : bool =
 		$context.connection.transition(STATE_CLIENT_HELLO_RCVD,

From bff3cba129720f208a8931d59861b9e2ba841e83 Mon Sep 17 00:00:00 2001
From: Bernhard Amann
Date: Fri, 27 Apr 2012 16:18:14 -0700
Subject: [PATCH 268/651] Add two more TLS extension values that we see in live traffic.

- origin_bound_certificates is a current draft
  http://tools.ietf.org/html/draft-balfanz-tls-obc-01
- encrypted client certificates is a draft that expired yesterday.
  http://tools.ietf.org/html/draft-agl-tls-encryptedclientcerts-00
---
 scripts/base/protocols/ssl/consts.bro | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/scripts/base/protocols/ssl/consts.bro b/scripts/base/protocols/ssl/consts.bro
index 6c33e6e438..42989a4cb9 100644
--- a/scripts/base/protocols/ssl/consts.bro
+++ b/scripts/base/protocols/ssl/consts.bro
@@ -81,6 +81,8 @@ export {
 	[35] = "SessionTicket TLS",
 	[40] = "extended_random",
 	[13172] = "next_protocol_negotiation",
+	[13175] = "origin_bound_certificates",
+	[13180] = "encrypted_client_certificates",
 	[65281] = "renegotiation_info"
 	} &default=function(i: count):string { return fmt("unknown-%d", i); };

From 0a6104fe6615822376db875dce0ee11df38c6f3c Mon Sep 17 00:00:00 2001
From: Seth Hall
Date: Thu, 3 May 2012 10:52:24 -0400
Subject: [PATCH 269/651] More bugfixes, cleanup, and a test for the SSL analyzer

- SSL related files and classes renamed to remove the "binpac" term.

- A small fix for DPD scripts to make the DPD log more helpful if there are
  multiple continued failures. Also, fixed the SSL analyzer to make it stop
  doing repeated violation messages for some handshake failures.

- Added a $issuer_subject to the SSL log.

- Created a basic test for SSL.
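The new issuer_subject value ends up in the SSL::Info record alongside the existing subject field, so local policy scripts can use it as well. A minimal sketch, assuming the base SSL scripts attach the record to the connection as c$ssl (the self-signed heuristic itself is only an illustration):

    @load base/protocols/ssl

    # Illustrative only: report certificates whose subject equals the subject
    # of their issuer, i.e. certificates that are likely self-signed.
    event connection_state_remove(c: connection)
        {
        if ( c?$ssl && c$ssl?$subject && c$ssl?$issuer_subject &&
             c$ssl$subject == c$ssl$issuer_subject )
            print fmt("possibly self-signed certificate on %s: %s",
                      c$id$resp_h, c$ssl$subject);
        }
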
--- scripts/base/frameworks/dpd/main.bro | 3 + scripts/base/protocols/ssl/main.bro | 3 + src/Analyzer.cc | 6 +- src/CMakeLists.txt | 2 +- src/{SSL-binpac.cc => SSL.cc} | 17 +++--- src/{SSL-binpac.h => SSL.h} | 13 ++-- src/ssl-analyzer.pac | 56 ++++++++++-------- src/ssl-defs.pac | 29 --------- src/ssl-protocol.pac | 23 ------- .../scripts.base.protocols.ssl.basic/ssl.log | 8 +++ .../Traces/tls-conn-with-extensions.trace | Bin 0 -> 24111 bytes .../scripts/base/protocols/ssl/basic.test | 4 ++ 12 files changed, 68 insertions(+), 96 deletions(-) rename src/{SSL-binpac.cc => SSL.cc} (66%) rename src/{SSL-binpac.h => SSL.h} (74%) create mode 100644 testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log create mode 100644 testing/btest/Traces/tls-conn-with-extensions.trace create mode 100644 testing/btest/scripts/base/protocols/ssl/basic.test diff --git a/scripts/base/frameworks/dpd/main.bro b/scripts/base/frameworks/dpd/main.bro index e8488c3ec1..9eb0b467f8 100644 --- a/scripts/base/frameworks/dpd/main.bro +++ b/scripts/base/frameworks/dpd/main.bro @@ -105,5 +105,8 @@ event protocol_violation(c: connection, atype: count, aid: count, reason: string) &priority=-5 { if ( c?$dpd ) + { Log::write(DPD::LOG, c$dpd); + delete c$dpd; + } } diff --git a/scripts/base/protocols/ssl/main.bro b/scripts/base/protocols/ssl/main.bro index 0b280a6bcf..b5f74d5122 100644 --- a/scripts/base/protocols/ssl/main.bro +++ b/scripts/base/protocols/ssl/main.bro @@ -24,6 +24,8 @@ export { session_id: string &log &optional; ## Subject of the X.509 certificate offered by the server. subject: string &log &optional; + ## Subject of the signer of the X.509 certificate offered by the server. + issuer_subject: string &log &optional; ## NotValidBefore field value from the server certificate. not_valid_before: time &log &optional; ## NotValidAfter field value from the serve certificate. @@ -146,6 +148,7 @@ event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: coun # Also save other certificate information about the primary cert. 
c$ssl$subject = cert$subject; + c$ssl$issuer_subject = cert$issuer; c$ssl$not_valid_before = cert$not_valid_before; c$ssl$not_valid_after = cert$not_valid_after; } diff --git a/src/Analyzer.cc b/src/Analyzer.cc index 92ca3ecc50..a2a35490e8 100644 --- a/src/Analyzer.cc +++ b/src/Analyzer.cc @@ -34,7 +34,7 @@ #include "Portmap.h" #include "POP3.h" #include "SSH.h" -#include "SSL-binpac.h" +#include "SSL.h" #include "Syslog-binpac.h" #include "ConnSizeAnalyzer.h" @@ -121,8 +121,8 @@ const Analyzer::Config Analyzer::analyzer_configs[] = { HTTP_Analyzer_binpac::InstantiateAnalyzer, HTTP_Analyzer_binpac::Available, 0, false }, { AnalyzerTag::SSL, "SSL", - SSL_Analyzer_binpac::InstantiateAnalyzer, - SSL_Analyzer_binpac::Available, 0, false }, + SSL_Analyzer::InstantiateAnalyzer, + SSL_Analyzer::Available, 0, false }, { AnalyzerTag::SYSLOG_BINPAC, "SYSLOG_BINPAC", Syslog_Analyzer_binpac::InstantiateAnalyzer, Syslog_Analyzer_binpac::Available, 0, false }, diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ce1b25dd42..9f9eb8a60f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -376,7 +376,7 @@ set(bro_SRCS SMB.cc SMTP.cc SSH.cc - SSL-binpac.cc + SSL.cc Scope.cc SerializationFormat.cc SerialObj.cc diff --git a/src/SSL-binpac.cc b/src/SSL.cc similarity index 66% rename from src/SSL-binpac.cc rename to src/SSL.cc index db9a7004d6..218b17080b 100644 --- a/src/SSL-binpac.cc +++ b/src/SSL.cc @@ -1,21 +1,21 @@ -#include "SSL-binpac.h" +#include "SSL.h" #include "TCP_Reassembler.h" #include "Reporter.h" #include "util.h" -SSL_Analyzer_binpac::SSL_Analyzer_binpac(Connection* c) +SSL_Analyzer::SSL_Analyzer(Connection* c) : TCP_ApplicationAnalyzer(AnalyzerTag::SSL, c) { interp = new binpac::SSL::SSL_Conn(this); had_gap = false; } -SSL_Analyzer_binpac::~SSL_Analyzer_binpac() +SSL_Analyzer::~SSL_Analyzer() { delete interp; } -void SSL_Analyzer_binpac::Done() +void SSL_Analyzer::Done() { TCP_ApplicationAnalyzer::Done(); @@ -23,23 +23,22 @@ void SSL_Analyzer_binpac::Done() interp->FlowEOF(false); } -void SSL_Analyzer_binpac::EndpointEOF(TCP_Reassembler* endp) +void SSL_Analyzer::EndpointEOF(TCP_Reassembler* endp) { TCP_ApplicationAnalyzer::EndpointEOF(endp); interp->FlowEOF(endp->IsOrig()); } -void SSL_Analyzer_binpac::DeliverStream(int len, const u_char* data, bool orig) +void SSL_Analyzer::DeliverStream(int len, const u_char* data, bool orig) { TCP_ApplicationAnalyzer::DeliverStream(len, data, orig); assert(TCP()); - if ( TCP()->IsPartial() ) return; if ( had_gap ) - // XXX: If only one side had a content gap, we could still try to + // If only one side had a content gap, we could still try to // deliver data to the other side if the script layer can handle this. 
return; @@ -53,7 +52,7 @@ void SSL_Analyzer_binpac::DeliverStream(int len, const u_char* data, bool orig) } } -void SSL_Analyzer_binpac::Undelivered(int seq, int len, bool orig) +void SSL_Analyzer::Undelivered(int seq, int len, bool orig) { TCP_ApplicationAnalyzer::Undelivered(seq, len, orig); had_gap = true; diff --git a/src/SSL-binpac.h b/src/SSL.h similarity index 74% rename from src/SSL-binpac.h rename to src/SSL.h index 8dab19d00c..c9f8d9be91 100644 --- a/src/SSL-binpac.h +++ b/src/SSL.h @@ -1,14 +1,13 @@ -#ifndef ssl_binpac_h -#define ssl_binpac_h +#ifndef ssl_h +#define ssl_h #include "TCP.h" - #include "ssl_pac.h" -class SSL_Analyzer_binpac : public TCP_ApplicationAnalyzer { +class SSL_Analyzer : public TCP_ApplicationAnalyzer { public: - SSL_Analyzer_binpac(Connection* conn); - virtual ~SSL_Analyzer_binpac(); + SSL_Analyzer(Connection* conn); + virtual ~SSL_Analyzer(); // Overriden from Analyzer. virtual void Done(); @@ -19,7 +18,7 @@ public: virtual void EndpointEOF(TCP_Reassembler* endp); static Analyzer* InstantiateAnalyzer(Connection* conn) - { return new SSL_Analyzer_binpac(conn); } + { return new SSL_Analyzer(conn); } static bool Available() { diff --git a/src/ssl-analyzer.pac b/src/ssl-analyzer.pac index f41fb8639b..32f060adf4 100644 --- a/src/ssl-analyzer.pac +++ b/src/ssl-analyzer.pac @@ -25,6 +25,7 @@ string orig_label(bool is_orig); void free_X509(void *); X509* d2i_X509_binpac(X509** px, const uint8** in, int len); + string handshake_type_label(int type); %} %code{ @@ -46,6 +47,27 @@ string orig_label(bool is_orig) return d2i_X509(px, (u_char**) in, len); #endif } + + string handshake_type_label(int type) + { + switch ( type ) { + case HELLO_REQUEST: return string("HELLO_REQUEST"); + case CLIENT_HELLO: return string("CLIENT_HELLO"); + case SERVER_HELLO: return string("SERVER_HELLO"); + case SESSION_TICKET: return string("SESSION_TICKET"); + case CERTIFICATE: return string("CERTIFICATE"); + case SERVER_KEY_EXCHANGE: return string("SERVER_KEY_EXCHANGE"); + case CERTIFICATE_REQUEST: return string("CERTIFICATE_REQUEST"); + case SERVER_HELLO_DONE: return string("SERVER_HELLO_DONE"); + case CERTIFICATE_VERIFY: return string("CERTIFICATE_VERIFY"); + case CLIENT_KEY_EXCHANGE: return string("CLIENT_KEY_EXCHANGE"); + case FINISHED: return string("FINISHED"); + case CERTIFICATE_URL: return string("CERTIFICATE_URL"); + case CERTIFICATE_STATUS: return string("CERTIFICATE_STATUS"); + default: return string(fmt("UNKNOWN (%d)", type)); + } + } + %} @@ -88,15 +110,15 @@ refine connection SSL_Conn += { eof=0; %} - %eof{ - if ( ! eof && - state_ != STATE_CONN_ESTABLISHED && - state_ != STATE_TRACK_LOST && - state_ != STATE_INITIAL ) - bro_analyzer()->ProtocolViolation(fmt("unexpected end of connection in state %s", - state_label(state_).c_str())); - ++eof; - %} + #%eof{ + # if ( ! eof && + # state_ != STATE_CONN_ESTABLISHED && + # state_ != STATE_TRACK_LOST && + # state_ != STATE_INITIAL ) + # bro_analyzer()->ProtocolViolation(fmt("unexpected end of connection in state %s", + # state_label(state_).c_str())); + # ++eof; + #%} %cleanup{ %} @@ -133,11 +155,6 @@ refine connection SSL_Conn += { cipher_suites16 : uint16[], cipher_suites24 : uint24[]) : bool %{ - if ( state_ == STATE_TRACK_LOST ) - bro_analyzer()->ProtocolViolation(fmt("unexpected client hello message from %s in state %s", - orig_label(${rec.is_orig}).c_str(), - state_label(old_state_).c_str())); - if ( ! 
version_ok(version) ) bro_analyzer()->ProtocolViolation(fmt("unsupported client SSL version 0x%04x", version)); @@ -175,11 +192,6 @@ refine connection SSL_Conn += { cipher_suites24 : uint24[], comp_method : uint8) : bool %{ - if ( state_ == STATE_TRACK_LOST ) - bro_analyzer()->ProtocolViolation(fmt("unexpected server hello message from %s in state %s", - orig_label(${rec.is_orig}).c_str(), - state_label(old_state_).c_str())); - if ( ! version_ok(version) ) bro_analyzer()->ProtocolViolation(fmt("unsupported server SSL version 0x%04x", version)); else @@ -229,11 +241,6 @@ refine connection SSL_Conn += { function proc_certificate(rec: SSLRecord, certificates : bytestring[]) : bool %{ - if ( state_ == STATE_TRACK_LOST ) - bro_analyzer()->ProtocolViolation(fmt("unexpected certificate message from %s in state %s", - orig_label(${rec.is_orig}).c_str(), - state_label(old_state_).c_str())); - if ( certificates->size() == 0 ) return true; @@ -362,6 +369,7 @@ refine connection SSL_Conn += { handshake_type_label(${hs.msg_type}).c_str(), orig_label(is_orig).c_str(), state_label(old_state_).c_str())); + return true; %} diff --git a/src/ssl-defs.pac b/src/ssl-defs.pac index 31d90338f5..b13b7c4881 100644 --- a/src/ssl-defs.pac +++ b/src/ssl-defs.pac @@ -17,35 +17,6 @@ enum ContentType { UNKNOWN_OR_V2_ENCRYPTED = 400 }; -%code{ - string* record_type_label(int type) - { - switch ( type ) { - case CHANGE_CIPHER_SPEC: - return new string("CHANGE_CIPHER_SPEC"); - case ALERT: - return new string("ALERT"); - case HANDSHAKE: - return new string("HANDSHAKE"); - case APPLICATION_DATA: - return new string("APPLICATION_DATA"); - case V2_ERROR: - return new string("V2_ERROR"); - case V2_CLIENT_HELLO: - return new string("V2_CLIENT_HELLO"); - case V2_CLIENT_MASTER_KEY: - return new string("V2_CLIENT_MASTER_KEY"); - case V2_SERVER_HELLO: - return new string("V2_SERVER_HELLO"); - case UNKNOWN_OR_V2_ENCRYPTED: - return new string("UNKNOWN_OR_V2_ENCRYPTED"); - - default: - return new string(fmt("UNEXPECTED (%d)", type)); - } - } -%} - enum SSLVersions { UNKNOWN_VERSION = 0x0000, SSLv20 = 0x0002, diff --git a/src/ssl-protocol.pac b/src/ssl-protocol.pac index 5bfa2c51f1..0019478518 100644 --- a/src/ssl-protocol.pac +++ b/src/ssl-protocol.pac @@ -23,7 +23,6 @@ type uint24 = record { string state_label(int state_nr); double get_time_from_asn1(const ASN1_TIME * atime); - string handshake_type_label(int type); %} extern type to_int; @@ -268,28 +267,6 @@ enum HandshakeType { CERTIFICATE_STATUS = 22, # RFC 3546 }; -%code{ - string handshake_type_label(int type) - { - switch ( type ) { - case HELLO_REQUEST: return string("HELLO_REQUEST"); - case CLIENT_HELLO: return string("CLIENT_HELLO"); - case SERVER_HELLO: return string("SERVER_HELLO"); - case SESSION_TICKET: return string("SESSION_TICKET"); - case CERTIFICATE: return string("CERTIFICATE"); - case SERVER_KEY_EXCHANGE: return string("SERVER_KEY_EXCHANGE"); - case CERTIFICATE_REQUEST: return string("CERTIFICATE_REQUEST"); - case SERVER_HELLO_DONE: return string("SERVER_HELLO_DONE"); - case CERTIFICATE_VERIFY: return string("CERTIFICATE_VERIFY"); - case CLIENT_KEY_EXCHANGE: return string("CLIENT_KEY_EXCHANGE"); - case FINISHED: return string("FINISHED"); - case CERTIFICATE_URL: return string("CERTIFICATE_URL"); - case CERTIFICATE_STATUS: return string("CERTIFICATE_STATUS"); - default: return string(fmt("UNKNOWN (%d)", type)); - } - } -%} - ###################################################################### # V3 Change Cipher Spec Protocol (7.1.) 
diff --git a/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log b/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log new file mode 100644 index 0000000000..74156362e5 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ssl +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher server_name session_id subject issuer_subject not_valid_before not_valid_after last_alert +#types time string addr port addr port string string string string string string time time string +1335538392.319381 UWkUyAuUGXf 192.168.1.105 62045 74.125.224.79 443 TLSv10 TLS_ECDHE_RSA_WITH_RC4_128_SHA ssl.gstatic.com - CN=*.gstatic.com,O=Google Inc,L=Mountain View,ST=California,C=US CN=Google Internet Authority,O=Google Inc,C=US 1334102677.000000 1365639277.000000 - diff --git a/testing/btest/Traces/tls-conn-with-extensions.trace b/testing/btest/Traces/tls-conn-with-extensions.trace new file mode 100644 index 0000000000000000000000000000000000000000..a3b724b3a184e320a93fb23db4c361672e96bdc5 GIT binary patch literal 24111 zcmb@NWmH|s^7aqz?iSqL-JPI81A*WY++Bl9g1c*QcXtcH-62>A65M&u$=uBU&g9N~ zdb3uqus7#acRyA2tM1;LnckWlZ~!FW*T3gy000blu~3t$q#g?iPz3%AY5)w>C?W^* zPu92=$gluO06^riNCY4Pt<2W-3Cu#ydqe3=7pzq&;s_D+;VSUw=aeP@02~sk4h#YU z3=9kwUT{@(1@t>e@IS0qu0jIjfY+e*huuH=09q#j2rLi-0Nl_WnIOxc9>7}9bc5wj z%78r}WutQ&=PLZPo(KsJ3(w@ikPWg9>4vYlQxcOB6Hf|)K z^>RPd0Qz7OpaOr4_XK1aWHDhK5^w=D25P@q-1WKzS)AbepB8Zg=yh@aY7rX(3{Vaf zxflXWDYa%{m>YH6E%Ukrd2%%^a(1*|t$68aIF2=CR!nIg0Fdc|@4^Ix0zv@10G=+C zE+{}CfU^sx3$6>T3j^o~34kep9l+Lw*oE8$-USIr1c(CN2?b#8g6%>FU;{9^kh&nA zo&oUSz#h4Q!^Z@m0{|$Fj#f-&j!p(n7Dh})w${&JAg{v%U;)qo8~`o=4}bsweSsVR zV0Ze%Uli<6f2#VMzc?01e)_B6Z~iI=`m56q6>tI_`-cWVMVo=XIsvH~EC2=g37a9OS3-aY3^F{zbLIA+P|1(=uXh%C^cV;$Nb0;S|W>zLvpk-(+1OT)e09pkA zk;D=J5yuh${*Vm;4h9Ym&vm(^3Nodv$ASb6M;9y;e2oMK1_cFRkzz$)frExnhlIue zS65}lU_k?2z+=Em+uE90nUKiZ7_pMEkbrJNVBr0E)5*la#>9z4)Y-}0*1^Kbo#h1% z5-S@E2P-QJ3kwH3w>I#S{f|pl&?Sr6|Lg}A176&~%EHvv!N$UX6`KVU)DH{>l7g+X zjgx_e4T*+@iR=F!)PFt z;c7

zV*3^WTGI-0XjH_Uos-pY>b_;RSy=Gako3w-adMX=9}s!KB+++sD*304AxX&`mdHn{ zag~bHORcU8X&3+2G9K_IR6wz%4Z>(cqznqM`GG(LF}c4ai`H$5FHITNSYBP0$^a*R zsaW1{6&LIj!xi+${c*M$`dMO>Il^t8V|GSG)`k)A4L@X)SL=b|jUS4J0fQ^Bk~;rQ z+b22NHb+Kr6fFkB&kckWNe+g3NO@r5ul0}#?Xb%&QAEXWaFd6)crSf*teOikOqF;q zy0QO(^Ph<`OZ{Ov^N3y}l&MgQ$oLCFc2e+K{ysZc6d^m_LZ`DMUWC7Kl}IoCogD?j zK0Jg+Jh*cPfS*TuyndaJ1~BPF5D=d*{9i=;e6&r(3F6Hr0wUp7$=O)DUJBqUpX8{! z*q=k*cq=ydjW=ISyRs8bR&!rk=mH_EM%n+u{#q0O6RbF)C_Xs-vna%i@I?WAf*y-APSp2AEy!LXxXy1j`K8K(^Jm*Avi38a&^>xA{tGVlC za^Y$o>J0q=%{C?Yv?XFJfN1L6jrz?JVX%bPf+!DEvyV0BkZwB9!~)yJfWN;$-ks2A%Y-kwZ$)@=7ecTaNyHK!u5BWGynqr z5(fmt2p19n{{7DJI>p%ld;OTR%Rf0waGY=^`tJdjfOf)}Z53c%=ZT5p1Uw3XGqkhP zm*&^oFqNWb^BNz;5<+I8vPvHbv@J)js=3Q`7Yufl;l{h$V8sVen7yH3h1={V%jstR4N+yOr_mHRg>F`37 zSqKPbN(k%d`MF Fe*mm9@-P4Z literal 0 HcmV?d00001 diff --git a/testing/btest/scripts/base/protocols/ssl/basic.test b/testing/btest/scripts/base/protocols/ssl/basic.test new file mode 100644 index 0000000000..94b0e87ec1 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ssl/basic.test @@ -0,0 +1,4 @@ +# This tests a normal SSL connection and the log it outputs. + +# @TEST-EXEC: bro -r $TRACES/tls-conn-with-extensions.trace %INPUT +# @TEST-EXEC: btest-diff ssl.log From bbac44a6a4b234405a5335dfe8c8ea2beef3f8d6 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 3 May 2012 13:13:58 -0500 Subject: [PATCH 270/651] Changes to open-file caching limits and uncached file unserialization. - Unserializing files that were previously kicked out of the open-file cache would cause them to be fopen'd with the original access permissions which is usually 'w' and causes truncation. They are now opened in 'a' mode. (addresses #780) - Add 'max_files_in_cache' script option to manually set the maximum amount of opened files to keep cached. Mainly this just helped to create a simple test case for the above change. - Remove unused NO_HAVE_SETRLIMIT preprocessor switch. - On systems that don't enforce a limit on number of files opened for the process, raise default max size of open-file cache from 32 to 512. --- scripts/base/init-bare.bro | 5 ++ src/File.cc | 20 ++++---- src/File.h | 8 ++- src/NetVar.cc | 2 + src/NetVar.h | 1 + .../core.file-caching-serialization/one0 | 4 ++ .../core.file-caching-serialization/one1 | 4 ++ .../core.file-caching-serialization/one2 | 4 ++ .../core.file-caching-serialization/two0 | 6 +++ .../core.file-caching-serialization/two1 | 6 +++ .../core.file-caching-serialization/two2 | 6 +++ .../core/file-caching-serialization.test | 49 +++++++++++++++++++ 12 files changed, 104 insertions(+), 11 deletions(-) create mode 100644 testing/btest/Baseline/core.file-caching-serialization/one0 create mode 100644 testing/btest/Baseline/core.file-caching-serialization/one1 create mode 100644 testing/btest/Baseline/core.file-caching-serialization/one2 create mode 100644 testing/btest/Baseline/core.file-caching-serialization/two0 create mode 100644 testing/btest/Baseline/core.file-caching-serialization/two1 create mode 100644 testing/btest/Baseline/core.file-caching-serialization/two2 create mode 100644 testing/btest/core/file-caching-serialization.test diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 8f428b8549..20ce7b8ff5 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -2329,6 +2329,11 @@ type bt_tracker_headers: table[string] of string; ## BPF filter the user has set via the -f command line options. Empty if none. 
const cmd_line_bpf_filter = "" &redef; +## The maximum number of open files to keep cached at a given time. +## If set to zero, this is automatically determined by inspecting +## the current/maximum limit on open files for the process. +const max_files_in_cache = 0 &redef; + ## Deprecated. const log_rotate_interval = 0 sec &redef; diff --git a/src/File.cc b/src/File.cc index d4e31bcc16..4f45c70a58 100644 --- a/src/File.cc +++ b/src/File.cc @@ -74,9 +74,8 @@ void RotateTimer::Dispatch(double t, int is_expire) // The following could in principle be part of a "file manager" object. -#define MAX_FILE_CACHE_SIZE 32 +#define MAX_FILE_CACHE_SIZE 512 static int num_files_in_cache = 0; -static int max_files_in_cache = 0; static BroFile* head = 0; static BroFile* tail = 0; @@ -87,9 +86,6 @@ double BroFile::default_rotation_size = 0; // that we should use for the cache. static int maximize_num_fds() { -#ifdef NO_HAVE_SETRLIMIT - return MAX_FILE_CACHE_SIZE; -#else struct rlimit rl; if ( getrlimit(RLIMIT_NOFILE, &rl) < 0 ) reporter->InternalError("maximize_num_fds(): getrlimit failed"); @@ -111,7 +107,6 @@ static int maximize_num_fds() reporter->InternalError("maximize_num_fds(): setrlimit failed"); return rl.rlim_cur / 2; -#endif } @@ -172,7 +167,7 @@ const char* BroFile::Name() const return 0; } -bool BroFile::Open(FILE* file) +bool BroFile::Open(FILE* file, const char* mode) { open_time = network_time ? network_time : current_time(); @@ -196,7 +191,12 @@ bool BroFile::Open(FILE* file) InstallRotateTimer(); if ( ! f ) - f = fopen(name, access); + { + if ( ! mode ) + f = fopen(name, access); + else + f = fopen(name, mode); + } SetBuf(buffered); @@ -846,8 +846,8 @@ BroFile* BroFile::Unserialize(UnserialInfo* info) } } - // Otherwise, open. - if ( ! file->Open() ) + // Otherwise, open, but don't clobber. + if ( ! file->Open(0, "a") ) { info->s->Error(fmt("cannot open %s: %s", file->name, strerror(errno))); diff --git a/src/File.h b/src/File.h index 444d6209e2..37f844867b 100644 --- a/src/File.h +++ b/src/File.h @@ -87,7 +87,13 @@ protected: BroFile() { Init(); } void Init(); - bool Open(FILE* f = 0); // if file is given, it's an open file to use + + /** + * If file is given, it's an open file to use already. + * If file is not given and mode is, the filename will be opened with that + * access mode. 
+ */ + bool Open(FILE* f = 0, const char* mode = 0); BroFile* Prev() { return prev; } BroFile* Next() { return next; } diff --git a/src/NetVar.cc b/src/NetVar.cc index 59cc1cc633..bdb566b20b 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -167,6 +167,7 @@ TableVal* preserve_orig_addr; TableVal* preserve_resp_addr; TableVal* preserve_other_addr; +int max_files_in_cache; double log_rotate_interval; double log_max_size; RecordType* rotate_info; @@ -257,6 +258,7 @@ void init_general_global_var() state_dir = internal_val("state_dir")->AsStringVal(); state_write_delay = opt_internal_double("state_write_delay"); + max_files_in_cache = opt_internal_int("max_files_in_cache"); log_rotate_interval = opt_internal_double("log_rotate_interval"); log_max_size = opt_internal_double("log_max_size"); rotate_info = internal_type("rotate_info")->AsRecordType(); diff --git a/src/NetVar.h b/src/NetVar.h index 425ea93e09..a7e750dc59 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -170,6 +170,7 @@ extern double connection_status_update_interval; extern StringVal* state_dir; extern double state_write_delay; +extern int max_files_in_cache; extern double log_rotate_interval; extern double log_max_size; extern RecordType* rotate_info; diff --git a/testing/btest/Baseline/core.file-caching-serialization/one0 b/testing/btest/Baseline/core.file-caching-serialization/one0 new file mode 100644 index 0000000000..abfe9a2af6 --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/one0 @@ -0,0 +1,4 @@ +opened +write 0 +write 3 +write 6 diff --git a/testing/btest/Baseline/core.file-caching-serialization/one1 b/testing/btest/Baseline/core.file-caching-serialization/one1 new file mode 100644 index 0000000000..d53edaed28 --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/one1 @@ -0,0 +1,4 @@ +opened +write 1 +write 4 +write 7 diff --git a/testing/btest/Baseline/core.file-caching-serialization/one2 b/testing/btest/Baseline/core.file-caching-serialization/one2 new file mode 100644 index 0000000000..5b5c9bc130 --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/one2 @@ -0,0 +1,4 @@ +opened +write 2 +write 5 +write 8 diff --git a/testing/btest/Baseline/core.file-caching-serialization/two0 b/testing/btest/Baseline/core.file-caching-serialization/two0 new file mode 100644 index 0000000000..88e273032e --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/two0 @@ -0,0 +1,6 @@ +opened +write 0 +opened +write 3 +opened +write 6 diff --git a/testing/btest/Baseline/core.file-caching-serialization/two1 b/testing/btest/Baseline/core.file-caching-serialization/two1 new file mode 100644 index 0000000000..b2f9350bc4 --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/two1 @@ -0,0 +1,6 @@ +opened +write 1 +opened +write 4 +opened +write 7 diff --git a/testing/btest/Baseline/core.file-caching-serialization/two2 b/testing/btest/Baseline/core.file-caching-serialization/two2 new file mode 100644 index 0000000000..94a971c7db --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/two2 @@ -0,0 +1,6 @@ +opened +write 2 +opened +write 5 +opened +write 8 diff --git a/testing/btest/core/file-caching-serialization.test b/testing/btest/core/file-caching-serialization.test new file mode 100644 index 0000000000..7ff1d8be8d --- /dev/null +++ b/testing/btest/core/file-caching-serialization.test @@ -0,0 +1,49 @@ +# This checks that the interactions between open-file caching and +# serialization works ok. 
In the first case, all files can fit +# in the cache, but get serialized before every write. In the +# second case, files are eventually forced out of the cache and +# undergo serialization, which requires re-opening. + +# @TEST-EXEC: bro -b %INPUT "test_file_prefix=one" +# @TEST-EXEC: btest-diff one0 +# @TEST-EXEC: btest-diff one1 +# @TEST-EXEC: btest-diff one2 +# @TEST-EXEC: bro -b %INPUT "test_file_prefix=two" "max_files_in_cache=2" +# @TEST-EXEC: btest-diff two0 +# @TEST-EXEC: btest-diff two1 +# @TEST-EXEC: btest-diff two2 + +const test_file_prefix = "" &redef; +global file_table: table[string] of file; +global iterations: vector of count = vector(0,1,2,3,4,5,6,7,8); + +function write_to_file(c: count) + { + local f: file; + # Take turns writing across three output files. + local filename = fmt("%s%s", test_file_prefix, c % 3 ); + + if ( filename in file_table ) + f = file_table[filename]; + else + { + f = open(filename); + file_table[filename] = f; + } + + # This when block is a trick to get the frame cloned + # and thus serialize the local file value + when ( local s = fmt("write %d", c) ) + print f, s; + } + +event file_opened(f: file) + { + print f, "opened"; + } + +event bro_init() + { + for ( i in iterations ) + write_to_file(iterations[i]); + } From c9b53706a15db9a0077bdffde2865bd36ad621b7 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 3 May 2012 11:45:11 -0700 Subject: [PATCH 271/651] Updating submodule(s). [nomail] --- aux/broctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broctl b/aux/broctl index 22df444f54..76e6bd4b18 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 22df444f54d8cbc05976ef4a5524c73a45ab6372 +Subproject commit 76e6bd4b182e9ff43456890e08aeaf451f9e4615 From 5984564946de035c5f26a5ab5b2378a21ad2d712 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 4 May 2012 11:21:18 -0500 Subject: [PATCH 272/651] Change IPv6 address/prefix output format to be bracketed. Also add a test case for content extraction. 
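
The bracketing rule is easiest to see in isolation. The following is a minimal, self-contained C++ sketch rather than Bro's actual code: it uses plain inet_ntop() where Bro calls its bro_inet_ntop() wrapper, and it only illustrates that IPv6 text is now wrapped in square brackets while IPv4 rendering is unchanged, matching the IPAddr::AsString() and WriterBackend::Render() hunks that follow.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <cstdio>
#include <string>

// Render an IPv6 address in the new bracketed form, e.g. "[2607:f8b0::]".
// IPv4 addresses keep their dotted-quad rendering and are not bracketed.
static std::string render_v6(const in6_addr& a)
	{
	char s[INET6_ADDRSTRLEN];
	if ( ! inet_ntop(AF_INET6, &a, s, INET6_ADDRSTRLEN) )
		return "";

	return std::string("[") + s + "]";
	}

int main()
	{
	in6_addr a;
	if ( inet_pton(AF_INET6, "2607:f8b0::", &a) != 1 )
		return 1;

	// Prefix output becomes "[2607:f8b0::]/32" instead of "2607:f8b0::/32".
	printf("%s/32\n", render_v6(a).c_str());
	return 0;
	}
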
--- scripts/base/protocols/ftp/main.bro | 7 +- src/IPAddr.cc | 2 +- src/logging/WriterBackend.cc | 7 +- .../bifs.addr_count_conversion/output | 2 +- .../Baseline/bifs.ptr_name_to_addr/output | 2 +- .../bifs.routing0_data_to_addrs/output | 2 +- testing/btest/Baseline/bifs.to_addr/output | 2 +- testing/btest/Baseline/bifs.to_subnet/output | 4 +- testing/btest/Baseline/core.conn-uid/output | 6 +- testing/btest/Baseline/core.discarder/output | 10 +- .../Baseline/core.icmp.icmp-context/output | 2 +- .../Baseline/core.icmp.icmp6-context/output | 24 +- .../Baseline/core.icmp.icmp6-events/output | 82 +++--- .../Baseline/core.ipv6-atomic-frag/output | 8 +- testing/btest/Baseline/core.ipv6-frag/dns.log | 4 +- testing/btest/Baseline/core.ipv6-frag/output | 10 +- testing/btest/Baseline/core.ipv6_esp/output | 240 +++++++++--------- .../Baseline/core.ipv6_ext_headers/output | 6 +- .../core.mobile-ipv6-home-addr/output | 4 +- .../Baseline/core.mobile-ipv6-routing/output | 4 +- .../btest/Baseline/core.mobility_msg/output | 16 +- .../Baseline/istate.broccoli-ipv6/bro..stdout | 8 +- .../Baseline/istate.pybroccoli/bro..stdout | 6 +- .../istate.pybroccoli/python..stdout.filtered | 6 +- .../Baseline/language.expire_func/output | 14 +- .../Baseline/language.ipv6-literals/output | 46 ++-- testing/btest/Baseline/language.sizeof/output | 2 +- .../local.log | 8 +- .../remote.log | 6 +- ...]:49185-[2001:470:4867:99::21]:21_orig.dat | 22 ++ ...]:49185-[2001:470:4867:99::21]:21_resp.dat | 73 ++++++ .../conn.log | 12 +- .../ftp.log | 4 +- .../conn/contents-default-extract.test | 3 + 34 files changed, 374 insertions(+), 280 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_orig.dat create mode 100644 testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_resp.dat create mode 100644 testing/btest/scripts/base/protocols/conn/contents-default-extract.test diff --git a/scripts/base/protocols/ftp/main.bro b/scripts/base/protocols/ftp/main.bro index 14f4307e5e..809ab61360 100644 --- a/scripts/base/protocols/ftp/main.bro +++ b/scripts/base/protocols/ftp/main.bro @@ -169,12 +169,7 @@ function ftp_message(s: Info) local arg = s$cmdarg$arg; if ( s$cmdarg$cmd in file_cmds ) - { - if ( is_v4_addr(s$id$resp_h) ) - arg = fmt("ftp://%s%s", s$id$resp_h, build_path_compressed(s$cwd, arg)); - else - arg = fmt("ftp://[%s]%s", s$id$resp_h, build_path_compressed(s$cwd, arg)); - } + arg = fmt("ftp://%s%s", s$id$resp_h, build_path_compressed(s$cwd, arg)); s$ts=s$cmdarg$ts; s$command=s$cmdarg$cmd; diff --git a/src/IPAddr.cc b/src/IPAddr.cc index 0ba5589fff..8d88cebc25 100644 --- a/src/IPAddr.cc +++ b/src/IPAddr.cc @@ -172,7 +172,7 @@ string IPAddr::AsString() const if ( ! bro_inet_ntop(AF_INET6, in6.s6_addr, s, INET6_ADDRSTRLEN) ) return ""; else return s; @@ -257,10 +258,10 @@ string WriterBackend::Render(const threading::Value::addr_t& addr) const { char s[INET6_ADDRSTRLEN]; - if ( inet_ntop(AF_INET6, &addr.in.in6, s, INET6_ADDRSTRLEN) == NULL ) + if ( ! 
bro_inet_ntop(AF_INET6, &addr.in.in6, s, INET6_ADDRSTRLEN) ) return ""; else - return s; + return string("[") + s + "]"; } } diff --git a/testing/btest/Baseline/bifs.addr_count_conversion/output b/testing/btest/Baseline/bifs.addr_count_conversion/output index 08a74512d3..c63e64b735 100644 --- a/testing/btest/Baseline/bifs.addr_count_conversion/output +++ b/testing/btest/Baseline/bifs.addr_count_conversion/output @@ -1,4 +1,4 @@ [536939960, 2242052096, 35374, 57701172] -2001:db8:85a3::8a2e:370:7334 +[2001:db8:85a3::8a2e:370:7334] [16909060] 1.2.3.4 diff --git a/testing/btest/Baseline/bifs.ptr_name_to_addr/output b/testing/btest/Baseline/bifs.ptr_name_to_addr/output index 7c290027aa..ebc4c15823 100644 --- a/testing/btest/Baseline/bifs.ptr_name_to_addr/output +++ b/testing/btest/Baseline/bifs.ptr_name_to_addr/output @@ -1,2 +1,2 @@ -2607:f8b0:4009:802::1012 +[2607:f8b0:4009:802::1012] 74.125.225.52 diff --git a/testing/btest/Baseline/bifs.routing0_data_to_addrs/output b/testing/btest/Baseline/bifs.routing0_data_to_addrs/output index c79aef89d0..7e37c7b77a 100644 --- a/testing/btest/Baseline/bifs.routing0_data_to_addrs/output +++ b/testing/btest/Baseline/bifs.routing0_data_to_addrs/output @@ -1 +1 @@ -[2001:78:1:32::1, 2001:78:1:32::2] +[[2001:78:1:32::1], [2001:78:1:32::2]] diff --git a/testing/btest/Baseline/bifs.to_addr/output b/testing/btest/Baseline/bifs.to_addr/output index ff277498f8..084261a8fd 100644 --- a/testing/btest/Baseline/bifs.to_addr/output +++ b/testing/btest/Baseline/bifs.to_addr/output @@ -6,4 +6,4 @@ to_addr(10.20.30.40) = 10.20.30.40 (SUCCESS) to_addr(100.200.30.40) = 100.200.30.40 (SUCCESS) to_addr(10.0.0.0) = 10.0.0.0 (SUCCESS) to_addr(10.00.00.000) = 10.0.0.0 (SUCCESS) -to_addr(not an IP) = :: (SUCCESS) +to_addr(not an IP) = [::] (SUCCESS) diff --git a/testing/btest/Baseline/bifs.to_subnet/output b/testing/btest/Baseline/bifs.to_subnet/output index 0775063f89..526c3d66b2 100644 --- a/testing/btest/Baseline/bifs.to_subnet/output +++ b/testing/btest/Baseline/bifs.to_subnet/output @@ -1,3 +1,3 @@ 10.0.0.0/8, T -2607:f8b0::/32, T -::/0, T +[2607:f8b0::]/32, T +[::]/0, T diff --git a/testing/btest/Baseline/core.conn-uid/output b/testing/btest/Baseline/core.conn-uid/output index c77eda4f04..a98469d075 100644 --- a/testing/btest/Baseline/core.conn-uid/output +++ b/testing/btest/Baseline/core.conn-uid/output @@ -1,5 +1,5 @@ [orig_h=141.142.220.202, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], UWkUyAuUGXf -[orig_h=fe80::217:f2ff:fed7:cf65, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], arKYeMETxOg +[orig_h=[fe80::217:f2ff:fed7:cf65], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], arKYeMETxOg [orig_h=141.142.220.50, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], k6kgXLOoSKl [orig_h=141.142.220.118, orig_p=35634/tcp, resp_h=208.80.152.2, resp_p=80/tcp], nQcgTWjvg4c [orig_h=141.142.220.118, orig_p=48649/tcp, resp_h=208.80.152.118, resp_p=80/tcp], j4u32Pc5bif @@ -36,8 +36,8 @@ [orig_h=141.142.220.235, orig_p=6705/tcp, resp_h=173.192.163.128, resp_p=80/tcp], 2cx26uAvUPl [orig_h=141.142.220.44, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], BWaU4aSuwkc [orig_h=141.142.220.226, orig_p=137/udp, resp_h=141.142.220.255, resp_p=137/udp], 10XodEwRycf -[orig_h=fe80::3074:17d5:2052:c324, orig_p=65373/udp, resp_h=ff02::1:3, resp_p=5355/udp], zno26fFZkrh +[orig_h=[fe80::3074:17d5:2052:c324], orig_p=65373/udp, resp_h=[ff02::1:3], resp_p=5355/udp], zno26fFZkrh [orig_h=141.142.220.226, orig_p=55131/udp, resp_h=224.0.0.252, resp_p=5355/udp], v5rgkJBig5l 
-[orig_h=fe80::3074:17d5:2052:c324, orig_p=54213/udp, resp_h=ff02::1:3, resp_p=5355/udp], eWZCH7OONC1 +[orig_h=[fe80::3074:17d5:2052:c324], orig_p=54213/udp, resp_h=[ff02::1:3], resp_p=5355/udp], eWZCH7OONC1 [orig_h=141.142.220.226, orig_p=55671/udp, resp_h=224.0.0.252, resp_p=5355/udp], 0Pwk3ntf8O3 [orig_h=141.142.220.238, orig_p=56641/udp, resp_h=141.142.220.255, resp_p=137/udp], 0HKorjr8Zp7 diff --git a/testing/btest/Baseline/core.discarder/output b/testing/btest/Baseline/core.discarder/output index 82b4b3e622..56b85cb83e 100644 --- a/testing/btest/Baseline/core.discarder/output +++ b/testing/btest/Baseline/core.discarder/output @@ -15,10 +15,10 @@ [orig_h=141.142.220.118, orig_p=50001/tcp, resp_h=208.80.152.3, resp_p=80/tcp] [orig_h=141.142.220.118, orig_p=35642/tcp, resp_h=208.80.152.2, resp_p=80/tcp] ################ UDP Discarder ################ -[orig_h=fe80::217:f2ff:fed7:cf65, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp] -[orig_h=fe80::3074:17d5:2052:c324, orig_p=65373/udp, resp_h=ff02::1:3, resp_p=5355/udp] -[orig_h=fe80::3074:17d5:2052:c324, orig_p=65373/udp, resp_h=ff02::1:3, resp_p=5355/udp] -[orig_h=fe80::3074:17d5:2052:c324, orig_p=54213/udp, resp_h=ff02::1:3, resp_p=5355/udp] -[orig_h=fe80::3074:17d5:2052:c324, orig_p=54213/udp, resp_h=ff02::1:3, resp_p=5355/udp] +[orig_h=[fe80::217:f2ff:fed7:cf65], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp] +[orig_h=[fe80::3074:17d5:2052:c324], orig_p=65373/udp, resp_h=[ff02::1:3], resp_p=5355/udp] +[orig_h=[fe80::3074:17d5:2052:c324], orig_p=65373/udp, resp_h=[ff02::1:3], resp_p=5355/udp] +[orig_h=[fe80::3074:17d5:2052:c324], orig_p=54213/udp, resp_h=[ff02::1:3], resp_p=5355/udp] +[orig_h=[fe80::3074:17d5:2052:c324], orig_p=54213/udp, resp_h=[ff02::1:3], resp_p=5355/udp] ################ ICMP Discarder ################ Discard icmp packet: [icmp_type=3] diff --git a/testing/btest/Baseline/core.icmp.icmp-context/output b/testing/btest/Baseline/core.icmp.icmp-context/output index 40dc778d8b..0820488cf8 100644 --- a/testing/btest/Baseline/core.icmp.icmp-context/output +++ b/testing/btest/Baseline/core.icmp.icmp-context/output @@ -1,7 +1,7 @@ icmp_unreachable (code=0) conn_id: [orig_h=10.0.0.1, orig_p=3/icmp, resp_h=10.0.0.2, resp_p=0/icmp] icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=0, hlim=64, v6=F] - icmp_context: [id=[orig_h=::, orig_p=0/unknown, resp_h=::, resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] + icmp_context: [id=[orig_h=[::], orig_p=0/unknown, resp_h=[::], resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) conn_id: [orig_h=10.0.0.1, orig_p=3/icmp, resp_h=10.0.0.2, resp_p=0/icmp] icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=20, hlim=64, v6=F] diff --git a/testing/btest/Baseline/core.icmp.icmp6-context/output b/testing/btest/Baseline/core.icmp.icmp6-context/output index 7a83679018..75b51ab697 100644 --- a/testing/btest/Baseline/core.icmp.icmp6-context/output +++ b/testing/btest/Baseline/core.icmp.icmp6-context/output @@ -1,16 +1,16 @@ icmp_unreachable (code=0) - conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=0, hlim=64, v6=T] - icmp_context: [id=[orig_h=::, orig_p=0/unknown, resp_h=::, resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=[fe80::dead], orig_p=1/icmp, resp_h=[fe80::beef], 
resp_p=0/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=1, icode=0, len=0, hlim=64, v6=T] + icmp_context: [id=[orig_h=[::], orig_p=0/unknown, resp_h=[::], resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) - conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=40, hlim=64, v6=T] - icmp_context: [id=[orig_h=fe80::beef, orig_p=0/unknown, resp_h=fe80::dead, resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=[fe80::dead], orig_p=1/icmp, resp_h=[fe80::beef], resp_p=0/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=1, icode=0, len=40, hlim=64, v6=T] + icmp_context: [id=[orig_h=[fe80::beef], orig_p=0/unknown, resp_h=[fe80::dead], resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) - conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, hlim=64, v6=T] - icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=[fe80::dead], orig_p=1/icmp, resp_h=[fe80::beef], resp_p=0/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=1, icode=0, len=60, hlim=64, v6=T] + icmp_context: [id=[orig_h=[fe80::beef], orig_p=30000/udp, resp_h=[fe80::dead], resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) - conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=48, hlim=64, v6=T] - icmp_context: [id=[orig_h=fe80::beef, orig_p=0/unknown, resp_h=fe80::dead, resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=[fe80::dead], orig_p=1/icmp, resp_h=[fe80::beef], resp_p=0/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=1, icode=0, len=48, hlim=64, v6=T] + icmp_context: [id=[orig_h=[fe80::beef], orig_p=0/unknown, resp_h=[fe80::dead], resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] diff --git a/testing/btest/Baseline/core.icmp.icmp6-events/output b/testing/btest/Baseline/core.icmp.icmp6-events/output index 81075b716a..8b41827dc0 100644 --- a/testing/btest/Baseline/core.icmp.icmp6-events/output +++ b/testing/btest/Baseline/core.icmp.icmp6-events/output @@ -1,46 +1,46 @@ icmp_unreachable (code=0) - conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, hlim=64, v6=T] - icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=[fe80::dead], orig_p=1/icmp, resp_h=[fe80::beef], resp_p=0/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=1, icode=0, len=60, hlim=64, v6=T] + icmp_context: [id=[orig_h=[fe80::beef], orig_p=30000/udp, resp_h=[fe80::dead], resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_packet_too_big (code=0) - conn_id: [orig_h=fe80::dead, orig_p=2/icmp, 
resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=2, icode=0, len=52, hlim=64, v6=T] - icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=[fe80::dead], orig_p=2/icmp, resp_h=[fe80::beef], resp_p=0/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=2, icode=0, len=52, hlim=64, v6=T] + icmp_context: [id=[orig_h=[fe80::beef], orig_p=30000/udp, resp_h=[fe80::dead], resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_time_exceeded (code=0) - conn_id: [orig_h=fe80::dead, orig_p=3/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=3, icode=0, len=52, hlim=64, v6=T] - icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=[fe80::dead], orig_p=3/icmp, resp_h=[fe80::beef], resp_p=0/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=3, icode=0, len=52, hlim=64, v6=T] + icmp_context: [id=[orig_h=[fe80::beef], orig_p=30000/udp, resp_h=[fe80::dead], resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_parameter_problem (code=0) - conn_id: [orig_h=fe80::dead, orig_p=4/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=4, icode=0, len=52, hlim=64, v6=T] - icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=[fe80::dead], orig_p=4/icmp, resp_h=[fe80::beef], resp_p=0/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=4, icode=0, len=52, hlim=64, v6=T] + icmp_context: [id=[orig_h=[fe80::beef], orig_p=30000/udp, resp_h=[fe80::dead], resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_echo_request (id=1, seq=3, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] + icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=3, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] + icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=4, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, 
v6=T] + conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] + icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=4, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] + icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=5, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] + icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=5, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] + icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] + icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] -icmp_redirect (tgt=fe80::cafe, dest=fe80::babe) - conn_id: [orig_h=fe80::dead, orig_p=137/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=137, icode=0, len=32, hlim=255, v6=T] + conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] + icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] +icmp_redirect (tgt=[fe80::cafe], dest=[fe80::babe]) + conn_id: [orig_h=[fe80::dead], orig_p=137/icmp, resp_h=[fe80::beef], resp_p=0/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=137, icode=0, len=32, hlim=255, v6=T] 
icmp_router_advertisement cur_hop_limit=13 managed=T @@ -52,17 +52,17 @@ icmp_router_advertisement router_lifetime=30.0 mins reachable_time=3.0 secs 700.0 msecs retrans_timer=1.0 sec 300.0 msecs - conn_id: [orig_h=fe80::dead, orig_p=134/icmp, resp_h=fe80::beef, resp_p=133/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=134, icode=0, len=8, hlim=255, v6=T] -icmp_neighbor_advertisement (tgt=fe80::babe) + conn_id: [orig_h=[fe80::dead], orig_p=134/icmp, resp_h=[fe80::beef], resp_p=133/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=134, icode=0, len=8, hlim=255, v6=T] +icmp_neighbor_advertisement (tgt=[fe80::babe]) router=T solicited=F override=T - conn_id: [orig_h=fe80::dead, orig_p=136/icmp, resp_h=fe80::beef, resp_p=135/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=136, icode=0, len=16, hlim=255, v6=T] + conn_id: [orig_h=[fe80::dead], orig_p=136/icmp, resp_h=[fe80::beef], resp_p=135/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=136, icode=0, len=16, hlim=255, v6=T] icmp_router_solicitation - conn_id: [orig_h=fe80::dead, orig_p=133/icmp, resp_h=fe80::beef, resp_p=134/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=133, icode=0, len=0, hlim=255, v6=T] -icmp_neighbor_solicitation (tgt=fe80::babe) - conn_id: [orig_h=fe80::dead, orig_p=135/icmp, resp_h=fe80::beef, resp_p=136/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=135, icode=0, len=16, hlim=255, v6=T] + conn_id: [orig_h=[fe80::dead], orig_p=133/icmp, resp_h=[fe80::beef], resp_p=134/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=133, icode=0, len=0, hlim=255, v6=T] +icmp_neighbor_solicitation (tgt=[fe80::babe]) + conn_id: [orig_h=[fe80::dead], orig_p=135/icmp, resp_h=[fe80::beef], resp_p=136/icmp] + icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=135, icode=0, len=16, hlim=255, v6=T] diff --git a/testing/btest/Baseline/core.ipv6-atomic-frag/output b/testing/btest/Baseline/core.ipv6-atomic-frag/output index 4a628a4bdc..b634ae11db 100644 --- a/testing/btest/Baseline/core.ipv6-atomic-frag/output +++ b/testing/btest/Baseline/core.ipv6-atomic-frag/output @@ -1,4 +1,4 @@ -[orig_h=2001:db8:1::2, orig_p=36951/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] -[orig_h=2001:db8:1::2, orig_p=59694/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] -[orig_h=2001:db8:1::2, orig_p=27393/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] -[orig_h=2001:db8:1::2, orig_p=45805/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] +[orig_h=[2001:db8:1::2], orig_p=36951/tcp, resp_h=[2001:db8:1::1], resp_p=80/tcp] +[orig_h=[2001:db8:1::2], orig_p=59694/tcp, resp_h=[2001:db8:1::1], resp_p=80/tcp] +[orig_h=[2001:db8:1::2], orig_p=27393/tcp, resp_h=[2001:db8:1::1], resp_p=80/tcp] +[orig_h=[2001:db8:1::2], orig_p=45805/tcp, resp_h=[2001:db8:1::1], resp_p=80/tcp] diff --git a/testing/btest/Baseline/core.ipv6-frag/dns.log b/testing/btest/Baseline/core.ipv6-frag/dns.log index 251f35d789..ccf9f4b73d 100644 --- a/testing/btest/Baseline/core.ipv6-frag/dns.log +++ b/testing/btest/Baseline/core.ipv6-frag/dns.log @@ -5,5 +5,5 @@ #path dns #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs #types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] -1331084278.438444 UWkUyAuUGXf 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51850 2607:f740:b::f93 53 udp 3903 
txtpadding_323.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 -1331084293.592245 arKYeMETxOg 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51851 2607:f740:b::f93 53 udp 40849 txtpadding_3230.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 +1331084278.438444 UWkUyAuUGXf [2001:470:1f11:81f:d138:5f55:6d4:1fe2] 51850 [2607:f740:b::f93] 53 udp 3903 txtpadding_323.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 +1331084293.592245 arKYeMETxOg [2001:470:1f11:81f:d138:5f55:6d4:1fe2] 51851 [2607:f740:b::f93] 53 udp 40849 txtpadding_3230.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 diff --git a/testing/btest/Baseline/core.ipv6-frag/output b/testing/btest/Baseline/core.ipv6-frag/output index 12dfc3a841..3ab244254b 100644 --- a/testing/btest/Baseline/core.ipv6-frag/output +++ b/testing/btest/Baseline/core.ipv6-frag/output @@ -1,5 +1,5 @@ -ip6=[class=0, flow=0, len=81, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[]], udp = [sport=51850/udp, dport=53/udp, ulen=81] -ip6=[class=0, flow=0, len=331, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2, exts=[]], udp = [sport=53/udp, dport=51850/udp, ulen=331] -ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] -ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] -ip6=[class=0, flow=0, len=3238, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2, exts=[]], udp = [sport=53/udp, dport=51851/udp, ulen=3238] +ip6=[class=0, flow=0, len=81, nxt=17, hlim=64, src=[2001:470:1f11:81f:d138:5f55:6d4:1fe2], dst=[2607:f740:b::f93], exts=[]], udp = [sport=51850/udp, dport=53/udp, ulen=81] +ip6=[class=0, flow=0, len=331, nxt=17, hlim=53, src=[2607:f740:b::f93], dst=[2001:470:1f11:81f:d138:5f55:6d4:1fe2], exts=[]], udp = [sport=53/udp, dport=51850/udp, ulen=331] +ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=[2001:470:1f11:81f:d138:5f55:6d4:1fe2], dst=[2607:f740:b::f93], exts=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] +ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=[2001:470:1f11:81f:d138:5f55:6d4:1fe2], dst=[2607:f740:b::f93], exts=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] +ip6=[class=0, flow=0, len=3238, nxt=17, hlim=53, src=[2607:f740:b::f93], dst=[2001:470:1f11:81f:d138:5f55:6d4:1fe2], exts=[]], udp = [sport=53/udp, dport=51851/udp, ulen=3238] diff --git a/testing/btest/Baseline/core.ipv6_esp/output b/testing/btest/Baseline/core.ipv6_esp/output index 02fb7e154f..834a3cd56e 100644 --- a/testing/btest/Baseline/core.ipv6_esp/output +++ b/testing/btest/Baseline/core.ipv6_esp/output @@ -1,120 +1,120 @@ -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3], 
mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, 
exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6], 
mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, 
src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=1], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=2], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=3], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=4], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=5], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=6], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=7], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=8], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, 
fragment=, ah=, esp=[spi=20, seq=9], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=10], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=1], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=2], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=3], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=4], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=5], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=6], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=7], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=8], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=9], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=10], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=1], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=2], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=3], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=4], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=5], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=6], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=7], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=8], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=9], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=10], mobility=]]] -[class=0, flow=0, len=76, nxt=50, 
hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=1], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=2], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=3], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=4], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=5], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=6], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=7], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=8], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=9], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=10], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], 
exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], 
exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], 
dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, 
src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=1], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=2], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=3], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=4], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=5], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=6], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=7], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=8], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=9], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, 
hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=10], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=1], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=2], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=3], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=4], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=5], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=6], mobility=]]] +[class=0, flow=0, len=76, nxt=50, 
hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=7], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=8], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=9], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=10], mobility=]]] diff --git a/testing/btest/Baseline/core.ipv6_ext_headers/output b/testing/btest/Baseline/core.ipv6_ext_headers/output index b4cd249371..e6ac3de822 100644 --- a/testing/btest/Baseline/core.ipv6_ext_headers/output +++ b/testing/btest/Baseline/core.ipv6_ext_headers/output @@ -1,3 +1,3 @@ -weird routing0_hdr from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:78:1:32::2 -[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=53/udp, resp_h=2001:78:1:32::2, resp_p=53/udp] -[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=, mobility=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] +weird routing0_hdr from [2001:4f8:4:7:2e0:81ff:fe52:ffff] to [2001:78:1:32::2] +[orig_h=[2001:4f8:4:7:2e0:81ff:fe52:ffff], orig_p=53/udp, resp_h=[2001:78:1:32::2], resp_p=53/udp] +[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=, mobility=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] diff --git a/testing/btest/Baseline/core.mobile-ipv6-home-addr/output b/testing/btest/Baseline/core.mobile-ipv6-home-addr/output index 88cbe0cb16..63e3fb92f9 100644 --- a/testing/btest/Baseline/core.mobile-ipv6-home-addr/output +++ b/testing/btest/Baseline/core.mobile-ipv6-home-addr/output @@ -1,2 +1,2 @@ -[orig_h=2001:78:1:32::1, orig_p=30000/udp, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=13000/udp] -[ip=, ip6=[class=0, flow=0, len=36, nxt=60, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=60, hopopts=, dstopts=[nxt=17, len=2, options=[[otype=1, len=2, data=\0\0], [otype=201, len=16, data= ^A\0x\0^A\02\0\0\0\0\0\0\0^A]]], routing=, fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] +[orig_h=[2001:78:1:32::1], orig_p=30000/udp, resp_h=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], resp_p=13000/udp] +[ip=, ip6=[class=0, flow=0, len=36, nxt=60, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=60, hopopts=, dstopts=[nxt=17, len=2, options=[[otype=1, len=2, data=\0\0], [otype=201, len=16, data= ^A\0x\0^A\02\0\0\0\0\0\0\0^A]]], routing=, fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, 
ulen=12], icmp=] diff --git a/testing/btest/Baseline/core.mobile-ipv6-routing/output b/testing/btest/Baseline/core.mobile-ipv6-routing/output index 04292caaa7..e1cd99da1c 100644 --- a/testing/btest/Baseline/core.mobile-ipv6-routing/output +++ b/testing/btest/Baseline/core.mobile-ipv6-routing/output @@ -1,2 +1,2 @@ -[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=30000/udp, resp_h=2001:78:1:32::1, resp_p=13000/udp] -[ip=, ip6=[class=0, flow=0, len=36, nxt=43, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=43, hopopts=, dstopts=, routing=[nxt=17, len=2, rtype=2, segleft=1, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] +[orig_h=[2001:4f8:4:7:2e0:81ff:fe52:ffff], orig_p=30000/udp, resp_h=[2001:78:1:32::1], resp_p=13000/udp] +[ip=, ip6=[class=0, flow=0, len=36, nxt=43, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=43, hopopts=, dstopts=, routing=[nxt=17, len=2, rtype=2, segleft=1, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] diff --git a/testing/btest/Baseline/core.mobility_msg/output b/testing/btest/Baseline/core.mobility_msg/output index 6f8d6a1699..64315bf370 100644 --- a/testing/btest/Baseline/core.mobility_msg/output +++ b/testing/btest/Baseline/core.mobility_msg/output @@ -1,16 +1,16 @@ Binding ACK: -[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=6, rsv=0, chksum=53722, msg=[id=6, brr=, hoti=, coti=, hot=, cot=, bu=, back=[status=0, k=T, seq=42, life=8, options=[[otype=1, len=2, data=\0\0]]], be=]]]]] +[class=0, flow=0, len=16, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=6, rsv=0, chksum=53722, msg=[id=6, brr=, hoti=, coti=, hot=, cot=, bu=, back=[status=0, k=T, seq=42, life=8, options=[[otype=1, len=2, data=\0\0]]], be=]]]]] Binding Error: -[class=0, flow=0, len=24, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=7, rsv=0, chksum=45272, msg=[id=7, brr=, hoti=, coti=, hot=, cot=, bu=, back=, be=[status=1, hoa=2001:78:1:32::1, options=[]]]]]]] +[class=0, flow=0, len=24, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=7, rsv=0, chksum=45272, msg=[id=7, brr=, hoti=, coti=, hot=, cot=, bu=, back=, be=[status=1, hoa=[2001:78:1:32::1], options=[]]]]]]] Binding Refresh Request: -[class=0, flow=0, len=8, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=0, mh_type=0, rsv=0, chksum=55703, msg=[id=0, brr=[rsv=0, options=[]], hoti=, coti=, hot=, cot=, bu=, back=, be=]]]]] +[class=0, flow=0, len=8, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=0, 
mh_type=0, rsv=0, chksum=55703, msg=[id=0, brr=[rsv=0, options=[]], hoti=, coti=, hot=, cot=, bu=, back=, be=]]]]] Binding Update: -[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=5, rsv=0, chksum=868, msg=[id=5, brr=, hoti=, coti=, hot=, cot=, bu=[seq=37, a=T, h=T, l=F, k=T, life=3, options=[[otype=1, len=2, data=\0\0]]], back=, be=]]]]] +[class=0, flow=0, len=16, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=5, rsv=0, chksum=868, msg=[id=5, brr=, hoti=, coti=, hot=, cot=, bu=[seq=37, a=T, h=T, l=F, k=T, life=3, options=[[otype=1, len=2, data=\0\0]]], back=, be=]]]]] Care-of Test: -[class=0, flow=0, len=24, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=4, rsv=0, chksum=54378, msg=[id=4, brr=, hoti=, coti=, hot=, cot=[nonce_idx=13, cookie=15, token=255, options=[]], bu=, back=, be=]]]]] +[class=0, flow=0, len=24, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=4, rsv=0, chksum=54378, msg=[id=4, brr=, hoti=, coti=, hot=, cot=[nonce_idx=13, cookie=15, token=255, options=[]], bu=, back=, be=]]]]] Care-of Test Init: -[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=2, rsv=0, chksum=55181, msg=[id=2, brr=, hoti=, coti=[rsv=0, cookie=1, options=[]], hot=, cot=, bu=, back=, be=]]]]] +[class=0, flow=0, len=16, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=2, rsv=0, chksum=55181, msg=[id=2, brr=, hoti=, coti=[rsv=0, cookie=1, options=[]], hot=, cot=, bu=, back=, be=]]]]] Home Test: -[class=0, flow=0, len=24, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=3, rsv=0, chksum=54634, msg=[id=3, brr=, hoti=, coti=, hot=[nonce_idx=13, cookie=15, token=255, options=[]], cot=, bu=, back=, be=]]]]] +[class=0, flow=0, len=24, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=3, rsv=0, chksum=54634, msg=[id=3, brr=, hoti=, coti=, hot=[nonce_idx=13, cookie=15, token=255, options=[]], cot=, bu=, back=, be=]]]]] Home Test Init: -[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=1, rsv=0, chksum=55437, msg=[id=1, brr=, hoti=[rsv=0, cookie=1, options=[]], coti=, hot=, cot=, bu=, back=, be=]]]]] +[class=0, flow=0, len=16, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, 
routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=1, rsv=0, chksum=55437, msg=[id=1, brr=, hoti=[rsv=0, cookie=1, options=[]], coti=, hot=, cot=, bu=, back=, be=]]]]] diff --git a/testing/btest/Baseline/istate.broccoli-ipv6/bro..stdout b/testing/btest/Baseline/istate.broccoli-ipv6/bro..stdout index 0a7bac52c5..5114999813 100644 --- a/testing/btest/Baseline/istate.broccoli-ipv6/bro..stdout +++ b/testing/btest/Baseline/istate.broccoli-ipv6/bro..stdout @@ -1,9 +1,9 @@ handshake done with peer bro_addr(1.2.3.4) bro_subnet(10.0.0.0/16) -bro_addr(2607:f8b0:4009:802::1014) -bro_subnet(2607:f8b0::/32) +bro_addr([2607:f8b0:4009:802::1014]) +bro_subnet([2607:f8b0::]/32) broccoli_addr(1.2.3.4) broccoli_subnet(10.0.0.0/16) -broccoli_addr(2607:f8b0:4009:802::1014) -broccoli_subnet(2607:f8b0::/32) +broccoli_addr([2607:f8b0:4009:802::1014]) +broccoli_subnet([2607:f8b0::]/32) diff --git a/testing/btest/Baseline/istate.pybroccoli/bro..stdout b/testing/btest/Baseline/istate.pybroccoli/bro..stdout index 70ca69dd98..9c4637125e 100644 --- a/testing/btest/Baseline/istate.pybroccoli/bro..stdout +++ b/testing/btest/Baseline/istate.pybroccoli/bro..stdout @@ -1,16 +1,16 @@ ==== atomic -10 2 -1330035434.516896 +1336148094.497041 2.0 mins F 1.5 Servus 5555/tcp 6.7.6.5 -2001:db8:85a3::8a2e:370:7334 +[2001:db8:85a3::8a2e:370:7334] 192.168.0.0/16 -2001:db8:85a3::/48 +[2001:db8:85a3::]/48 ==== record [a=42, b=6.6.7.7] 42, 6.6.7.7 diff --git a/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered b/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered index 5d98e2d759..5d1ca261c4 100644 --- a/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered +++ b/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered @@ -1,7 +1,7 @@ ==== atomic a 1 ==== -4L -4 42 42 -1330035434.5180 +1336148094.5020 60.0 True True 3.14 @@ -14,7 +14,7 @@ True True ==== atomic a 2 ==== -10L -10 2 2 -1330035434.5169 +1336148094.4970 120.0 False False 1.5 @@ -27,7 +27,7 @@ False False ==== atomic b 2 ==== -10L -10 2 - 1330035434.5169 + 1336148094.4970 120.0 False False 1.5 diff --git a/testing/btest/Baseline/language.expire_func/output b/testing/btest/Baseline/language.expire_func/output index 91cd2bad16..13be712d8a 100644 --- a/testing/btest/Baseline/language.expire_func/output +++ b/testing/btest/Baseline/language.expire_func/output @@ -16,7 +16,7 @@ am i, [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], here, -[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], am } { @@ -25,7 +25,7 @@ am i, [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], here, -[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], am } { @@ -34,7 +34,7 @@ am i, [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], here, -[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], am } @@ -45,7 +45,7 @@ i, [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], here, [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp], -[orig_h=fe80::20c:29ff:febd:6f01, 
orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], am } @@ -57,7 +57,7 @@ i, here, [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp], [orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp], -[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], am } @@ -70,7 +70,7 @@ i, here, [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp], [orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp], -[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], +[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], am } @@ -82,7 +82,7 @@ expired [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53 expired here expired [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp] expired [orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp] -expired [orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp] +expired [orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp] expired [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp] expired am { diff --git a/testing/btest/Baseline/language.ipv6-literals/output b/testing/btest/Baseline/language.ipv6-literals/output index 8542af7f91..a540fe999b 100644 --- a/testing/btest/Baseline/language.ipv6-literals/output +++ b/testing/btest/Baseline/language.ipv6-literals/output @@ -1,24 +1,24 @@ -::1 -::ffff -::255.255.255.255 -::10.10.255.255 -1::1 -1::a -1::1:1 -1::1:a -a::a -a::1 -a::a:a -a::a:1 -a:a::a -aaaa::ffff +[::1] +[::ffff] +[::255.255.255.255] +[::10.10.255.255] +[1::1] +[1::a] +[1::1:1] +[1::1:a] +[a::a] +[a::1] +[a::a:a] +[a::a:1] +[a:a::a] +[aaaa::ffff] 192.168.1.100 -ffff::c0a8:164 -::192.168.1.100 -::ffff:0:192.168.1.100 -805b:2d9d:dc28::fc57:d4c8:1fff -aaaa::bbbb -aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222 -aaaa:bbbb:cccc:dddd:eeee:ffff:1:2222 -aaaa:bbbb:cccc:dddd:eeee:ffff:0:2222 -aaaa:bbbb:cccc:dddd:eeee::2222 +[ffff::c0a8:164] +[::192.168.1.100] +[::ffff:0:192.168.1.100] +[805b:2d9d:dc28::fc57:d4c8:1fff] +[aaaa::bbbb] +[aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222] +[aaaa:bbbb:cccc:dddd:eeee:ffff:1:2222] +[aaaa:bbbb:cccc:dddd:eeee:ffff:0:2222] +[aaaa:bbbb:cccc:dddd:eeee::2222] diff --git a/testing/btest/Baseline/language.sizeof/output b/testing/btest/Baseline/language.sizeof/output index 43cb73f763..160ea9ab4c 100644 --- a/testing/btest/Baseline/language.sizeof/output +++ b/testing/btest/Baseline/language.sizeof/output @@ -1,5 +1,5 @@ IPv4 Address 1.2.3.4: 32 -IPv6 Address ::1: 128 +IPv6 Address [::1]: 128 Boolean T: 1 Count 10: 10 Double -1.23: 1.230000 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log index 291909b80a..c2c69f3153 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log +++ 
b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log @@ -5,15 +5,15 @@ #path local #fields ts id.orig_h #types time addr -1300475168.855330 141.142.220.118 +1300475168.859163 141.142.220.118 1300475168.652003 141.142.220.118 1300475168.895267 141.142.220.118 +1300475168.902635 141.142.220.118 +1300475168.892936 141.142.220.118 1300475168.855305 141.142.220.118 -1300475168.859163 141.142.220.118 1300475168.892913 141.142.220.118 1300475168.724007 141.142.220.118 -1300475168.892936 141.142.220.118 -1300475168.902635 141.142.220.118 +1300475168.855330 141.142.220.118 1300475168.891644 141.142.220.118 1300475170.862384 141.142.220.226 1300475168.853899 141.142.220.118 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log index b396c3fc2d..ed0636bc4a 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log @@ -6,6 +6,6 @@ #fields ts id.orig_h #types time addr 1300475169.780331 173.192.163.128 -1300475167.097012 fe80::217:f2ff:fed7:cf65 -1300475171.675372 fe80::3074:17d5:2052:c324 -1300475173.116749 fe80::3074:17d5:2052:c324 +1300475167.097012 [fe80::217:f2ff:fed7:cf65] +1300475171.675372 [fe80::3074:17d5:2052:c324] +1300475173.116749 [fe80::3074:17d5:2052:c324] diff --git a/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_orig.dat b/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_orig.dat new file mode 100644 index 0000000000..056ab8a44c --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_orig.dat @@ -0,0 +1,22 @@ +USER anonymous +PASS test +SYST +FEAT +PWD +EPSV +LIST +EPSV +NLST +TYPE I +SIZE robots.txt +EPSV +RETR robots.txt +MDTM robots.txt +SIZE robots.txt +EPRT |2|2001:470:1f11:81f:c999:d94:aa7c:2e3e|49189| +RETR robots.txt +MDTM robots.txt +TYPE A +EPRT |2|2001:470:1f11:81f:c999:d94:aa7c:2e3e|49190| +LIST +QUIT diff --git a/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_resp.dat b/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_resp.dat new file mode 100644 index 0000000000..05fe8b57d8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_resp.dat @@ -0,0 +1,73 @@ +220 ftp.NetBSD.org FTP server (NetBSD-ftpd 20100320) ready. +331 Guest login ok, type your name as password. +230- + The NetBSD Project FTP Server located in Redwood City, CA, USA + 1 Gbps connectivity courtesy of , , + Internet Systems Consortium WELCOME! 
/( )` + \ \___ / | + +--- Currently Supported Platforms ----+ /- _ `-/ ' + | acorn[26,32], algor, alpha, amd64, | (/\/ \ \ /\ + | amiga[,ppc], arc, atari, bebox, | / / | ` \ + | cats, cesfic, cobalt, dreamcast, | O O ) / | + | evb[arm,mips,ppc,sh3], hp[300,700], | `-^--'`< ' + | hpc[arm,mips,sh], i386, | (_.) _ ) / + | ibmnws, iyonix, luna68k, | .___/` / + | mac[m68k,ppc], mipsco, mmeye, | `-----' / + | mvme[m68k,ppc], netwinders, | <----. __ / __ \ + | news[m68k,mips], next68k, ofppc, | <----|====O)))==) \) /==== + | playstation2, pmax, prep, sandpoint, | <----' `--' `.__,' \ + | sbmips, sgimips, shark, sparc[,64], | | | + | sun[2,3], vax, x68k, xen | \ / + +--------------------------------------+ ______( (_ / \_____ + See our website at http://www.NetBSD.org/ ,' ,-----' | \ + We log all FTP transfers and commands. `--{__________) (FL) \/ +230- + EXPORT NOTICE + + Please note that portions of this FTP site contain cryptographic + software controlled under the Export Administration Regulations (EAR). + + None of this software may be downloaded or otherwise exported or + re-exported into (or to a national or resident of) Cuba, Iran, Libya, + Sudan, North Korea, Syria or any other country to which the U.S. has + embargoed goods. + + By downloading or using said software, you are agreeing to the + foregoing and you are representing and warranting that you are not + located in, under the control of, or a national or resident of any + such country or on any such list. +230 Guest login ok, access restrictions apply. +215 UNIX Type: L8 Version: NetBSD-ftpd 20100320 +211-Features supported + MDTM + MLST Type*;Size*;Modify*;Perm*;Unique*; + REST STREAM + SIZE + TVFS +211 End +257 "/" is the current directory. +229 Entering Extended Passive Mode (|||57086|) +150 Opening ASCII mode data connection for '/bin/ls'. +226 Transfer complete. +229 Entering Extended Passive Mode (|||57087|) +150 Opening ASCII mode data connection for 'file list'. +226 Transfer complete. +200 Type set to I. +213 77 +229 Entering Extended Passive Mode (|||57088|) +150 Opening BINARY mode data connection for 'robots.txt' (77 bytes). +226 Transfer complete. +213 20090816112038 +213 77 +200 EPRT command successful. +150 Opening BINARY mode data connection for 'robots.txt' (77 bytes). +226 Transfer complete. +213 20090816112038 +200 Type set to A. +200 EPRT command successful. +150 Opening ASCII mode data connection for '/bin/ls'. +226 Transfer complete. +221- + Data traffic for this session was 154 bytes in 2 files. + Total traffic for this session was 4512 bytes in 5 transfers. +221 Thank you for using the FTP service on ftp.NetBSD.org. 
diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log index c4a515710d..e398020a87 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log @@ -5,9 +5,9 @@ #path conn #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes #types time string addr port addr port enum string interval count count string bool count string count count count count -1329327783.316897 arKYeMETxOg 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49186 2001:470:4867:99::21 57086 tcp ftp-data 0.219721 0 342 SF - 0 ShAdfFa 5 372 4 642 -1329327786.524332 k6kgXLOoSKl 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49187 2001:470:4867:99::21 57087 tcp ftp-data 0.217501 0 43 SF - 0 ShAdfFa 5 372 4 343 -1329327787.289095 nQcgTWjvg4c 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49188 2001:470:4867:99::21 57088 tcp ftp-data 0.217941 0 77 SF - 0 ShAdfFa 5 372 4 377 -1329327795.571921 j4u32Pc5bif 2001:470:4867:99::21 55785 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49189 tcp ftp-data 0.109813 77 0 SF - 0 ShADFaf 5 449 4 300 -1329327777.822004 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 tcp ftp 26.658219 310 3448 SF - 0 ShAdDfFa 57 4426 34 5908 -1329327800.017649 TEfuqmmG4bh 2001:470:4867:99::21 55647 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49190 tcp ftp-data 0.109181 342 0 SF - 0 ShADFaf 5 714 4 300 +1329327783.316897 arKYeMETxOg [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49186 [2001:470:4867:99::21] 57086 tcp ftp-data 0.219721 0 342 SF - 0 ShAdfFa 5 372 4 642 +1329327786.524332 k6kgXLOoSKl [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49187 [2001:470:4867:99::21] 57087 tcp ftp-data 0.217501 0 43 SF - 0 ShAdfFa 5 372 4 343 +1329327787.289095 nQcgTWjvg4c [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49188 [2001:470:4867:99::21] 57088 tcp ftp-data 0.217941 0 77 SF - 0 ShAdfFa 5 372 4 377 +1329327795.571921 j4u32Pc5bif [2001:470:4867:99::21] 55785 [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49189 tcp ftp-data 0.109813 77 0 SF - 0 ShADFaf 5 449 4 300 +1329327777.822004 UWkUyAuUGXf [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49185 [2001:470:4867:99::21] 21 tcp ftp 26.658219 310 3448 SF - 0 ShAdDfFa 57 4426 34 5908 +1329327800.017649 TEfuqmmG4bh [2001:470:4867:99::21] 55647 [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49190 tcp ftp-data 0.109181 342 0 SF - 0 ShADFaf 5 714 4 300 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log index 8bc2ef2cb7..61375d7233 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log @@ -5,5 +5,5 @@ #path ftp #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p user password command arg mime_type mime_desc file_size reply_code reply_msg tags extraction_file #types time string addr port addr port string string string string string string count count string table[string] file -1329327787.396984 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. 
- - -1329327795.463946 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. - - +1329327787.396984 UWkUyAuUGXf [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49185 [2001:470:4867:99::21] 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. - - +1329327795.463946 UWkUyAuUGXf [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49185 [2001:470:4867:99::21] 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. - - diff --git a/testing/btest/scripts/base/protocols/conn/contents-default-extract.test b/testing/btest/scripts/base/protocols/conn/contents-default-extract.test new file mode 100644 index 0000000000..82f46b62c8 --- /dev/null +++ b/testing/btest/scripts/base/protocols/conn/contents-default-extract.test @@ -0,0 +1,3 @@ +# @TEST-EXEC: bro -f "tcp port 21" -r $TRACES/ipv6-ftp.trace "Conn::default_extract=T" +# @TEST-EXEC: btest-diff contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_orig.dat +# @TEST-EXEC: btest-diff contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_resp.dat From 79afc834ce4218ac986c16dffa5f835fa3b7b6a2 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 4 May 2012 16:09:05 -0500 Subject: [PATCH 273/651] Add SHA1 and SHA256 hashing BIFs. (addresses #542) Also refactor all internal MD5 stuff to use OpenSSL's. --- src/Anon.cc | 1 - src/CMakeLists.txt | 6 +- src/DFA.cc | 15 +- src/Func.cc | 1 - src/MIME.cc | 5 +- src/MIME.h | 5 +- src/bro.bif | 301 ++++++++++++++++- src/digest.h | 92 ++++++ src/main.cc | 5 +- src/md5.c | 380 ---------------------- src/md5.h | 90 ----- src/util.cc | 27 +- src/util.h | 4 - testing/btest/Baseline/bifs.md5/output | 4 + testing/btest/Baseline/bifs.sha1/output | 4 + testing/btest/Baseline/bifs.sha256/output | 4 + testing/btest/bifs/md5.test | 16 + testing/btest/bifs/sha1.test | 16 + testing/btest/bifs/sha256.test | 16 + 19 files changed, 461 insertions(+), 531 deletions(-) create mode 100644 src/digest.h delete mode 100644 src/md5.c delete mode 100644 src/md5.h create mode 100644 testing/btest/Baseline/bifs.md5/output create mode 100644 testing/btest/Baseline/bifs.sha1/output create mode 100644 testing/btest/Baseline/bifs.sha256/output create mode 100644 testing/btest/bifs/md5.test create mode 100644 testing/btest/bifs/sha1.test create mode 100644 testing/btest/bifs/sha256.test diff --git a/src/Anon.cc b/src/Anon.cc index d2a28a0e08..f58057b2fc 100644 --- a/src/Anon.cc +++ b/src/Anon.cc @@ -5,7 +5,6 @@ #include "util.h" #include "net_util.h" -#include "md5.h" #include "Anon.h" #include "Val.h" #include "NetVar.h" diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ce1b25dd42..4e73ad69b4 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -247,7 +247,6 @@ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/DebugCmdConstants.h WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} ) -set(dns_SRCS nb_dns.c) set_source_files_properties(nb_dns.c PROPERTIES COMPILE_FLAGS -fno-strict-aliasing) @@ -403,7 +402,6 @@ set(bro_SRCS bsd-getopt-long.c bro_inet_ntop.c cq.c - md5.c patricia.c setsignal.c PacketDumper.cc @@ -421,8 +419,8 @@ set(bro_SRCS logging/writers/Ascii.cc logging/writers/None.cc - ${dns_SRCS} - ${openssl_SRCS} + nb_dns.c + digest.h ) collect_headers(bro_HEADERS ${bro_SRCS}) diff --git a/src/DFA.cc b/src/DFA.cc index e58ea260e5..06ccfd9342 100644 --- a/src/DFA.cc +++ b/src/DFA.cc @@ -2,9 +2,10 @@ #include 
"config.h" +#include + #include "EquivClass.h" #include "DFA.h" -#include "md5.h" int dfa_state_cache_size = 10000; @@ -312,8 +313,8 @@ DFA_State* DFA_State_Cache::Lookup(const NFA_state_list& nfas, { // We assume that state ID's don't exceed 10 digits, plus // we allow one more character for the delimiter. - md5_byte_t id_tag[nfas.length() * 11 + 1]; - md5_byte_t* p = id_tag; + u_char id_tag[nfas.length() * 11 + 1]; + u_char* p = id_tag; for ( int i = 0; i < nfas.length(); ++i ) { @@ -335,12 +336,8 @@ DFA_State* DFA_State_Cache::Lookup(const NFA_state_list& nfas, // We use the short MD5 instead of the full string for the // HashKey because the data is copied into the key. - md5_state_t state; - md5_byte_t digest[16]; - - md5_init(&state); - md5_append(&state, id_tag, p - id_tag); - md5_finish(&state, digest); + u_char digest[16]; + MD5(id_tag, p - id_tag, digest); *hash = new HashKey(&digest, sizeof(digest)); CacheEntry* e = states.Lookup(*hash); diff --git a/src/Func.cc b/src/Func.cc index 65cb22b09d..ecb341e3e0 100644 --- a/src/Func.cc +++ b/src/Func.cc @@ -29,7 +29,6 @@ #include -#include "md5.h" #include "Base64.h" #include "Stmt.h" #include "Scope.h" diff --git a/src/MIME.cc b/src/MIME.cc index 103cf149ef..4a7c0268b0 100644 --- a/src/MIME.cc +++ b/src/MIME.cc @@ -4,6 +4,7 @@ #include "MIME.h" #include "Event.h" #include "Reporter.h" +#include "digest.h" // Here are a few things to do: // @@ -1008,7 +1009,7 @@ void MIME_Mail::Done() if ( compute_content_hash && mime_content_hash ) { u_char* digest = new u_char[16]; - md5_finish(&md5_hash, digest); + md5_final(&md5_hash, digest); val_list* vl = new val_list; vl->append(analyzer->BuildConnVal()); @@ -1096,7 +1097,7 @@ void MIME_Mail::SubmitData(int len, const char* buf) if ( compute_content_hash ) { content_hash_length += len; - md5_append(&md5_hash, (const u_char*) buf, len); + md5_update(&md5_hash, (const u_char*) buf, len); } if ( mime_entity_data || mime_all_data ) diff --git a/src/MIME.h b/src/MIME.h index 52d943fb15..ffff30e387 100644 --- a/src/MIME.h +++ b/src/MIME.h @@ -2,13 +2,12 @@ #define mime_h #include - +#include #include #include #include using namespace std; -#include "md5.h" #include "Base64.h" #include "BroString.h" #include "Analyzer.h" @@ -248,7 +247,7 @@ protected: int buffer_offset; int compute_content_hash; int content_hash_length; - md5_state_t md5_hash; + MD5_CTX md5_hash; vector entity_content; vector all_content; diff --git a/src/bro.bif b/src/bro.bif index f76704cfe6..15740a83c7 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -6,13 +6,13 @@ %%{ // C segment #include - #include #include #include #include #include +#include "digest.h" #include "Reporter.h" #include "IPAddr.h" @@ -530,7 +530,7 @@ function piped_exec%(program: string, to_write: string%): bool %%{ static void hash_md5_val(val_list& vlist, unsigned char digest[16]) { - md5_state_s h; + MD5_CTX h; md5_init(&h); loop_over_list(vlist, i) @@ -539,16 +539,16 @@ static void hash_md5_val(val_list& vlist, unsigned char digest[16]) if ( v->Type()->Tag() == TYPE_STRING ) { const BroString* str = v->AsString(); - md5_append(&h, str->Bytes(), str->Len()); + md5_update(&h, str->Bytes(), str->Len()); } else { ODesc d(DESC_BINARY); v->Describe(&d); - md5_append(&h, (const md5_byte_t *) d.Bytes(), d.Len()); + md5_update(&h, (const u_char *) d.Bytes(), d.Len()); } } - md5_finish(&h, digest); + md5_final(&h, digest); } static void hmac_md5_val(val_list& vlist, unsigned char digest[16]) @@ -556,7 +556,53 @@ static void hmac_md5_val(val_list& vlist, unsigned char 
digest[16]) hash_md5_val(vlist, digest); for ( int i = 0; i < 16; ++i ) digest[i] = digest[i] ^ shared_hmac_md5_key[i]; - hash_md5(16, digest, digest); + MD5(digest, 16, digest); + } + +static void hash_sha1_val(val_list& vlist, unsigned char digest[20]) + { + SHA_CTX h; + + sha1_init(&h); + loop_over_list(vlist, i) + { + Val* v = vlist[i]; + if ( v->Type()->Tag() == TYPE_STRING ) + { + const BroString* str = v->AsString(); + sha1_update(&h, str->Bytes(), str->Len()); + } + else + { + ODesc d(DESC_BINARY); + v->Describe(&d); + sha1_update(&h, (const u_char *) d.Bytes(), d.Len()); + } + } + sha1_final(&h, digest); + } + +static void hash_sha256_val(val_list& vlist, unsigned char digest[32]) + { + SHA256_CTX h; + + sha256_init(&h); + loop_over_list(vlist, i) + { + Val* v = vlist[i]; + if ( v->Type()->Tag() == TYPE_STRING ) + { + const BroString* str = v->AsString(); + sha256_update(&h, str->Bytes(), str->Len()); + } + else + { + ODesc d(DESC_BINARY); + v->Describe(&d); + sha256_update(&h, (const u_char *) d.Bytes(), d.Len()); + } + } + sha256_final(&h, digest); } %%} @@ -565,6 +611,8 @@ static void hmac_md5_val(val_list& vlist, unsigned char digest[16]) ## Returns: The MD5 hash value of the concatenated arguments. ## ## .. bro:see:: md5_hmac md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish ## ## .. note:: ## @@ -578,6 +626,46 @@ function md5_hash%(...%): string return new StringVal(md5_digest_print(digest)); %} +## Computes the SHA1 hash value of the provided list of arguments. +## +## Returns: The SHA1 hash value of the concatenated arguments. +## +## .. bro:see:: md5_hash md5_hmac md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +## +## .. note:: +## +## This function performs a one-shot computation of its arguments. +## For incremental hash computation, see :bro:id:`sha1_hash_init` and +## friends. +function sha1_hash%(...%): string + %{ + unsigned char digest[20]; + hash_sha1_val(@ARG@, digest); + return new StringVal(sha1_digest_print(digest)); + %} + +## Computes the SHA256 hash value of the provided list of arguments. +## +## Returns: The SHA256 hash value of the concatenated arguments. +## +## .. bro:see:: md5_hash md5_hmac md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash_init sha256_hash_update sha256_hash_finish +## +## .. note:: +## +## This function performs a one-shot computation of its arguments. +## For incremental hash computation, see :bro:id:`sha256_hash_init` and +## friends. +function sha256_hash%(...%): string + %{ + unsigned char digest[32]; + hash_sha256_val(@ARG@, digest); + return new StringVal(sha256_digest_print(digest)); + %} + ## Computes an HMAC-MD5 hash value of the provided list of arguments. The HMAC ## secret key is generated from available entropy when Bro starts up, or it can ## be specified for repeatability using the ``-K`` command line flag. @@ -585,6 +673,8 @@ function md5_hash%(...%): string ## Returns: The HMAC-MD5 hash value of the concatenated arguments. ## ## .. 
bro:see:: md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish function md5_hmac%(...%): string %{ unsigned char digest[16]; @@ -593,7 +683,9 @@ function md5_hmac%(...%): string %} %%{ -static map md5_states; +static map md5_states; +static map sha1_states; +static map sha256_states; BroString* convert_index_to_string(Val* index) { @@ -618,7 +710,9 @@ BroString* convert_index_to_string(Val* index) ## ## index: The unique identifier to associate with this hash computation. ## -## .. bro:see:: md5_hash md5_hmac md5_hash_update md5_hash_finish +## .. bro:see:: md5_hmac md5_hash md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish function md5_hash_init%(index: any%): bool %{ BroString* s = convert_index_to_string(index); @@ -626,7 +720,7 @@ function md5_hash_init%(index: any%): bool if ( md5_states.count(*s) < 1 ) { - md5_state_s h; + MD5_CTX h; md5_init(&h); md5_states[*s] = h; status = 1; @@ -636,6 +730,75 @@ function md5_hash_init%(index: any%): bool return new Val(status, TYPE_BOOL); %} +## Initializes SHA1 state to enable incremental hash computation. After +## initializing the SHA1 state with this function, you can feed data to +## :bro:id:`sha1_hash_update` and finally need to call +## :bro:id:`sha1_hash_finish` to finish the computation and get the final hash +## value. +## +## For example, when computing incremental SHA1 values of transferred files in +## multiple concurrent HTTP connections, one would call ``sha1_hash_init(c$id)`` +## once before invoking ``sha1_hash_update(c$id, some_more_data)`` in the +## :bro:id:`http_entity_data` event handler. When all data has arrived, a call +## to :bro:id:`sha1_hash_finish` returns the final hash value. +## +## index: The unique identifier to associate with this hash computation. +## +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function sha1_hash_init%(index: any%): bool + %{ + BroString* s = convert_index_to_string(index); + int status = 0; + + if ( sha1_states.count(*s) < 1 ) + { + SHA_CTX h; + sha1_init(&h); + sha1_states[*s] = h; + status = 1; + } + + delete s; + return new Val(status, TYPE_BOOL); + %} + +## Initializes SHA256 state to enable incremental hash computation. After +## initializing the SHA256 state with this function, you can feed data to +## :bro:id:`sha256_hash_update` and finally need to call +## :bro:id:`sha256_hash_finish` to finish the computation and get the final hash +## value. +## +## For example, when computing incremental SHA256 values of transferred files in +## multiple concurrent HTTP connections, one would call +## ``sha256_hash_init(c$id)`` once before invoking +## ``sha256_hash_update(c$id, some_more_data)`` in the +## :bro:id:`http_entity_data` event handler. When all data has arrived, a call +## to :bro:id:`sha256_hash_finish` returns the final hash value. +## +## index: The unique identifier to associate with this hash computation. +## +## .. 
bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_update sha256_hash_finish +function sha256_hash_init%(index: any%): bool + %{ + BroString* s = convert_index_to_string(index); + int status = 0; + + if ( sha256_states.count(*s) < 1 ) + { + SHA256_CTX h; + sha256_init(&h); + sha256_states[*s] = h; + status = 1; + } + + delete s; + return new Val(status, TYPE_BOOL); + %} + ## Update the MD5 value associated with a given index. It is required to ## call :bro:id:`md5_hash_init` once before calling this ## function. @@ -644,7 +807,9 @@ function md5_hash_init%(index: any%): bool ## ## data: The data to add to the hash computation. ## -## .. bro:see:: md5_hash md5_hmac md5_hash_init md5_hash_finish +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish function md5_hash_update%(index: any, data: string%): bool %{ BroString* s = convert_index_to_string(index); @@ -652,7 +817,59 @@ function md5_hash_update%(index: any, data: string%): bool if ( md5_states.count(*s) > 0 ) { - md5_append(&md5_states[*s], data->Bytes(), data->Len()); + md5_update(&md5_states[*s], data->Bytes(), data->Len()); + status = 1; + } + + delete s; + return new Val(status, TYPE_BOOL); + %} + +## Update the SHA1 value associated with a given index. It is required to +## call :bro:id:`sha1_hash_init` once before calling this +## function. +## +## index: The unique identifier to associate with this hash computation. +## +## data: The data to add to the hash computation. +## +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function sha1_hash_update%(index: any, data: string%): bool + %{ + BroString* s = convert_index_to_string(index); + int status = 0; + + if ( sha1_states.count(*s) > 0 ) + { + sha1_update(&sha1_states[*s], data->Bytes(), data->Len()); + status = 1; + } + + delete s; + return new Val(status, TYPE_BOOL); + %} + +## Update the SHA256 value associated with a given index. It is required to +## call :bro:id:`sha256_hash_init` once before calling this +## function. +## +## index: The unique identifier to associate with this hash computation. +## +## data: The data to add to the hash computation. +## +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_finish +function sha256_hash_update%(index: any, data: string%): bool + %{ + BroString* s = convert_index_to_string(index); + int status = 0; + + if ( sha256_states.count(*s) > 0 ) + { + sha256_update(&sha256_states[*s], data->Bytes(), data->Len()); status = 1; } @@ -666,7 +883,9 @@ function md5_hash_update%(index: any, data: string%): bool ## ## Returns: The hash value associated with the computation at *index*. ## -## .. bro:see:: md5_hash md5_hmac md5_hash_init md5_hash_update +## .. 
bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish function md5_hash_finish%(index: any%): string %{ BroString* s = convert_index_to_string(index); @@ -675,7 +894,7 @@ function md5_hash_finish%(index: any%): string if ( md5_states.count(*s) > 0 ) { unsigned char digest[16]; - md5_finish(&md5_states[*s], digest); + md5_final(&md5_states[*s], digest); md5_states.erase(*s); printable_digest = new StringVal(md5_digest_print(digest)); } @@ -686,6 +905,62 @@ function md5_hash_finish%(index: any%): string return printable_digest; %} +## Returns the final SHA1 digest of an incremental hash computation. +## +## index: The unique identifier of this hash computation. +## +## Returns: The hash value associated with the computation at *index*. +## +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function sha1_hash_finish%(index: any%): string + %{ + BroString* s = convert_index_to_string(index); + StringVal* printable_digest; + + if ( sha1_states.count(*s) > 0 ) + { + unsigned char digest[20]; + sha1_final(&sha1_states[*s], digest); + sha1_states.erase(*s); + printable_digest = new StringVal(sha1_digest_print(digest)); + } + else + printable_digest = new StringVal(""); + + delete s; + return printable_digest; + %} + +## Returns the final SHA256 digest of an incremental hash computation. +## +## index: The unique identifier of this hash computation. +## +## Returns: The hash value associated with the computation at *index*. +## +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update +function sha256_hash_finish%(index: any%): string + %{ + BroString* s = convert_index_to_string(index); + StringVal* printable_digest; + + if ( sha256_states.count(*s) > 0 ) + { + unsigned char digest[32]; + sha256_final(&sha256_states[*s], digest); + sha256_states.erase(*s); + printable_digest = new StringVal(sha256_digest_print(digest)); + } + else + printable_digest = new StringVal(""); + + delete s; + return printable_digest; + %} + ## Generates a random number. ## ## max: The maximum value the random number. diff --git a/src/digest.h b/src/digest.h new file mode 100644 index 0000000000..ef52ba059a --- /dev/null +++ b/src/digest.h @@ -0,0 +1,92 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +/** + * Wrapper and helper functions for MD5/SHA digest algorithms. + */ + +#ifndef bro_digest_h +#define bro_digest_h + +#include +#include + +#include "Reporter.h" + +static inline const char* digest_print(const u_char* digest, size_t n) + { + static char buf[256]; // big enough for any of md5/sha1/sha256 + for ( size_t i = 0; i < n; ++i ) + snprintf(buf + i * 2, 3, "%02x", digest[i]); + return buf; + } + +inline const char* md5_digest_print(const u_char digest[MD5_DIGEST_LENGTH]) + { + return digest_print(digest, MD5_DIGEST_LENGTH); + } + +inline const char* sha1_digest_print(const u_char digest[SHA_DIGEST_LENGTH]) + { + return digest_print(digest, SHA_DIGEST_LENGTH); + } + +inline const char* sha256_digest_print(const u_char digest[SHA256_DIGEST_LENGTH]) + { + return digest_print(digest, SHA256_DIGEST_LENGTH); + } + +inline void md5_init(MD5_CTX* c) + { + if ( ! 
MD5_Init(c) ) + reporter->InternalError("MD5_Init failed"); + } + +inline void md5_update(MD5_CTX* c, const void* data, unsigned long len) + { + if ( ! MD5_Update(c, data, len) ) + reporter->InternalError("MD5_Update failed"); + } + +inline void md5_final(MD5_CTX* c, u_char md[MD5_DIGEST_LENGTH]) + { + if ( ! MD5_Final(md, c) ) + reporter->InternalError("MD5_Final failed"); + } + +inline void sha1_init(SHA_CTX* c) + { + if ( ! SHA1_Init(c) ) + reporter->InternalError("SHA_Init failed"); + } + +inline void sha1_update(SHA_CTX* c, const void* data, unsigned long len) + { + if ( ! SHA1_Update(c, data, len) ) + reporter->InternalError("SHA_Update failed"); + } + +inline void sha1_final(SHA_CTX* c, u_char md[SHA_DIGEST_LENGTH]) + { + if ( ! SHA1_Final(md, c) ) + reporter->InternalError("SHA_Final failed"); + } + +inline void sha256_init(SHA256_CTX* c) + { + if ( ! SHA256_Init(c) ) + reporter->InternalError("SHA256_Init failed"); + } + +inline void sha256_update(SHA256_CTX* c, const void* data, unsigned long len) + { + if ( ! SHA256_Update(c, data, len) ) + reporter->InternalError("SHA256_Update failed"); + } + +inline void sha256_final(SHA256_CTX* c, u_char md[SHA256_DIGEST_LENGTH]) + { + if ( ! SHA256_Final(md, c) ) + reporter->InternalError("SHA256_Final failed"); + } + +#endif //bro_digest_h diff --git a/src/main.cc b/src/main.cc index ff33a3859d..89783031bf 100644 --- a/src/main.cc +++ b/src/main.cc @@ -18,6 +18,8 @@ extern "C" { } #endif +#include + extern "C" void OPENSSL_add_all_algorithms_conf(void); #include "bsd-getopt-long.h" @@ -570,8 +572,7 @@ int main(int argc, char** argv) break; case 'K': - hash_md5(strlen(optarg), (const u_char*) optarg, - shared_hmac_md5_key); + MD5((const u_char*) optarg, strlen(optarg), shared_hmac_md5_key); hmac_key_set = 1; break; diff --git a/src/md5.c b/src/md5.c deleted file mode 100644 index 888993b9c4..0000000000 --- a/src/md5.c +++ /dev/null @@ -1,380 +0,0 @@ -/* - Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. Peter Deutsch - ghost@aladdin.com - - */ -/* - Independent implementation of MD5 (RFC 1321). - - This code implements the MD5 Algorithm defined in RFC 1321, whose - text is available at - http://www.ietf.org/rfc/rfc1321.txt - The code is derived from the text of the RFC, including the test suite - (section A.5) but excluding the rest of Appendix A. It does not include - any code or documentation that is identified in the RFC as being - copyrighted. - - The original and principal author of md5.c is L. Peter Deutsch - . 
Other authors are noted in the change history - that follows (in reverse chronological order): - - 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order - either statically or dynamically; added missing #include - in library. - 2002-03-11 lpd Corrected argument list for main(), and added int return - type, in test program and T value program. - 2002-02-21 lpd Added missing #include in test program. - 2000-07-03 lpd Patched to eliminate warnings about "constant is - unsigned in ANSI C, signed in traditional"; made test program - self-checking. - 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. - 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5). - 1999-05-03 lpd Original version. - */ - -#include "md5.h" -#include - -#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */ -#ifdef ARCH_IS_BIG_ENDIAN -# define BYTE_ORDER (ARCH_IS_BIG_ENDIAN ? 1 : -1) -#else -# define BYTE_ORDER 0 -#endif - -#define T_MASK ((md5_word_t)~0) -#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87) -#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9) -#define T3 0x242070db -#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111) -#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050) -#define T6 0x4787c62a -#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec) -#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe) -#define T9 0x698098d8 -#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850) -#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e) -#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841) -#define T13 0x6b901122 -#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c) -#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71) -#define T16 0x49b40821 -#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d) -#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf) -#define T19 0x265e5a51 -#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855) -#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2) -#define T22 0x02441453 -#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e) -#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437) -#define T25 0x21e1cde6 -#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829) -#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278) -#define T28 0x455a14ed -#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa) -#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07) -#define T31 0x676f02d9 -#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375) -#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd) -#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e) -#define T35 0x6d9d6122 -#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3) -#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb) -#define T38 0x4bdecfa9 -#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f) -#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f) -#define T41 0x289b7ec6 -#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805) -#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a) -#define T44 0x04881d05 -#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6) -#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a) -#define T47 0x1fa27cf8 -#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a) -#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb) -#define T50 0x432aff97 -#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58) -#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6) -#define T53 0x655b59c3 -#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d) -#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82) -#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e) -#define T57 0x6fa87e4f -#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f) -#define T59 /* 0xa3014314 */ (T_MASK ^ 
0x5cfebceb) -#define T60 0x4e0811a1 -#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d) -#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca) -#define T63 0x2ad7d2bb -#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e) - - -static void -md5_process(md5_state_t *pms, const md5_byte_t *data /*[64]*/) -{ - md5_word_t - a = pms->abcd[0], b = pms->abcd[1], - c = pms->abcd[2], d = pms->abcd[3]; - md5_word_t t; -#if BYTE_ORDER > 0 - /* Define storage only for big-endian CPUs. */ - md5_word_t X[16]; -#else - /* Define storage for little-endian or both types of CPUs. */ - md5_word_t xbuf[16]; - const md5_word_t *X; -#endif - - { -#if BYTE_ORDER == 0 - /* - * Determine dynamically whether this is a big-endian or - * little-endian machine, since we can use a more efficient - * algorithm on the latter. - */ - static const int w = 1; - - if (*((const md5_byte_t *)&w)) /* dynamic little-endian */ -#endif -#if BYTE_ORDER <= 0 /* little-endian */ - { - /* - * On little-endian machines, we can process properly aligned - * data without copying it. - */ - if (!((data - (const md5_byte_t *)0) & 3)) { - /* data are properly aligned */ - X = (const md5_word_t *)data; - } else { - /* not aligned */ - memcpy(xbuf, data, 64); - X = xbuf; - } - } -#endif -#if BYTE_ORDER == 0 - else /* dynamic big-endian */ -#endif -#if BYTE_ORDER >= 0 /* big-endian */ - { - /* - * On big-endian machines, we must arrange the bytes in the - * right order. - */ - const md5_byte_t *xp = data; - int i; - -# if BYTE_ORDER == 0 - X = xbuf; /* (dynamic only) */ -# else -# define xbuf X /* (static only) */ -# endif - for (i = 0; i < 16; ++i, xp += 4) - xbuf[i] = xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24); - } -#endif - } - -#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) - - /* Round 1. */ - /* Let [abcd k s i] denote the operation - a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */ -#define F(x, y, z) (((x) & (y)) | (~(x) & (z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + F(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 0, 7, T1); - SET(d, a, b, c, 1, 12, T2); - SET(c, d, a, b, 2, 17, T3); - SET(b, c, d, a, 3, 22, T4); - SET(a, b, c, d, 4, 7, T5); - SET(d, a, b, c, 5, 12, T6); - SET(c, d, a, b, 6, 17, T7); - SET(b, c, d, a, 7, 22, T8); - SET(a, b, c, d, 8, 7, T9); - SET(d, a, b, c, 9, 12, T10); - SET(c, d, a, b, 10, 17, T11); - SET(b, c, d, a, 11, 22, T12); - SET(a, b, c, d, 12, 7, T13); - SET(d, a, b, c, 13, 12, T14); - SET(c, d, a, b, 14, 17, T15); - SET(b, c, d, a, 15, 22, T16); -#undef SET - - /* Round 2. */ - /* Let [abcd k s i] denote the operation - a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */ -#define G(x, y, z) (((x) & (z)) | ((y) & ~(z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + G(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 1, 5, T17); - SET(d, a, b, c, 6, 9, T18); - SET(c, d, a, b, 11, 14, T19); - SET(b, c, d, a, 0, 20, T20); - SET(a, b, c, d, 5, 5, T21); - SET(d, a, b, c, 10, 9, T22); - SET(c, d, a, b, 15, 14, T23); - SET(b, c, d, a, 4, 20, T24); - SET(a, b, c, d, 9, 5, T25); - SET(d, a, b, c, 14, 9, T26); - SET(c, d, a, b, 3, 14, T27); - SET(b, c, d, a, 8, 20, T28); - SET(a, b, c, d, 13, 5, T29); - SET(d, a, b, c, 2, 9, T30); - SET(c, d, a, b, 7, 14, T31); - SET(b, c, d, a, 12, 20, T32); -#undef SET - - /* Round 3. */ - /* Let [abcd k s t] denote the operation - a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). 
*/ -#define H(x, y, z) ((x) ^ (y) ^ (z)) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + H(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 5, 4, T33); - SET(d, a, b, c, 8, 11, T34); - SET(c, d, a, b, 11, 16, T35); - SET(b, c, d, a, 14, 23, T36); - SET(a, b, c, d, 1, 4, T37); - SET(d, a, b, c, 4, 11, T38); - SET(c, d, a, b, 7, 16, T39); - SET(b, c, d, a, 10, 23, T40); - SET(a, b, c, d, 13, 4, T41); - SET(d, a, b, c, 0, 11, T42); - SET(c, d, a, b, 3, 16, T43); - SET(b, c, d, a, 6, 23, T44); - SET(a, b, c, d, 9, 4, T45); - SET(d, a, b, c, 12, 11, T46); - SET(c, d, a, b, 15, 16, T47); - SET(b, c, d, a, 2, 23, T48); -#undef SET - - /* Round 4. */ - /* Let [abcd k s t] denote the operation - a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */ -#define I(x, y, z) ((y) ^ ((x) | ~(z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + I(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 0, 6, T49); - SET(d, a, b, c, 7, 10, T50); - SET(c, d, a, b, 14, 15, T51); - SET(b, c, d, a, 5, 21, T52); - SET(a, b, c, d, 12, 6, T53); - SET(d, a, b, c, 3, 10, T54); - SET(c, d, a, b, 10, 15, T55); - SET(b, c, d, a, 1, 21, T56); - SET(a, b, c, d, 8, 6, T57); - SET(d, a, b, c, 15, 10, T58); - SET(c, d, a, b, 6, 15, T59); - SET(b, c, d, a, 13, 21, T60); - SET(a, b, c, d, 4, 6, T61); - SET(d, a, b, c, 11, 10, T62); - SET(c, d, a, b, 2, 15, T63); - SET(b, c, d, a, 9, 21, T64); -#undef SET - - /* Then perform the following additions. (That is increment each - of the four registers by the value it had before this block - was started.) */ - pms->abcd[0] += a; - pms->abcd[1] += b; - pms->abcd[2] += c; - pms->abcd[3] += d; -} - -void -md5_init(md5_state_t *pms) -{ - pms->count[0] = pms->count[1] = 0; - pms->abcd[0] = 0x67452301; - pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476; - pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301; - pms->abcd[3] = 0x10325476; -} - -void -md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes) -{ - const md5_byte_t *p = data; - int left = nbytes; - int offset = (pms->count[0] >> 3) & 63; - md5_word_t nbits = (md5_word_t)(nbytes << 3); - - if (nbytes <= 0) - return; - - /* Update the message length. */ - pms->count[1] += nbytes >> 29; - pms->count[0] += nbits; - if (pms->count[0] < nbits) - pms->count[1]++; - - /* Process an initial partial block. */ - if (offset) { - int copy = (offset + nbytes > 64 ? 64 - offset : nbytes); - - memcpy(pms->buf + offset, p, copy); - if (offset + copy < 64) - return; - p += copy; - left -= copy; - md5_process(pms, pms->buf); - } - - /* Process full blocks. */ - for (; left >= 64; p += 64, left -= 64) - md5_process(pms, p); - - /* Process a final partial block. */ - if (left) - memcpy(pms->buf, p, left); -} - -void -md5_finish(md5_state_t *pms, md5_byte_t digest[16]) -{ - static const md5_byte_t pad[64] = { - 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - md5_byte_t data[8]; - int i; - - /* Save the length before padding. */ - for (i = 0; i < 8; ++i) - data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3)); - /* Pad to 56 bytes mod 64. */ - md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1); - /* Append the length. 
*/ - md5_append(pms, data, 8); - for (i = 0; i < 16; ++i) - digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3)); -} diff --git a/src/md5.h b/src/md5.h deleted file mode 100644 index 2806b5b9b5..0000000000 --- a/src/md5.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - Copyright (C) 1999, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. Peter Deutsch - ghost@aladdin.com - - */ -/* - Independent implementation of MD5 (RFC 1321). - - This code implements the MD5 Algorithm defined in RFC 1321, whose - text is available at - http://www.ietf.org/rfc/rfc1321.txt - The code is derived from the text of the RFC, including the test suite - (section A.5) but excluding the rest of Appendix A. It does not include - any code or documentation that is identified in the RFC as being - copyrighted. - - The original and principal author of md5.h is L. Peter Deutsch - . Other authors are noted in the change history - that follows (in reverse chronological order): - - 2002-04-13 lpd Removed support for non-ANSI compilers; removed - references to Ghostscript; clarified derivation from RFC 1321; - now handles byte order either statically or dynamically. - 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. - 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5); - added conditionalization for C++ compilation from Martin - Purschke . - 1999-05-03 lpd Original version. - */ - -#ifndef md5_INCLUDED -# define md5_INCLUDED - -/* - * This package supports both compile-time and run-time determination of CPU - * byte order. If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be - * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is - * defined as non-zero, the code will be compiled to run only on big-endian - * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to - * run on either big- or little-endian CPUs, but will run slightly less - * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined. - */ - -typedef unsigned char md5_byte_t; /* 8-bit byte */ -typedef unsigned int md5_word_t; /* 32-bit word */ - -/* Define the state of the MD5 Algorithm. */ -typedef struct md5_state_s { - md5_word_t count[2]; /* message length in bits, lsw first */ - md5_word_t abcd[4]; /* digest buffer */ - md5_byte_t buf[64]; /* accumulate block */ -} md5_state_t; - -#ifdef __cplusplus -extern "C" -{ -#endif - -/* Initialize the algorithm. */ -void md5_init(md5_state_t *pms); - -/* Append a string to the message. */ -void md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes); - -/* Finish the message and return the digest. 
*/ -void md5_finish(md5_state_t *pms, md5_byte_t digest[16]); - -#ifdef __cplusplus -} /* end extern "C" */ -#endif - -#endif /* md5_INCLUDED */ diff --git a/src/util.cc b/src/util.cc index 856e90d156..90143923f1 100644 --- a/src/util.cc +++ b/src/util.cc @@ -27,6 +27,8 @@ #include #include #include +#include +#include #ifdef HAVE_MALLINFO # include @@ -35,7 +37,6 @@ #include "input.h" #include "util.h" #include "Obj.h" -#include "md5.h" #include "Val.h" #include "NetVar.h" #include "Net.h" @@ -546,24 +547,6 @@ bool is_dir(const char* path) return S_ISDIR(st.st_mode); } -void hash_md5(size_t size, const unsigned char* bytes, unsigned char digest[16]) - { - md5_state_s h; - md5_init(&h); - md5_append(&h, bytes, size); - md5_finish(&h, digest); - } - -const char* md5_digest_print(const unsigned char digest[16]) - { - static char digest_print[256]; - - for ( int i = 0; i < 16; ++i ) - snprintf(digest_print + i * 2, 3, "%02x", digest[i]); - - return digest_print; - } - int hmac_key_set = 0; uint8 shared_hmac_md5_key[16]; @@ -572,12 +555,12 @@ void hmac_md5(size_t size, const unsigned char* bytes, unsigned char digest[16]) if ( ! hmac_key_set ) reporter->InternalError("HMAC-MD5 invoked before the HMAC key is set"); - hash_md5(size, bytes, digest); + MD5(bytes, size, digest); for ( int i = 0; i < 16; ++i ) digest[i] ^= shared_hmac_md5_key[i]; - hash_md5(16, digest, digest); + MD5(digest, 16, digest); } static bool read_random_seeds(const char* read_file, uint32* seed, @@ -724,7 +707,7 @@ void init_random_seed(uint32 seed, const char* read_file, const char* write_file if ( ! hmac_key_set ) { - hash_md5(sizeof(buf), (u_char*) buf, shared_hmac_md5_key); + MD5((const u_char*) buf, sizeof(buf), shared_hmac_md5_key); hmac_key_set = 1; } diff --git a/src/util.h b/src/util.h index a4e3aa71b8..6b237edfd8 100644 --- a/src/util.h +++ b/src/util.h @@ -136,16 +136,12 @@ extern bool ensure_dir(const char *dirname); bool is_dir(const char* path); extern uint8 shared_hmac_md5_key[16]; -extern void hash_md5(size_t size, const unsigned char* bytes, - unsigned char digest[16]); extern int hmac_key_set; extern unsigned char shared_hmac_md5_key[16]; extern void hmac_md5(size_t size, const unsigned char* bytes, unsigned char digest[16]); -extern const char* md5_digest_print(const unsigned char digest[16]); - // Initializes RNGs for bro_random() and MD5 usage. If seed is given, then // it is used (to provide determinism). If load_file is given, the seeds // (both random & MD5) are loaded from that file. 
This takes precedence diff --git a/testing/btest/Baseline/bifs.md5/output b/testing/btest/Baseline/bifs.md5/output new file mode 100644 index 0000000000..71c0fbfcb8 --- /dev/null +++ b/testing/btest/Baseline/bifs.md5/output @@ -0,0 +1,4 @@ +f97c5d29941bfb1b2fdab0874906ab82 +7b0391feb2e0cd271f1cf39aafb4376f +f97c5d29941bfb1b2fdab0874906ab82 +7b0391feb2e0cd271f1cf39aafb4376f diff --git a/testing/btest/Baseline/bifs.sha1/output b/testing/btest/Baseline/bifs.sha1/output new file mode 100644 index 0000000000..ddcf9060b9 --- /dev/null +++ b/testing/btest/Baseline/bifs.sha1/output @@ -0,0 +1,4 @@ +fe05bcdcdc4928012781a5f1a2a77cbb5398e106 +3e949019500deb1369f13d9644d420d3a920aa5e +fe05bcdcdc4928012781a5f1a2a77cbb5398e106 +3e949019500deb1369f13d9644d420d3a920aa5e diff --git a/testing/btest/Baseline/bifs.sha256/output b/testing/btest/Baseline/bifs.sha256/output new file mode 100644 index 0000000000..5bd6a63fa4 --- /dev/null +++ b/testing/btest/Baseline/bifs.sha256/output @@ -0,0 +1,4 @@ +7692c3ad3540bb803c020b3aee66cd8887123234ea0c6e7143c0add73ff431ed +4592092e1061c7ea85af2aed194621cc17a2762bae33a79bf8ce33fd0168b801 +7692c3ad3540bb803c020b3aee66cd8887123234ea0c6e7143c0add73ff431ed +4592092e1061c7ea85af2aed194621cc17a2762bae33a79bf8ce33fd0168b801 diff --git a/testing/btest/bifs/md5.test b/testing/btest/bifs/md5.test new file mode 100644 index 0000000000..2632d76cb4 --- /dev/null +++ b/testing/btest/bifs/md5.test @@ -0,0 +1,16 @@ +# @TEST-EXEC: bro -b %INPUT >output +# @TEST-EXEC: btest-diff output + +print md5_hash("one"); +print md5_hash("one", "two", "three"); + +md5_hash_init("a"); +md5_hash_init("b"); + +md5_hash_update("a", "one"); +md5_hash_update("b", "one"); +md5_hash_update("b", "two"); +md5_hash_update("b", "three"); + +print md5_hash_finish("a"); +print md5_hash_finish("b"); diff --git a/testing/btest/bifs/sha1.test b/testing/btest/bifs/sha1.test new file mode 100644 index 0000000000..85c8df99c5 --- /dev/null +++ b/testing/btest/bifs/sha1.test @@ -0,0 +1,16 @@ +# @TEST-EXEC: bro -b %INPUT >output +# @TEST-EXEC: btest-diff output + +print sha1_hash("one"); +print sha1_hash("one", "two", "three"); + +sha1_hash_init("a"); +sha1_hash_init("b"); + +sha1_hash_update("a", "one"); +sha1_hash_update("b", "one"); +sha1_hash_update("b", "two"); +sha1_hash_update("b", "three"); + +print sha1_hash_finish("a"); +print sha1_hash_finish("b"); diff --git a/testing/btest/bifs/sha256.test b/testing/btest/bifs/sha256.test new file mode 100644 index 0000000000..7451f2fad3 --- /dev/null +++ b/testing/btest/bifs/sha256.test @@ -0,0 +1,16 @@ +# @TEST-EXEC: bro -b %INPUT >output +# @TEST-EXEC: btest-diff output + +print sha256_hash("one"); +print sha256_hash("one", "two", "three"); + +sha256_hash_init("a"); +sha256_hash_init("b"); + +sha256_hash_update("a", "one"); +sha256_hash_update("b", "one"); +sha256_hash_update("b", "two"); +sha256_hash_update("b", "three"); + +print sha256_hash_finish("a"); +print sha256_hash_finish("b"); From a0575158efffba2ebb6ae0308fb7af6fdee25e4c Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 4 May 2012 21:50:20 -0700 Subject: [PATCH 274/651] DataSeries updates and fixes. 
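The diffs below add a WriterBackend::Render(double) helper, built on modp_dtoa with six digits of precision, and move the DataSeries writer's time/interval/double formatting onto it (the baseline .ds outputs change accordingly). A rough, self-contained sketch of the idea, with plain snprintf standing in for modp_dtoa purely for illustration:

#include <cstdio>
#include <string>

// Sketch of a shared double renderer in the spirit of the
// WriterBackend::Render(double) added by this patch. The patch itself
// calls modp_dtoa(d, buf, 6); snprintf("%.6f") is used here only as a
// stand-in so the sketch compiles without the stringencoders library.
static std::string render_double(double d)
	{
	char buf[256];
	snprintf(buf, sizeof(buf), "%.6f", d);
	return buf;
	}

// A log writer can then format every time, interval, and double value
// through the same helper, keeping the textual output consistent across
// backends.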
--- doc/logging-dataseries.rst | 16 -- src/logging/WriterBackend.cc | 7 +- src/logging/WriterBackend.h | 8 + src/logging/writers/Ascii.cc | 7 +- src/logging/writers/DataSeries.cc | 34 +++-- .../ssh.ds.xml | 2 +- .../out | 140 +++++++++--------- .../ssh.ds.txt | 10 +- .../conn.ds.txt | 80 +++++----- .../http.ds.txt | 38 ++--- 10 files changed, 169 insertions(+), 173 deletions(-) diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst index 67f95ecf3b..1a5f4ae520 100644 --- a/doc/logging-dataseries.rst +++ b/doc/logging-dataseries.rst @@ -102,20 +102,4 @@ TODO. TODO ==== -* I'm seeing lots of warning on stderr:: - - Warning, while packing field ts of record 1, error was > 10%: - (1334620000 / 1000000 = 1334.62, round() = 1335) - Warning, while packing field not_valid_after of record 11, error was > 10%: - (1346460000 / 1000000 = 1346.46, round() = 1346) - - See Eric's mail. - -* For testing our script-level options: - - - Can we get the extentsize from a ``.ds`` file? - - Can we get the compressio level from a ``.ds`` file? - - See Eric's mail. - * Do we have a leak? diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 28b623988c..09970f02c6 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -267,4 +267,9 @@ string WriterBackend::Render(const threading::Value::subnet_t& subnet) const return s; } - +string WriterBackend::Render(double d) const + { + char buf[256]; + modp_dtoa(d, buf, 6); + return buf; + } diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 8fbf0c9e71..fa12613e6d 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -165,6 +165,14 @@ public: */ string Render(const threading::Value::subnet_t& subnet) const; + /** Helper method to render a double in Bro's standard precision. + * + * @param d The double. + * + * @return An ASCII representation of the double. + */ + string Render(double d) const; + protected: friend class FinishMessage; diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 3a35eea380..efc001aa97 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -176,14 +176,9 @@ bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) desc->Add(Render(val->val.addr_val)); break; + case TYPE_DOUBLE: case TYPE_TIME: case TYPE_INTERVAL: - char buf[256]; - modp_dtoa(val->val.double_val, buf, 6); - desc->Add(buf); - break; - - case TYPE_DOUBLE: desc->Add(val->val.double_val); break; diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index a3d193be97..bd1da57403 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -21,29 +21,31 @@ std::string DataSeries::LogValueToString(threading::Value *val) if( ! val->present ) return ""; - std::ostringstream ostr; - switch(val->type) { case TYPE_BOOL: return (val->val.int_val ? "true" : "false"); case TYPE_INT: + { + std::ostringstream ostr; ostr << val->val.int_val; return ostr.str(); + } case TYPE_COUNT: case TYPE_COUNTER: case TYPE_PORT: + { + std::ostringstream ostr; ostr << val->val.uint_val; return ostr.str(); + } case TYPE_SUBNET: - ostr << Render(val->val.subnet_val); - return ostr.str(); + return Render(val->val.subnet_val); case TYPE_ADDR: - ostr << Render(val->val.addr_val); - return ostr.str(); + return Render(val->val.addr_val); // Note: These two cases are relatively special. We need to convert // these values into their integer equivalents to maximize precision. 
@@ -57,15 +59,16 @@ std::string DataSeries::LogValueToString(threading::Value *val) case TYPE_TIME: case TYPE_INTERVAL: if ( ds_use_integer_for_time ) + { + std::ostringstream ostr; ostr << (unsigned long)(DataSeries::TIME_SCALE * val->val.double_val); + return ostr.str(); + } else - ostr << val->val.double_val; - - return ostr.str(); + return Render(val->val.double_val); case TYPE_DOUBLE: - ostr << val->val.double_val; - return ostr.str(); + return Render(val->val.double_val); case TYPE_ENUM: case TYPE_STRING: @@ -190,10 +193,11 @@ std::string DataSeries::GetDSOptionsForType(const threading::Field *field) case TYPE_TIME: case TYPE_INTERVAL: { - std::string s = "pack_relative=\"" + std::string(field->name) + "\""; + std::string s; + s += "pack_relative=\"" + std::string(field->name) + "\""; if ( ! ds_use_integer_for_time ) - s += " pack_scale=\"1000000\""; + s += " pack_scale=\"1000\" pack_scale_warn=\"no\""; else s += string(" units=\"") + TIME_UNIT() + "\" epoch=\"unix\""; @@ -250,7 +254,7 @@ bool DataSeries::OpenLog(string path) ds_extent_size = ROW_MAX; } - log_output = new OutputModule(*log_file, log_series, *log_type, ds_extent_size); + log_output = new OutputModule(*log_file, log_series, log_type, ds_extent_size); return true; } @@ -330,7 +334,7 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con Warning(Fmt("%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'gz', 'bz2', 'none', 'any'. Defaulting to 'any'", ds_compression.c_str())); log_type = log_types.registerTypePtr(schema); - log_series.setType(*log_type); + log_series.setType(log_type); return OpenLog(path); } diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml index 71ad5d70a0..9862ae606f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml @@ -1,5 +1,5 @@ - + diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out index a12fed36e1..76e7e77c77 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out @@ -20,7 +20,7 @@ test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 dataserie - + @@ -34,17 +34,17 @@ test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 dataserie extent offset ExtentType 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299e+09 10.0.0.1 20 10.0.0.2 1024 -1.299e+09 10.0.0.2 20 10.0.0.3 0 +1.299467e+09 10.0.0.1 20 10.0.0.2 1024 +1.299471e+09 10.0.0.2 20 10.0.0.3 0 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex > test.2011-03-07-04-00-05.ds # Extent Types ... 
@@ -57,7 +57,7 @@ offset extenttype - + @@ -71,17 +71,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299e+09 10.0.0.1 20 10.0.0.2 1025 -1.299e+09 10.0.0.2 20 10.0.0.3 1 +1.29947e+09 10.0.0.1 20 10.0.0.2 1025 +1.299474e+09 10.0.0.2 20 10.0.0.3 1 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex > test.2011-03-07-05-00-05.ds # Extent Types ... @@ -94,7 +94,7 @@ offset extenttype - + @@ -108,17 +108,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299e+09 10.0.0.1 20 10.0.0.2 1026 -1.299e+09 10.0.0.2 20 10.0.0.3 2 +1.299474e+09 10.0.0.1 20 10.0.0.2 1026 +1.299478e+09 10.0.0.2 20 10.0.0.3 2 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex > test.2011-03-07-06-00-05.ds # Extent Types ... @@ -131,7 +131,7 @@ offset extenttype - + @@ -145,17 +145,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299e+09 10.0.0.1 20 10.0.0.2 1027 -1.299e+09 10.0.0.2 20 10.0.0.3 3 +1.299478e+09 10.0.0.1 20 10.0.0.2 1027 +1.299482e+09 10.0.0.2 20 10.0.0.3 3 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex > test.2011-03-07-07-00-05.ds # Extent Types ... @@ -168,7 +168,7 @@ offset extenttype - + @@ -182,17 +182,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299e+09 10.0.0.1 20 10.0.0.2 1028 -1.299e+09 10.0.0.2 20 10.0.0.3 4 +1.299481e+09 10.0.0.1 20 10.0.0.2 1028 +1.299485e+09 10.0.0.2 20 10.0.0.3 4 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex > test.2011-03-07-08-00-05.ds # Extent Types ... @@ -205,7 +205,7 @@ offset extenttype - + @@ -219,17 +219,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299e+09 10.0.0.1 20 10.0.0.2 1029 -1.299e+09 10.0.0.2 20 10.0.0.3 5 +1.299485e+09 10.0.0.1 20 10.0.0.2 1029 +1.299489e+09 10.0.0.2 20 10.0.0.3 5 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex > test.2011-03-07-09-00-05.ds # Extent Types ... 
@@ -242,7 +242,7 @@ offset extenttype - + @@ -256,17 +256,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299e+09 10.0.0.1 20 10.0.0.2 1030 -1.299e+09 10.0.0.2 20 10.0.0.3 6 +1.299488e+09 10.0.0.1 20 10.0.0.2 1030 +1.299492e+09 10.0.0.2 20 10.0.0.3 6 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex > test.2011-03-07-10-00-05.ds # Extent Types ... @@ -279,7 +279,7 @@ offset extenttype - + @@ -293,17 +293,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299e+09 10.0.0.1 20 10.0.0.2 1031 -1.299e+09 10.0.0.2 20 10.0.0.3 7 +1.299492e+09 10.0.0.1 20 10.0.0.2 1031 +1.299496e+09 10.0.0.2 20 10.0.0.3 7 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex > test.2011-03-07-11-00-05.ds # Extent Types ... @@ -316,7 +316,7 @@ offset extenttype - + @@ -330,17 +330,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.3e+09 10.0.0.1 20 10.0.0.2 1032 -1.3e+09 10.0.0.2 20 10.0.0.3 8 +1.299496e+09 10.0.0.1 20 10.0.0.2 1032 +1.2995e+09 10.0.0.2 20 10.0.0.3 8 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex > test.2011-03-07-12-00-05.ds # Extent Types ... 
@@ -353,7 +353,7 @@ offset extenttype - + @@ -367,14 +367,14 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.3e+09 10.0.0.1 20 10.0.0.2 1033 -1.3e+09 10.0.0.2 20 10.0.0.3 9 +1.299499e+09 10.0.0.1 20 10.0.0.2 1033 +1.299503e+09 10.0.0.2 20 10.0.0.3 9 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -360 test -468 DataSeries: ExtentIndex +372 test +484 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt index 05026a24ef..8cb1293772 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt @@ -9,7 +9,7 @@ - + @@ -27,8 +27,8 @@ extent offset ExtentType 40 DataSeries: XmlType -400 ssh -604 DataSeries: ExtentIndex +416 ssh +624 DataSeries: ExtentIndex # Extent, type='ssh' t id.orig_h id.orig_p id.resp_h id.resp_p status country X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 success unknown @@ -39,5 +39,5 @@ X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 failure MX # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -400 ssh -604 DataSeries: ExtentIndex +416 ssh +624 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt index e85cf9337e..7a4af6776b 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt @@ -9,7 +9,7 @@ - + @@ -17,7 +17,7 @@ - + @@ -51,46 +51,46 @@ extent offset ExtentType 40 DataSeries: XmlType -660 conn -2564 DataSeries: ExtentIndex +680 conn +2592 DataSeries: ExtentIndex # Extent, type='conn' ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes -1.3e+09 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 -1.3e+09 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 -1.3e+09 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 -1.3e+09 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 -1.3e+09 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 -1.3e+09 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 -1.3e+09 3PKsZ2Uye21 141.142.220.118 32902 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 -1.3e+09 VW0XPVINV8a 141.142.220.118 59816 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 -1.3e+09 fRFu0wcOle6 141.142.220.118 59714 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 -1.3e+09 qSsw6ESzHV4 141.142.220.118 58206 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 -1.3e+09 iE6yhOq3SF 141.142.220.118 38911 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 -1.3e+09 GSxOnSLghOa 141.142.220.118 59746 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 -1.3e+09 qCaWGmzFtM5 141.142.220.118 45000 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 -1.3e+09 70MGiRM1Qf4 
141.142.220.118 48479 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 -1.3e+09 h5DsfNtYzi1 141.142.220.118 48128 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 -1.3e+09 P654jzLoe3a 141.142.220.118 56056 141.142.2.2 53 udp dns 0 0 131 SHR F 0 Cd 0 0 1 159 -1.3e+09 Tw8jXtpTGu6 141.142.220.118 55092 141.142.2.2 53 udp dns 0 0 198 SHR F 0 Cd 0 0 1 226 -1.3e+09 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 85 0 0 -1.3e+09 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 0 350 0 S0 F 0 D 7 546 0 0 -1.3e+09 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0 66 0 S0 F 0 D 2 162 0 0 -1.3e+09 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 0 66 0 S0 F 0 D 2 122 0 0 -1.3e+09 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0 66 0 S0 F 0 D 2 162 0 0 -1.3e+09 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 0 66 0 S0 F 0 D 2 122 0 0 -1.3e+09 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0 0 0 S0 F 0 D 1 78 0 0 -1.3e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 tcp 0 1130 734 S1 F 1130 ShACad 4 216 4 950 -1.3e+09 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 0 0 350 OTH F 0 CdA 1 52 1 402 -1.3e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 tcp 0 1178 734 S1 F 1178 ShACad 4 216 4 950 -1.3e+09 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 0 534 412 S1 F 534 ShACad 3 164 3 576 -1.3e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 tcp 0 1148 734 S1 F 1148 ShACad 4 216 4 950 -1.3e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 tcp 0 1171 733 S1 F 1171 ShACad 4 216 4 949 -1.3e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 tcp 0 1137 733 S1 F 1137 ShACad 4 216 4 949 -1.3e+09 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0 0 0 OTH F 0 h 0 0 1 48 -1.3e+09 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 0 525 232 S1 F 525 ShACad 3 164 3 396 -1.3e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 0 1125 734 S1 F 1125 ShACad 4 216 4 950 +1.300475e+09 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 +1.300475e+09 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 +1.300475e+09 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 +1.300475e+09 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 +1.300475e+09 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 +1.300475e+09 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 +1.300475e+09 3PKsZ2Uye21 141.142.220.118 32902 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 +1.300475e+09 VW0XPVINV8a 141.142.220.118 59816 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 +1.300475e+09 fRFu0wcOle6 141.142.220.118 59714 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 +1.300475e+09 qSsw6ESzHV4 141.142.220.118 58206 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 +1.300475e+09 iE6yhOq3SF 141.142.220.118 38911 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 +1.300475e+09 GSxOnSLghOa 141.142.220.118 59746 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 +1.300475e+09 qCaWGmzFtM5 141.142.220.118 45000 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 +1.300475e+09 70MGiRM1Qf4 141.142.220.118 48479 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 +1.300475e+09 h5DsfNtYzi1 141.142.220.118 48128 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 +1.300475e+09 P654jzLoe3a 
141.142.220.118 56056 141.142.2.2 53 udp dns 0 0 131 SHR F 0 Cd 0 0 1 159 +1.300475e+09 Tw8jXtpTGu6 141.142.220.118 55092 141.142.2.2 53 udp dns 0 0 198 SHR F 0 Cd 0 0 1 226 +1.300475e+09 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 85 0 0 +1.300475e+09 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 0 350 0 S0 F 0 D 7 546 0 0 +1.300475e+09 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0 66 0 S0 F 0 D 2 162 0 0 +1.300475e+09 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 0 66 0 S0 F 0 D 2 122 0 0 +1.300475e+09 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0 66 0 S0 F 0 D 2 162 0 0 +1.300475e+09 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 0 66 0 S0 F 0 D 2 122 0 0 +1.300475e+09 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0 0 0 S0 F 0 D 1 78 0 0 +1.300475e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 tcp 0 1130 734 S1 F 1130 ShACad 4 216 4 950 +1.300475e+09 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 0 0 350 OTH F 0 CdA 1 52 1 402 +1.300475e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 tcp 0 1178 734 S1 F 1178 ShACad 4 216 4 950 +1.300475e+09 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 0 534 412 S1 F 534 ShACad 3 164 3 576 +1.300475e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 tcp 0 1148 734 S1 F 1148 ShACad 4 216 4 950 +1.300475e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 tcp 0 1171 733 S1 F 1171 ShACad 4 216 4 949 +1.300475e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 tcp 0 1137 733 S1 F 1137 ShACad 4 216 4 949 +1.300475e+09 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0 0 0 OTH F 0 h 0 0 1 48 +1.300475e+09 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 0 525 232 S1 F 525 ShACad 3 164 3 396 +1.300475e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 0 1125 734 S1 F 1125 ShACad 4 216 4 950 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -660 conn -2564 DataSeries: ExtentIndex +680 conn +2592 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt index a0c6cbbff3..0b16a69a6f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt @@ -9,7 +9,7 @@ - + @@ -65,26 +65,26 @@ extent offset ExtentType 40 DataSeries: XmlType -768 http -1156 DataSeries: ExtentIndex +784 http +1172 DataSeries: ExtentIndex # Extent, type='http' ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file -1.3e+09 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 0 0 0 304 Not Modified 0 -1.3e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 
i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 0 0 0 304 Not Modified 0 -1.3e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.3e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 0 0 0 304 Not Modified 0 +1.300475e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 0 0 0 304 Not Modified 0 +1.300475e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 +1.300475e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -768 http -1156 DataSeries: ExtentIndex +784 http +1172 DataSeries: ExtentIndex From 905e4d3a14f05fc5ff2970e05ddceb29384ee3b7 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 7 May 2012 12:55:54 -0500 Subject: [PATCH 275/651] Change IPv6 output format to no longer automatically be bracketed. Instead, the `addr_to_uri` script-level function can be used to explicitly add brackets to an address if it's IPv6 and will be included in a URI or when a ":" needs to be appended to it. 
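
As an illustrative sketch only (not part of this patch): a hypothetical policy
snippet showing how `addr_to_uri` could be used when an address is embedded in
a URI and a ":" plus port follows it. The addresses and port below are made-up
example values.

@load base/utils/addrs

event bro_init()
	{
	local a4: addr = 1.2.3.4;
	local a6: addr = [2001:db8::1];  # IPv6 literals are written with brackets in scripts
	# addr_to_uri() leaves IPv4 addresses unchanged and wraps IPv6 addresses
	# in square brackets, so a ":port" suffix is unambiguous in both cases.
	print fmt("http://%s:8080/", addr_to_uri(a4));
	print fmt("http://%s:8080/", addr_to_uri(a6));
	}
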
--- scripts/base/protocols/ftp/main.bro | 3 +- scripts/base/protocols/http/utils.bro | 3 +- scripts/base/utils/addrs.bro | 15 ++ scripts/base/utils/files.bro | 5 +- src/IPAddr.cc | 2 +- src/logging/WriterBackend.cc | 2 +- .../bifs.addr_count_conversion/output | 2 +- .../Baseline/bifs.ptr_name_to_addr/output | 2 +- .../bifs.routing0_data_to_addrs/output | 2 +- testing/btest/Baseline/bifs.to_addr/output | 2 +- testing/btest/Baseline/bifs.to_subnet/output | 4 +- testing/btest/Baseline/core.conn-uid/output | 6 +- testing/btest/Baseline/core.discarder/output | 10 +- .../Baseline/core.icmp.icmp-context/output | 2 +- .../Baseline/core.icmp.icmp6-context/output | 24 +- .../Baseline/core.icmp.icmp6-events/output | 82 +++--- .../Baseline/core.ipv6-atomic-frag/output | 8 +- testing/btest/Baseline/core.ipv6-frag/dns.log | 4 +- testing/btest/Baseline/core.ipv6-frag/output | 10 +- testing/btest/Baseline/core.ipv6_esp/output | 240 +++++++++--------- .../Baseline/core.ipv6_ext_headers/output | 6 +- .../core.mobile-ipv6-home-addr/output | 4 +- .../Baseline/core.mobile-ipv6-routing/output | 4 +- .../btest/Baseline/core.mobility_msg/output | 16 +- .../Baseline/istate.broccoli-ipv6/bro..stdout | 8 +- .../Baseline/istate.pybroccoli/bro..stdout | 6 +- .../istate.pybroccoli/python..stdout.filtered | 6 +- .../Baseline/language.expire_func/output | 14 +- .../Baseline/language.ipv6-literals/output | 46 ++-- testing/btest/Baseline/language.sizeof/output | 2 +- .../remote.log | 6 +- .../conn.log | 12 +- .../ftp.log | 4 +- 33 files changed, 290 insertions(+), 272 deletions(-) diff --git a/scripts/base/protocols/ftp/main.bro b/scripts/base/protocols/ftp/main.bro index 809ab61360..7c5bbaefdc 100644 --- a/scripts/base/protocols/ftp/main.bro +++ b/scripts/base/protocols/ftp/main.bro @@ -6,6 +6,7 @@ @load ./utils-commands @load base/utils/paths @load base/utils/numbers +@load base/utils/addrs module FTP; @@ -169,7 +170,7 @@ function ftp_message(s: Info) local arg = s$cmdarg$arg; if ( s$cmdarg$cmd in file_cmds ) - arg = fmt("ftp://%s%s", s$id$resp_h, build_path_compressed(s$cwd, arg)); + arg = fmt("ftp://%s%s", addr_to_uri(s$id$resp_h), build_path_compressed(s$cwd, arg)); s$ts=s$cmdarg$ts; s$command=s$cmdarg$cmd; diff --git a/scripts/base/protocols/http/utils.bro b/scripts/base/protocols/http/utils.bro index 0f2666fade..a74a2fe696 100644 --- a/scripts/base/protocols/http/utils.bro +++ b/scripts/base/protocols/http/utils.bro @@ -1,6 +1,7 @@ ##! Utilities specific for HTTP processing. @load ./main +@load base/utils/addrs module HTTP; @@ -51,7 +52,7 @@ function extract_keys(data: string, kv_splitter: pattern): string_vec function build_url(rec: Info): string { local uri = rec?$uri ? rec$uri : "/"; - local host = rec?$host ? rec$host : fmt("%s", rec$id$resp_h); + local host = rec?$host ? rec$host : addr_to_uri(rec$id$resp_h); if ( rec$id$resp_p != 80/tcp ) host = fmt("%s:%s", host, rec$id$resp_p); return fmt("%s%s", host, uri); diff --git a/scripts/base/utils/addrs.bro b/scripts/base/utils/addrs.bro index 415b9adfa9..08efd5281a 100644 --- a/scripts/base/utils/addrs.bro +++ b/scripts/base/utils/addrs.bro @@ -98,3 +98,18 @@ function find_ip_addresses(input: string): string_array } return output; } + +## Returns the string representation of an IP address suitable for inclusion +## in a URI. For IPv4, this does no special formatting, but for IPv6, the +## address is included in square brackets. +## +## a: the address to make suitable for URI inclusion. +## +## Returns: the string representation of *a* suitable for URI inclusion. 
+function addr_to_uri(a: addr): string + { + if ( is_v4_addr(a) ) + return fmt("%s", a); + else + return fmt("[%s]", a); + } diff --git a/scripts/base/utils/files.bro b/scripts/base/utils/files.bro index 8111245c24..ccd03df0e6 100644 --- a/scripts/base/utils/files.bro +++ b/scripts/base/utils/files.bro @@ -1,10 +1,11 @@ +@load ./addrs ## This function can be used to generate a consistent filename for when ## contents of a file, stream, or connection are being extracted to disk. function generate_extraction_filename(prefix: string, c: connection, suffix: string): string { - local conn_info = fmt("%s:%d-%s:%d", - c$id$orig_h, c$id$orig_p, c$id$resp_h, c$id$resp_p); + local conn_info = fmt("%s:%d-%s:%d", addr_to_uri(c$id$orig_h), c$id$orig_p, + addr_to_uri(c$id$resp_h), c$id$resp_p); if ( prefix != "" ) conn_info = fmt("%s_%s", prefix, conn_info); diff --git a/src/IPAddr.cc b/src/IPAddr.cc index 8d88cebc25..0ba5589fff 100644 --- a/src/IPAddr.cc +++ b/src/IPAddr.cc @@ -172,7 +172,7 @@ string IPAddr::AsString() const if ( ! bro_inet_ntop(AF_INET6, in6.s6_addr, s, INET6_ADDRSTRLEN) ) return ""; else - return string("[") + s + "]"; + return s; } } diff --git a/testing/btest/Baseline/bifs.addr_count_conversion/output b/testing/btest/Baseline/bifs.addr_count_conversion/output index c63e64b735..08a74512d3 100644 --- a/testing/btest/Baseline/bifs.addr_count_conversion/output +++ b/testing/btest/Baseline/bifs.addr_count_conversion/output @@ -1,4 +1,4 @@ [536939960, 2242052096, 35374, 57701172] -[2001:db8:85a3::8a2e:370:7334] +2001:db8:85a3::8a2e:370:7334 [16909060] 1.2.3.4 diff --git a/testing/btest/Baseline/bifs.ptr_name_to_addr/output b/testing/btest/Baseline/bifs.ptr_name_to_addr/output index ebc4c15823..7c290027aa 100644 --- a/testing/btest/Baseline/bifs.ptr_name_to_addr/output +++ b/testing/btest/Baseline/bifs.ptr_name_to_addr/output @@ -1,2 +1,2 @@ -[2607:f8b0:4009:802::1012] +2607:f8b0:4009:802::1012 74.125.225.52 diff --git a/testing/btest/Baseline/bifs.routing0_data_to_addrs/output b/testing/btest/Baseline/bifs.routing0_data_to_addrs/output index 7e37c7b77a..c79aef89d0 100644 --- a/testing/btest/Baseline/bifs.routing0_data_to_addrs/output +++ b/testing/btest/Baseline/bifs.routing0_data_to_addrs/output @@ -1 +1 @@ -[[2001:78:1:32::1], [2001:78:1:32::2]] +[2001:78:1:32::1, 2001:78:1:32::2] diff --git a/testing/btest/Baseline/bifs.to_addr/output b/testing/btest/Baseline/bifs.to_addr/output index 084261a8fd..ff277498f8 100644 --- a/testing/btest/Baseline/bifs.to_addr/output +++ b/testing/btest/Baseline/bifs.to_addr/output @@ -6,4 +6,4 @@ to_addr(10.20.30.40) = 10.20.30.40 (SUCCESS) to_addr(100.200.30.40) = 100.200.30.40 (SUCCESS) to_addr(10.0.0.0) = 10.0.0.0 (SUCCESS) to_addr(10.00.00.000) = 10.0.0.0 (SUCCESS) -to_addr(not an IP) = [::] (SUCCESS) +to_addr(not an IP) = :: (SUCCESS) diff --git a/testing/btest/Baseline/bifs.to_subnet/output b/testing/btest/Baseline/bifs.to_subnet/output index 526c3d66b2..0775063f89 100644 --- a/testing/btest/Baseline/bifs.to_subnet/output +++ b/testing/btest/Baseline/bifs.to_subnet/output @@ -1,3 +1,3 @@ 10.0.0.0/8, T -[2607:f8b0::]/32, T -[::]/0, T +2607:f8b0::/32, T +::/0, T diff --git a/testing/btest/Baseline/core.conn-uid/output b/testing/btest/Baseline/core.conn-uid/output index a98469d075..c77eda4f04 100644 --- a/testing/btest/Baseline/core.conn-uid/output +++ b/testing/btest/Baseline/core.conn-uid/output @@ -1,5 +1,5 @@ [orig_h=141.142.220.202, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], UWkUyAuUGXf -[orig_h=[fe80::217:f2ff:fed7:cf65], 
orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], arKYeMETxOg +[orig_h=fe80::217:f2ff:fed7:cf65, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], arKYeMETxOg [orig_h=141.142.220.50, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], k6kgXLOoSKl [orig_h=141.142.220.118, orig_p=35634/tcp, resp_h=208.80.152.2, resp_p=80/tcp], nQcgTWjvg4c [orig_h=141.142.220.118, orig_p=48649/tcp, resp_h=208.80.152.118, resp_p=80/tcp], j4u32Pc5bif @@ -36,8 +36,8 @@ [orig_h=141.142.220.235, orig_p=6705/tcp, resp_h=173.192.163.128, resp_p=80/tcp], 2cx26uAvUPl [orig_h=141.142.220.44, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], BWaU4aSuwkc [orig_h=141.142.220.226, orig_p=137/udp, resp_h=141.142.220.255, resp_p=137/udp], 10XodEwRycf -[orig_h=[fe80::3074:17d5:2052:c324], orig_p=65373/udp, resp_h=[ff02::1:3], resp_p=5355/udp], zno26fFZkrh +[orig_h=fe80::3074:17d5:2052:c324, orig_p=65373/udp, resp_h=ff02::1:3, resp_p=5355/udp], zno26fFZkrh [orig_h=141.142.220.226, orig_p=55131/udp, resp_h=224.0.0.252, resp_p=5355/udp], v5rgkJBig5l -[orig_h=[fe80::3074:17d5:2052:c324], orig_p=54213/udp, resp_h=[ff02::1:3], resp_p=5355/udp], eWZCH7OONC1 +[orig_h=fe80::3074:17d5:2052:c324, orig_p=54213/udp, resp_h=ff02::1:3, resp_p=5355/udp], eWZCH7OONC1 [orig_h=141.142.220.226, orig_p=55671/udp, resp_h=224.0.0.252, resp_p=5355/udp], 0Pwk3ntf8O3 [orig_h=141.142.220.238, orig_p=56641/udp, resp_h=141.142.220.255, resp_p=137/udp], 0HKorjr8Zp7 diff --git a/testing/btest/Baseline/core.discarder/output b/testing/btest/Baseline/core.discarder/output index 56b85cb83e..82b4b3e622 100644 --- a/testing/btest/Baseline/core.discarder/output +++ b/testing/btest/Baseline/core.discarder/output @@ -15,10 +15,10 @@ [orig_h=141.142.220.118, orig_p=50001/tcp, resp_h=208.80.152.3, resp_p=80/tcp] [orig_h=141.142.220.118, orig_p=35642/tcp, resp_h=208.80.152.2, resp_p=80/tcp] ################ UDP Discarder ################ -[orig_h=[fe80::217:f2ff:fed7:cf65], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp] -[orig_h=[fe80::3074:17d5:2052:c324], orig_p=65373/udp, resp_h=[ff02::1:3], resp_p=5355/udp] -[orig_h=[fe80::3074:17d5:2052:c324], orig_p=65373/udp, resp_h=[ff02::1:3], resp_p=5355/udp] -[orig_h=[fe80::3074:17d5:2052:c324], orig_p=54213/udp, resp_h=[ff02::1:3], resp_p=5355/udp] -[orig_h=[fe80::3074:17d5:2052:c324], orig_p=54213/udp, resp_h=[ff02::1:3], resp_p=5355/udp] +[orig_h=fe80::217:f2ff:fed7:cf65, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp] +[orig_h=fe80::3074:17d5:2052:c324, orig_p=65373/udp, resp_h=ff02::1:3, resp_p=5355/udp] +[orig_h=fe80::3074:17d5:2052:c324, orig_p=65373/udp, resp_h=ff02::1:3, resp_p=5355/udp] +[orig_h=fe80::3074:17d5:2052:c324, orig_p=54213/udp, resp_h=ff02::1:3, resp_p=5355/udp] +[orig_h=fe80::3074:17d5:2052:c324, orig_p=54213/udp, resp_h=ff02::1:3, resp_p=5355/udp] ################ ICMP Discarder ################ Discard icmp packet: [icmp_type=3] diff --git a/testing/btest/Baseline/core.icmp.icmp-context/output b/testing/btest/Baseline/core.icmp.icmp-context/output index 0820488cf8..40dc778d8b 100644 --- a/testing/btest/Baseline/core.icmp.icmp-context/output +++ b/testing/btest/Baseline/core.icmp.icmp-context/output @@ -1,7 +1,7 @@ icmp_unreachable (code=0) conn_id: [orig_h=10.0.0.1, orig_p=3/icmp, resp_h=10.0.0.2, resp_p=0/icmp] icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=0, hlim=64, v6=F] - icmp_context: [id=[orig_h=[::], orig_p=0/unknown, resp_h=[::], resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] + icmp_context: 
[id=[orig_h=::, orig_p=0/unknown, resp_h=::, resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) conn_id: [orig_h=10.0.0.1, orig_p=3/icmp, resp_h=10.0.0.2, resp_p=0/icmp] icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=20, hlim=64, v6=F] diff --git a/testing/btest/Baseline/core.icmp.icmp6-context/output b/testing/btest/Baseline/core.icmp.icmp6-context/output index 75b51ab697..7a83679018 100644 --- a/testing/btest/Baseline/core.icmp.icmp6-context/output +++ b/testing/btest/Baseline/core.icmp.icmp6-context/output @@ -1,16 +1,16 @@ icmp_unreachable (code=0) - conn_id: [orig_h=[fe80::dead], orig_p=1/icmp, resp_h=[fe80::beef], resp_p=0/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=1, icode=0, len=0, hlim=64, v6=T] - icmp_context: [id=[orig_h=[::], orig_p=0/unknown, resp_h=[::], resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=0, hlim=64, v6=T] + icmp_context: [id=[orig_h=::, orig_p=0/unknown, resp_h=::, resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) - conn_id: [orig_h=[fe80::dead], orig_p=1/icmp, resp_h=[fe80::beef], resp_p=0/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=1, icode=0, len=40, hlim=64, v6=T] - icmp_context: [id=[orig_h=[fe80::beef], orig_p=0/unknown, resp_h=[fe80::dead], resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=40, hlim=64, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=0/unknown, resp_h=fe80::dead, resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) - conn_id: [orig_h=[fe80::dead], orig_p=1/icmp, resp_h=[fe80::beef], resp_p=0/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=1, icode=0, len=60, hlim=64, v6=T] - icmp_context: [id=[orig_h=[fe80::beef], orig_p=30000/udp, resp_h=[fe80::dead], resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, hlim=64, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) - conn_id: [orig_h=[fe80::dead], orig_p=1/icmp, resp_h=[fe80::beef], resp_p=0/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=1, icode=0, len=48, hlim=64, v6=T] - icmp_context: [id=[orig_h=[fe80::beef], orig_p=0/unknown, resp_h=[fe80::dead], resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=48, hlim=64, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=0/unknown, resp_h=fe80::dead, resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] diff --git 
a/testing/btest/Baseline/core.icmp.icmp6-events/output b/testing/btest/Baseline/core.icmp.icmp6-events/output index 8b41827dc0..81075b716a 100644 --- a/testing/btest/Baseline/core.icmp.icmp6-events/output +++ b/testing/btest/Baseline/core.icmp.icmp6-events/output @@ -1,46 +1,46 @@ icmp_unreachable (code=0) - conn_id: [orig_h=[fe80::dead], orig_p=1/icmp, resp_h=[fe80::beef], resp_p=0/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=1, icode=0, len=60, hlim=64, v6=T] - icmp_context: [id=[orig_h=[fe80::beef], orig_p=30000/udp, resp_h=[fe80::dead], resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, hlim=64, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_packet_too_big (code=0) - conn_id: [orig_h=[fe80::dead], orig_p=2/icmp, resp_h=[fe80::beef], resp_p=0/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=2, icode=0, len=52, hlim=64, v6=T] - icmp_context: [id=[orig_h=[fe80::beef], orig_p=30000/udp, resp_h=[fe80::dead], resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=fe80::dead, orig_p=2/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=2, icode=0, len=52, hlim=64, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_time_exceeded (code=0) - conn_id: [orig_h=[fe80::dead], orig_p=3/icmp, resp_h=[fe80::beef], resp_p=0/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=3, icode=0, len=52, hlim=64, v6=T] - icmp_context: [id=[orig_h=[fe80::beef], orig_p=30000/udp, resp_h=[fe80::dead], resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=fe80::dead, orig_p=3/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=3, icode=0, len=52, hlim=64, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_parameter_problem (code=0) - conn_id: [orig_h=[fe80::dead], orig_p=4/icmp, resp_h=[fe80::beef], resp_p=0/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=4, icode=0, len=52, hlim=64, v6=T] - icmp_context: [id=[orig_h=[fe80::beef], orig_p=30000/udp, resp_h=[fe80::dead], resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] + conn_id: [orig_h=fe80::dead, orig_p=4/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=4, icode=0, len=52, hlim=64, v6=T] + icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_echo_request (id=1, seq=3, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] - icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: 
[orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=3, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] - icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=4, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] - icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=4, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] - icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=5, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] - icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=5, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] - icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] - icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, 
resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) - conn_id: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], orig_p=128/icmp, resp_h=[2001:4860:8006::63], resp_p=129/icmp] - icmp_conn: [orig_h=[2620:0:e00:400e:d1d:db37:beb:5aac], resp_h=[2001:4860:8006::63], itype=128, icode=0, len=32, hlim=128, v6=T] -icmp_redirect (tgt=[fe80::cafe], dest=[fe80::babe]) - conn_id: [orig_h=[fe80::dead], orig_p=137/icmp, resp_h=[fe80::beef], resp_p=0/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=137, icode=0, len=32, hlim=255, v6=T] + conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] +icmp_redirect (tgt=fe80::cafe, dest=fe80::babe) + conn_id: [orig_h=fe80::dead, orig_p=137/icmp, resp_h=fe80::beef, resp_p=0/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=137, icode=0, len=32, hlim=255, v6=T] icmp_router_advertisement cur_hop_limit=13 managed=T @@ -52,17 +52,17 @@ icmp_router_advertisement router_lifetime=30.0 mins reachable_time=3.0 secs 700.0 msecs retrans_timer=1.0 sec 300.0 msecs - conn_id: [orig_h=[fe80::dead], orig_p=134/icmp, resp_h=[fe80::beef], resp_p=133/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=134, icode=0, len=8, hlim=255, v6=T] -icmp_neighbor_advertisement (tgt=[fe80::babe]) + conn_id: [orig_h=fe80::dead, orig_p=134/icmp, resp_h=fe80::beef, resp_p=133/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=134, icode=0, len=8, hlim=255, v6=T] +icmp_neighbor_advertisement (tgt=fe80::babe) router=T solicited=F override=T - conn_id: [orig_h=[fe80::dead], orig_p=136/icmp, resp_h=[fe80::beef], resp_p=135/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=136, icode=0, len=16, hlim=255, v6=T] + conn_id: [orig_h=fe80::dead, orig_p=136/icmp, resp_h=fe80::beef, resp_p=135/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=136, icode=0, len=16, hlim=255, v6=T] icmp_router_solicitation - conn_id: [orig_h=[fe80::dead], orig_p=133/icmp, resp_h=[fe80::beef], resp_p=134/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=133, icode=0, len=0, hlim=255, v6=T] -icmp_neighbor_solicitation (tgt=[fe80::babe]) - conn_id: [orig_h=[fe80::dead], orig_p=135/icmp, resp_h=[fe80::beef], resp_p=136/icmp] - icmp_conn: [orig_h=[fe80::dead], resp_h=[fe80::beef], itype=135, icode=0, len=16, hlim=255, v6=T] + conn_id: [orig_h=fe80::dead, orig_p=133/icmp, resp_h=fe80::beef, resp_p=134/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=133, icode=0, len=0, hlim=255, v6=T] +icmp_neighbor_solicitation (tgt=fe80::babe) + conn_id: [orig_h=fe80::dead, orig_p=135/icmp, resp_h=fe80::beef, resp_p=136/icmp] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=135, icode=0, len=16, hlim=255, v6=T] diff --git a/testing/btest/Baseline/core.ipv6-atomic-frag/output b/testing/btest/Baseline/core.ipv6-atomic-frag/output index b634ae11db..4a628a4bdc 100644 --- a/testing/btest/Baseline/core.ipv6-atomic-frag/output +++ b/testing/btest/Baseline/core.ipv6-atomic-frag/output @@ -1,4 +1,4 @@ -[orig_h=[2001:db8:1::2], orig_p=36951/tcp, resp_h=[2001:db8:1::1], resp_p=80/tcp] -[orig_h=[2001:db8:1::2], orig_p=59694/tcp, resp_h=[2001:db8:1::1], resp_p=80/tcp] -[orig_h=[2001:db8:1::2], orig_p=27393/tcp, resp_h=[2001:db8:1::1], resp_p=80/tcp] 
-[orig_h=[2001:db8:1::2], orig_p=45805/tcp, resp_h=[2001:db8:1::1], resp_p=80/tcp] +[orig_h=2001:db8:1::2, orig_p=36951/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] +[orig_h=2001:db8:1::2, orig_p=59694/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] +[orig_h=2001:db8:1::2, orig_p=27393/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] +[orig_h=2001:db8:1::2, orig_p=45805/tcp, resp_h=2001:db8:1::1, resp_p=80/tcp] diff --git a/testing/btest/Baseline/core.ipv6-frag/dns.log b/testing/btest/Baseline/core.ipv6-frag/dns.log index ccf9f4b73d..251f35d789 100644 --- a/testing/btest/Baseline/core.ipv6-frag/dns.log +++ b/testing/btest/Baseline/core.ipv6-frag/dns.log @@ -5,5 +5,5 @@ #path dns #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs #types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] -1331084278.438444 UWkUyAuUGXf [2001:470:1f11:81f:d138:5f55:6d4:1fe2] 51850 [2607:f740:b::f93] 53 udp 3903 txtpadding_323.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 -1331084293.592245 arKYeMETxOg [2001:470:1f11:81f:d138:5f55:6d4:1fe2] 51851 [2607:f740:b::f93] 53 udp 40849 txtpadding_3230.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 +1331084278.438444 UWkUyAuUGXf 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51850 2607:f740:b::f93 53 udp 3903 txtpadding_323.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 +1331084293.592245 arKYeMETxOg 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51851 2607:f740:b::f93 53 udp 40849 txtpadding_3230.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 diff --git a/testing/btest/Baseline/core.ipv6-frag/output b/testing/btest/Baseline/core.ipv6-frag/output index 3ab244254b..12dfc3a841 100644 --- a/testing/btest/Baseline/core.ipv6-frag/output +++ b/testing/btest/Baseline/core.ipv6-frag/output @@ -1,5 +1,5 @@ -ip6=[class=0, flow=0, len=81, nxt=17, hlim=64, src=[2001:470:1f11:81f:d138:5f55:6d4:1fe2], dst=[2607:f740:b::f93], exts=[]], udp = [sport=51850/udp, dport=53/udp, ulen=81] -ip6=[class=0, flow=0, len=331, nxt=17, hlim=53, src=[2607:f740:b::f93], dst=[2001:470:1f11:81f:d138:5f55:6d4:1fe2], exts=[]], udp = [sport=53/udp, dport=51850/udp, ulen=331] -ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=[2001:470:1f11:81f:d138:5f55:6d4:1fe2], dst=[2607:f740:b::f93], exts=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] -ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=[2001:470:1f11:81f:d138:5f55:6d4:1fe2], dst=[2607:f740:b::f93], exts=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] -ip6=[class=0, flow=0, len=3238, nxt=17, hlim=53, src=[2607:f740:b::f93], dst=[2001:470:1f11:81f:d138:5f55:6d4:1fe2], exts=[]], udp = [sport=53/udp, dport=51851/udp, ulen=3238] +ip6=[class=0, flow=0, len=81, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[]], udp = [sport=51850/udp, dport=53/udp, ulen=81] +ip6=[class=0, flow=0, len=331, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2, exts=[]], udp = [sport=53/udp, dport=51850/udp, ulen=331] +ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[]], udp = [sport=51851/udp, dport=53/udp, 
ulen=82] +ip6=[class=0, flow=0, len=82, nxt=17, hlim=64, src=2001:470:1f11:81f:d138:5f55:6d4:1fe2, dst=2607:f740:b::f93, exts=[]], udp = [sport=51851/udp, dport=53/udp, ulen=82] +ip6=[class=0, flow=0, len=3238, nxt=17, hlim=53, src=2607:f740:b::f93, dst=2001:470:1f11:81f:d138:5f55:6d4:1fe2, exts=[]], udp = [sport=53/udp, dport=51851/udp, ulen=3238] diff --git a/testing/btest/Baseline/core.ipv6_esp/output b/testing/btest/Baseline/core.ipv6_esp/output index 834a3cd56e..02fb7e154f 100644 --- a/testing/btest/Baseline/core.ipv6_esp/output +++ b/testing/btest/Baseline/core.ipv6_esp/output @@ -1,120 +1,120 @@ -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::2], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], 
dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::3], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::4], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], 
dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::5], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9], mobility=]]] -[class=0, flow=0, len=116, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::12], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, 
src=[3ffe::1], dst=[3ffe::13], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9], mobility=]]] -[class=0, flow=0, len=100, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::14], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::15], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10], mobility=]]] -[class=0, flow=0, len=104, nxt=50, 
hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=1], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=2], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=3], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=4], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=5], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=6], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=7], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=8], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=9], mobility=]]] -[class=0, flow=0, len=104, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::22], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=10], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=1], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=2], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=3], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=4], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=5], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=6], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=7], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=8], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=9], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::23], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=10], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=1], mobility=]]] -[class=0, flow=0, len=88, 
nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=2], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=3], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=4], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=5], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=6], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=7], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=8], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=9], mobility=]]] -[class=0, flow=0, len=88, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::24], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=10], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=1], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=2], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=3], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=4], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=5], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=6], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=7], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=8], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=9], mobility=]]] -[class=0, flow=0, len=76, nxt=50, hlim=64, src=[3ffe::1], dst=[3ffe::25], exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=10], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2], mobility=]]] +[class=0, flow=0, len=116, nxt=50, 
hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::2, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::3, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, 
fragment=, ah=, esp=[spi=12, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::4, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::5, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=1], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=2], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=3], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=4], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=5], mobility=]]] +[class=0, flow=0, len=116, nxt=50, 
hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=6], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=7], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=8], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=9], mobility=]]] +[class=0, flow=0, len=116, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::12, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=10, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::13, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=11, seq=10], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=1], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=2], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=3], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=4], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=5], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=6], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, 
dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=7], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=8], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=9], mobility=]]] +[class=0, flow=0, len=100, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::14, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=12, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::15, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=13, seq=10], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=1], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=2], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=3], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=4], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=5], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=6], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=7], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=8], mobility=]]] 
+[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=9], mobility=]]] +[class=0, flow=0, len=104, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::22, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=20, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::23, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=21, seq=10], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=1], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=2], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=3], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=4], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=5], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=6], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=7], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=8], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=9], mobility=]]] +[class=0, flow=0, len=88, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::24, exts=[[id=50, 
hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=22, seq=10], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=1], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=2], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=3], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=4], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=5], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=6], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=7], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=8], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=9], mobility=]]] +[class=0, flow=0, len=76, nxt=50, hlim=64, src=3ffe::1, dst=3ffe::25, exts=[[id=50, hopopts=, dstopts=, routing=, fragment=, ah=, esp=[spi=23, seq=10], mobility=]]] diff --git a/testing/btest/Baseline/core.ipv6_ext_headers/output b/testing/btest/Baseline/core.ipv6_ext_headers/output index e6ac3de822..b4cd249371 100644 --- a/testing/btest/Baseline/core.ipv6_ext_headers/output +++ b/testing/btest/Baseline/core.ipv6_ext_headers/output @@ -1,3 +1,3 @@ -weird routing0_hdr from [2001:4f8:4:7:2e0:81ff:fe52:ffff] to [2001:78:1:32::2] -[orig_h=[2001:4f8:4:7:2e0:81ff:fe52:ffff], orig_p=53/udp, resp_h=[2001:78:1:32::2], resp_p=53/udp] -[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=, mobility=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] +weird routing0_hdr from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:78:1:32::2 +[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=53/udp, resp_h=2001:78:1:32::2, resp_p=53/udp] +[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=, mobility=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] diff --git a/testing/btest/Baseline/core.mobile-ipv6-home-addr/output b/testing/btest/Baseline/core.mobile-ipv6-home-addr/output index 63e3fb92f9..88cbe0cb16 100644 --- 
a/testing/btest/Baseline/core.mobile-ipv6-home-addr/output +++ b/testing/btest/Baseline/core.mobile-ipv6-home-addr/output @@ -1,2 +1,2 @@ -[orig_h=[2001:78:1:32::1], orig_p=30000/udp, resp_h=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], resp_p=13000/udp] -[ip=, ip6=[class=0, flow=0, len=36, nxt=60, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=60, hopopts=, dstopts=[nxt=17, len=2, options=[[otype=1, len=2, data=\0\0], [otype=201, len=16, data= ^A\0x\0^A\02\0\0\0\0\0\0\0^A]]], routing=, fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] +[orig_h=2001:78:1:32::1, orig_p=30000/udp, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=13000/udp] +[ip=, ip6=[class=0, flow=0, len=36, nxt=60, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=60, hopopts=, dstopts=[nxt=17, len=2, options=[[otype=1, len=2, data=\0\0], [otype=201, len=16, data= ^A\0x\0^A\02\0\0\0\0\0\0\0^A]]], routing=, fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] diff --git a/testing/btest/Baseline/core.mobile-ipv6-routing/output b/testing/btest/Baseline/core.mobile-ipv6-routing/output index e1cd99da1c..04292caaa7 100644 --- a/testing/btest/Baseline/core.mobile-ipv6-routing/output +++ b/testing/btest/Baseline/core.mobile-ipv6-routing/output @@ -1,2 +1,2 @@ -[orig_h=[2001:4f8:4:7:2e0:81ff:fe52:ffff], orig_p=30000/udp, resp_h=[2001:78:1:32::1], resp_p=13000/udp] -[ip=, ip6=[class=0, flow=0, len=36, nxt=43, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=43, hopopts=, dstopts=, routing=[nxt=17, len=2, rtype=2, segleft=1, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] +[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=30000/udp, resp_h=2001:78:1:32::1, resp_p=13000/udp] +[ip=, ip6=[class=0, flow=0, len=36, nxt=43, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=43, hopopts=, dstopts=, routing=[nxt=17, len=2, rtype=2, segleft=1, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=30000/udp, dport=13000/udp, ulen=12], icmp=] diff --git a/testing/btest/Baseline/core.mobility_msg/output b/testing/btest/Baseline/core.mobility_msg/output index 64315bf370..6f8d6a1699 100644 --- a/testing/btest/Baseline/core.mobility_msg/output +++ b/testing/btest/Baseline/core.mobility_msg/output @@ -1,16 +1,16 @@ Binding ACK: -[class=0, flow=0, len=16, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=6, rsv=0, chksum=53722, msg=[id=6, brr=, hoti=, coti=, hot=, cot=, bu=, back=[status=0, k=T, seq=42, life=8, options=[[otype=1, len=2, data=\0\0]]], be=]]]]] +[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=6, rsv=0, chksum=53722, msg=[id=6, brr=, hoti=, coti=, hot=, cot=, bu=, back=[status=0, k=T, seq=42, life=8, options=[[otype=1, len=2, data=\0\0]]], be=]]]]] Binding Error: -[class=0, flow=0, len=24, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, 
fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=7, rsv=0, chksum=45272, msg=[id=7, brr=, hoti=, coti=, hot=, cot=, bu=, back=, be=[status=1, hoa=[2001:78:1:32::1], options=[]]]]]]] +[class=0, flow=0, len=24, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=7, rsv=0, chksum=45272, msg=[id=7, brr=, hoti=, coti=, hot=, cot=, bu=, back=, be=[status=1, hoa=2001:78:1:32::1, options=[]]]]]]] Binding Refresh Request: -[class=0, flow=0, len=8, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=0, mh_type=0, rsv=0, chksum=55703, msg=[id=0, brr=[rsv=0, options=[]], hoti=, coti=, hot=, cot=, bu=, back=, be=]]]]] +[class=0, flow=0, len=8, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=0, mh_type=0, rsv=0, chksum=55703, msg=[id=0, brr=[rsv=0, options=[]], hoti=, coti=, hot=, cot=, bu=, back=, be=]]]]] Binding Update: -[class=0, flow=0, len=16, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=5, rsv=0, chksum=868, msg=[id=5, brr=, hoti=, coti=, hot=, cot=, bu=[seq=37, a=T, h=T, l=F, k=T, life=3, options=[[otype=1, len=2, data=\0\0]]], back=, be=]]]]] +[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=5, rsv=0, chksum=868, msg=[id=5, brr=, hoti=, coti=, hot=, cot=, bu=[seq=37, a=T, h=T, l=F, k=T, life=3, options=[[otype=1, len=2, data=\0\0]]], back=, be=]]]]] Care-of Test: -[class=0, flow=0, len=24, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=4, rsv=0, chksum=54378, msg=[id=4, brr=, hoti=, coti=, hot=, cot=[nonce_idx=13, cookie=15, token=255, options=[]], bu=, back=, be=]]]]] +[class=0, flow=0, len=24, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=4, rsv=0, chksum=54378, msg=[id=4, brr=, hoti=, coti=, hot=, cot=[nonce_idx=13, cookie=15, token=255, options=[]], bu=, back=, be=]]]]] Care-of Test Init: -[class=0, flow=0, len=16, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=2, rsv=0, chksum=55181, msg=[id=2, brr=, hoti=, coti=[rsv=0, cookie=1, options=[]], hot=, cot=, bu=, back=, be=]]]]] +[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=2, rsv=0, chksum=55181, msg=[id=2, brr=, hoti=, coti=[rsv=0, cookie=1, options=[]], hot=, cot=, bu=, back=, be=]]]]] Home Test: -[class=0, flow=0, len=24, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], 
exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=3, rsv=0, chksum=54634, msg=[id=3, brr=, hoti=, coti=, hot=[nonce_idx=13, cookie=15, token=255, options=[]], cot=, bu=, back=, be=]]]]] +[class=0, flow=0, len=24, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=2, mh_type=3, rsv=0, chksum=54634, msg=[id=3, brr=, hoti=, coti=, hot=[nonce_idx=13, cookie=15, token=255, options=[]], cot=, bu=, back=, be=]]]]] Home Test Init: -[class=0, flow=0, len=16, nxt=135, hlim=64, src=[2001:4f8:4:7:2e0:81ff:fe52:ffff], dst=[2001:4f8:4:7:2e0:81ff:fe52:9a6b], exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=1, rsv=0, chksum=55437, msg=[id=1, brr=, hoti=[rsv=0, cookie=1, options=[]], coti=, hot=, cot=, bu=, back=, be=]]]]] +[class=0, flow=0, len=16, nxt=135, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=135, hopopts=, dstopts=, routing=, fragment=, ah=, esp=, mobility=[nxt=59, len=1, mh_type=1, rsv=0, chksum=55437, msg=[id=1, brr=, hoti=[rsv=0, cookie=1, options=[]], coti=, hot=, cot=, bu=, back=, be=]]]]] diff --git a/testing/btest/Baseline/istate.broccoli-ipv6/bro..stdout b/testing/btest/Baseline/istate.broccoli-ipv6/bro..stdout index 5114999813..0a7bac52c5 100644 --- a/testing/btest/Baseline/istate.broccoli-ipv6/bro..stdout +++ b/testing/btest/Baseline/istate.broccoli-ipv6/bro..stdout @@ -1,9 +1,9 @@ handshake done with peer bro_addr(1.2.3.4) bro_subnet(10.0.0.0/16) -bro_addr([2607:f8b0:4009:802::1014]) -bro_subnet([2607:f8b0::]/32) +bro_addr(2607:f8b0:4009:802::1014) +bro_subnet(2607:f8b0::/32) broccoli_addr(1.2.3.4) broccoli_subnet(10.0.0.0/16) -broccoli_addr([2607:f8b0:4009:802::1014]) -broccoli_subnet([2607:f8b0::]/32) +broccoli_addr(2607:f8b0:4009:802::1014) +broccoli_subnet(2607:f8b0::/32) diff --git a/testing/btest/Baseline/istate.pybroccoli/bro..stdout b/testing/btest/Baseline/istate.pybroccoli/bro..stdout index 9c4637125e..a5d20b1f2a 100644 --- a/testing/btest/Baseline/istate.pybroccoli/bro..stdout +++ b/testing/btest/Baseline/istate.pybroccoli/bro..stdout @@ -1,16 +1,16 @@ ==== atomic -10 2 -1336148094.497041 +1336411585.166009 2.0 mins F 1.5 Servus 5555/tcp 6.7.6.5 -[2001:db8:85a3::8a2e:370:7334] +2001:db8:85a3::8a2e:370:7334 192.168.0.0/16 -[2001:db8:85a3::]/48 +2001:db8:85a3::/48 ==== record [a=42, b=6.6.7.7] 42, 6.6.7.7 diff --git a/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered b/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered index 5d1ca261c4..a44a95bd69 100644 --- a/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered +++ b/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered @@ -1,7 +1,7 @@ ==== atomic a 1 ==== -4L -4 42 42 -1336148094.5020 +1336411585.1711 60.0 True True 3.14 @@ -14,7 +14,7 @@ True True ==== atomic a 2 ==== -10L -10 2 2 -1336148094.4970 +1336411585.1660 120.0 False False 1.5 @@ -27,7 +27,7 @@ False False ==== atomic b 2 ==== -10L -10 2 - 1336148094.4970 + 1336411585.1660 120.0 False False 1.5 diff --git a/testing/btest/Baseline/language.expire_func/output b/testing/btest/Baseline/language.expire_func/output index 13be712d8a..91cd2bad16 100644 --- a/testing/btest/Baseline/language.expire_func/output +++ b/testing/btest/Baseline/language.expire_func/output @@ -16,7 +16,7 @@ am i, [orig_h=172.16.238.131, orig_p=37975/udp, 
resp_h=172.16.238.2, resp_p=53/udp], here, -[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], am } { @@ -25,7 +25,7 @@ am i, [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], here, -[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], am } { @@ -34,7 +34,7 @@ am i, [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], here, -[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], am } @@ -45,7 +45,7 @@ i, [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp], here, [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp], -[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], am } @@ -57,7 +57,7 @@ i, here, [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp], [orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp], -[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], am } @@ -70,7 +70,7 @@ i, here, [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp], [orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp], -[orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp], +[orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp], [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp], am } @@ -82,7 +82,7 @@ expired [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53 expired here expired [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp] expired [orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp] -expired [orig_h=[fe80::20c:29ff:febd:6f01], orig_p=5353/udp, resp_h=[ff02::fb], resp_p=5353/udp] +expired [orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp] expired [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp] expired am { diff --git a/testing/btest/Baseline/language.ipv6-literals/output b/testing/btest/Baseline/language.ipv6-literals/output index a540fe999b..8542af7f91 100644 --- a/testing/btest/Baseline/language.ipv6-literals/output +++ b/testing/btest/Baseline/language.ipv6-literals/output @@ -1,24 +1,24 @@ -[::1] -[::ffff] -[::255.255.255.255] -[::10.10.255.255] -[1::1] -[1::a] -[1::1:1] -[1::1:a] -[a::a] -[a::1] -[a::a:a] -[a::a:1] -[a:a::a] -[aaaa::ffff] +::1 +::ffff +::255.255.255.255 +::10.10.255.255 +1::1 +1::a +1::1:1 +1::1:a +a::a +a::1 +a::a:a +a::a:1 +a:a::a +aaaa::ffff 192.168.1.100 -[ffff::c0a8:164] -[::192.168.1.100] -[::ffff:0:192.168.1.100] -[805b:2d9d:dc28::fc57:d4c8:1fff] -[aaaa::bbbb] -[aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222] 
-[aaaa:bbbb:cccc:dddd:eeee:ffff:1:2222] -[aaaa:bbbb:cccc:dddd:eeee:ffff:0:2222] -[aaaa:bbbb:cccc:dddd:eeee::2222] +ffff::c0a8:164 +::192.168.1.100 +::ffff:0:192.168.1.100 +805b:2d9d:dc28::fc57:d4c8:1fff +aaaa::bbbb +aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222 +aaaa:bbbb:cccc:dddd:eeee:ffff:1:2222 +aaaa:bbbb:cccc:dddd:eeee:ffff:0:2222 +aaaa:bbbb:cccc:dddd:eeee::2222 diff --git a/testing/btest/Baseline/language.sizeof/output b/testing/btest/Baseline/language.sizeof/output index 160ea9ab4c..43cb73f763 100644 --- a/testing/btest/Baseline/language.sizeof/output +++ b/testing/btest/Baseline/language.sizeof/output @@ -1,5 +1,5 @@ IPv4 Address 1.2.3.4: 32 -IPv6 Address [::1]: 128 +IPv6 Address ::1: 128 Boolean T: 1 Count 10: 10 Double -1.23: 1.230000 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log index ed0636bc4a..b396c3fc2d 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log @@ -6,6 +6,6 @@ #fields ts id.orig_h #types time addr 1300475169.780331 173.192.163.128 -1300475167.097012 [fe80::217:f2ff:fed7:cf65] -1300475171.675372 [fe80::3074:17d5:2052:c324] -1300475173.116749 [fe80::3074:17d5:2052:c324] +1300475167.097012 fe80::217:f2ff:fed7:cf65 +1300475171.675372 fe80::3074:17d5:2052:c324 +1300475173.116749 fe80::3074:17d5:2052:c324 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log index e398020a87..c4a515710d 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log @@ -5,9 +5,9 @@ #path conn #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes #types time string addr port addr port enum string interval count count string bool count string count count count count -1329327783.316897 arKYeMETxOg [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49186 [2001:470:4867:99::21] 57086 tcp ftp-data 0.219721 0 342 SF - 0 ShAdfFa 5 372 4 642 -1329327786.524332 k6kgXLOoSKl [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49187 [2001:470:4867:99::21] 57087 tcp ftp-data 0.217501 0 43 SF - 0 ShAdfFa 5 372 4 343 -1329327787.289095 nQcgTWjvg4c [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49188 [2001:470:4867:99::21] 57088 tcp ftp-data 0.217941 0 77 SF - 0 ShAdfFa 5 372 4 377 -1329327795.571921 j4u32Pc5bif [2001:470:4867:99::21] 55785 [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49189 tcp ftp-data 0.109813 77 0 SF - 0 ShADFaf 5 449 4 300 -1329327777.822004 UWkUyAuUGXf [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49185 [2001:470:4867:99::21] 21 tcp ftp 26.658219 310 3448 SF - 0 ShAdDfFa 57 4426 34 5908 -1329327800.017649 TEfuqmmG4bh [2001:470:4867:99::21] 55647 [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49190 tcp ftp-data 0.109181 342 0 SF - 0 ShADFaf 5 714 4 300 +1329327783.316897 arKYeMETxOg 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49186 2001:470:4867:99::21 57086 tcp ftp-data 0.219721 0 342 SF - 0 ShAdfFa 5 372 4 642 +1329327786.524332 k6kgXLOoSKl 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49187 2001:470:4867:99::21 57087 tcp ftp-data 0.217501 0 43 SF - 0 ShAdfFa 5 372 4 343 +1329327787.289095 nQcgTWjvg4c 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49188 
2001:470:4867:99::21 57088 tcp ftp-data 0.217941 0 77 SF - 0 ShAdfFa 5 372 4 377 +1329327795.571921 j4u32Pc5bif 2001:470:4867:99::21 55785 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49189 tcp ftp-data 0.109813 77 0 SF - 0 ShADFaf 5 449 4 300 +1329327777.822004 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 tcp ftp 26.658219 310 3448 SF - 0 ShAdDfFa 57 4426 34 5908 +1329327800.017649 TEfuqmmG4bh 2001:470:4867:99::21 55647 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49190 tcp ftp-data 0.109181 342 0 SF - 0 ShADFaf 5 714 4 300 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log index 61375d7233..8bc2ef2cb7 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log @@ -5,5 +5,5 @@ #path ftp #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p user password command arg mime_type mime_desc file_size reply_code reply_msg tags extraction_file #types time string addr port addr port string string string string string string count count string table[string] file -1329327787.396984 UWkUyAuUGXf [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49185 [2001:470:4867:99::21] 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. - - -1329327795.463946 UWkUyAuUGXf [2001:470:1f11:81f:c999:d94:aa7c:2e3e] 49185 [2001:470:4867:99::21] 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. - - +1329327787.396984 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. - - +1329327795.463946 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. - - From 1e66fe905a948fcded4b0ba13c11e907831c835e Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 9 May 2012 15:08:36 -0500 Subject: [PATCH 276/651] Add support to Bro for connecting with peers over IPv6. - Communication::listen_ipv6 needs to be redef'd to true in order for IPv6 listening sockets to be opened. - Added Communication::listen_retry option as an interval at which to retry binding to socket addresses that were already in use. - Added some explicit baselines to check in the istate.events and istate.events-ssl tests -- the SSL test was incorrectly passing because it compared two empty files. (The files being empty because "http/base" was given as an argument to Bro which it couldn't handle because that script doesn't exist anymore). 
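As a usage note (illustrative only, not part of this change): a node would opt in to the new listening behavior roughly as sketched below. Only the redef'able options named above and the wildcard addresses documented in main.bro are taken from this patch; the @load path and the example port are assumptions.

    # Hypothetical node configuration enabling the new IPv6 options.
    @load frameworks/communication/listen

    redef Communication::listen_ipv6 = T;          # allow binding IPv6 listening sockets
    redef Communication::listen_interface = [::];  # 0.0.0.0 and [::] are both wildcards
    redef Communication::listen_port = 47757/tcp;  # example port, pick your own
    redef Communication::listen_retry = 1 min;     # re-bind interval if the address is in use
                                                   # (patch default is 30 secs)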
--- .../base/frameworks/communication/main.bro | 13 +- scripts/base/utils/addrs.bro | 15 + .../frameworks/communication/listen.bro | 3 +- src/IPAddr.h | 22 +- src/RemoteSerializer.cc | 362 +++++++++++------- src/RemoteSerializer.h | 27 +- src/bro.bif | 9 +- .../Baseline/istate.bro-ipv6/recv..stdout | 1 + .../Baseline/istate.bro-ipv6/send..stdout | 2 + .../Baseline/istate.events-ssl/events.rec.log | 33 ++ .../Baseline/istate.events-ssl/events.snd.log | 33 ++ .../istate.events-ssl/receiver.http.log | 2 +- .../istate.events-ssl/sender.http.log | 2 +- .../Baseline/istate.events/events.rec.log | 33 ++ .../Baseline/istate.events/events.snd.log | 33 ++ .../Baseline/istate.events/receiver.http.log | 2 +- .../Baseline/istate.events/sender.http.log | 2 +- testing/btest/istate/bro-ipv6.bro | 52 +++ testing/btest/istate/events-ssl.bro | 6 +- testing/btest/istate/events.bro | 2 + 20 files changed, 480 insertions(+), 174 deletions(-) create mode 100644 testing/btest/Baseline/istate.bro-ipv6/recv..stdout create mode 100644 testing/btest/Baseline/istate.bro-ipv6/send..stdout create mode 100644 testing/btest/Baseline/istate.events-ssl/events.rec.log create mode 100644 testing/btest/Baseline/istate.events-ssl/events.snd.log create mode 100644 testing/btest/Baseline/istate.events/events.rec.log create mode 100644 testing/btest/Baseline/istate.events/events.snd.log create mode 100644 testing/btest/istate/bro-ipv6.bro diff --git a/scripts/base/frameworks/communication/main.bro b/scripts/base/frameworks/communication/main.bro index 04772f57aa..26ec9f41b8 100644 --- a/scripts/base/frameworks/communication/main.bro +++ b/scripts/base/frameworks/communication/main.bro @@ -2,6 +2,7 @@ ##! and/or transfer events. @load base/frameworks/packet-filter +@load base/utils/addrs module Communication; @@ -10,7 +11,7 @@ export { ## The communication logging stream identifier. redef enum Log::ID += { LOG }; - ## Which interface to listen on (0.0.0.0 for any interface). + ## Which interface to listen on (``0.0.0.0`` or ``[::]`` are wildcards). const listen_interface = 0.0.0.0 &redef; ## Which port to listen on. @@ -19,6 +20,14 @@ export { ## This defines if a listening socket should use SSL. const listen_ssl = F &redef; + ## Defines if a listening socket can bind to IPv6 addresses. + const listen_ipv6 = F &redef; + + ## Defines the interval at which to retry binding to + ## :bro:id:`listen_interface` on :bro:id:`listen_port` if it's already in + ## use. + const listen_retry = 30 secs &redef; + ## Default compression level. Compression level is 0-9, with 0 = no ## compression. global compression_level = 0 &redef; @@ -160,7 +169,7 @@ event remote_log(level: count, src: count, msg: string) # This is a core generated event. event remote_log_peer(p: event_peer, level: count, src: count, msg: string) { - local rmsg = fmt("[#%d/%s:%d] %s", p$id, p$host, p$p, msg); + local rmsg = fmt("[#%d/%s:%d] %s", p$id, addr_to_uri(p$host), p$p, msg); do_script_log_common(level, src, rmsg); } diff --git a/scripts/base/utils/addrs.bro b/scripts/base/utils/addrs.bro index 415b9adfa9..08efd5281a 100644 --- a/scripts/base/utils/addrs.bro +++ b/scripts/base/utils/addrs.bro @@ -98,3 +98,18 @@ function find_ip_addresses(input: string): string_array } return output; } + +## Returns the string representation of an IP address suitable for inclusion +## in a URI. For IPv4, this does no special formatting, but for IPv6, the +## address is included in square brackets. +## +## a: the address to make suitable for URI inclusion. 
+## +## Returns: the string representation of *a* suitable for URI inclusion. +function addr_to_uri(a: addr): string + { + if ( is_v4_addr(a) ) + return fmt("%s", a); + else + return fmt("[%s]", a); + } diff --git a/scripts/policy/frameworks/communication/listen.bro b/scripts/policy/frameworks/communication/listen.bro index e366e5b4ff..609e8c91d6 100644 --- a/scripts/policy/frameworks/communication/listen.bro +++ b/scripts/policy/frameworks/communication/listen.bro @@ -8,5 +8,6 @@ module Communication; event bro_init() &priority=-10 { enable_communication(); - listen(listen_interface, listen_port, listen_ssl); + listen(listen_interface, listen_port, listen_ssl, listen_ipv6, + listen_retry); } diff --git a/src/IPAddr.h b/src/IPAddr.h index 8e1921e07b..447669d422 100644 --- a/src/IPAddr.h +++ b/src/IPAddr.h @@ -188,11 +188,16 @@ public: * IPv4 to IPv6 address mapping to return a full 16 bytes. * * @param bytes The pointer to a memory location in which the - * raw bytes of the address are to be copied in network byte-order. + * raw bytes of the address are to be copied. + * + * @param order The byte-order in which the returned raw bytes are copied. + * The default is network order. */ - void CopyIPv6(uint32_t* bytes) const + void CopyIPv6(uint32_t* bytes, ByteOrder order = Network) const { memcpy(bytes, in6.s6_addr, sizeof(in6.s6_addr)); + if ( order == Host ) + for ( unsigned int i = 0; i < 4; ++i ) bytes[i] = ntohl(bytes[i]); } /** @@ -280,6 +285,19 @@ public: */ string AsString() const; + /** + * Returns a string representation of the address suitable for inclusion + * in an URI. For IPv4 addresses, this is the same as AsString(), but + * IPv6 addresses are encased in square brackets. + */ + string AsURIString() const + { + if ( GetFamily() == IPv4 ) + return AsString(); + else + return string("[") + AsString() + "]"; + } + /** * Returns a host-order, plain hex string representation of the address. */ diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 61be8a9e8f..3abec00f59 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -147,6 +147,7 @@ #include #include +#include #include #include #include @@ -195,7 +196,7 @@ extern "C" { // Gets incremented each time there's an incompatible change // to the communication internals. -static const unsigned short PROTOCOL_VERSION = 0x07; +static const unsigned short PROTOCOL_VERSION = 0x08; static const char MSG_NONE = 0x00; static const char MSG_VERSION = 0x01; @@ -458,17 +459,6 @@ static inline char* fmt_uint32s(int nargs, va_list ap) } #endif - -static inline const char* ip2a(uint32 ip) - { - static char buffer[32]; - struct in_addr addr; - - addr.s_addr = htonl(ip); - - return bro_inet_ntop(AF_INET, &addr, buffer, 32); - } - static pid_t child_pid = 0; // Return true if message type is sent by a peer (rather than the child @@ -683,24 +673,20 @@ RemoteSerializer::PeerID RemoteSerializer::Connect(const IPAddr& ip, if ( ! initialized ) reporter->InternalError("remote serializer not initialized"); - if ( ip.GetFamily() == IPv6 ) - Error("inter-Bro communication not supported over IPv6"); - - const uint32* bytes; - ip.GetBytes(&bytes); - uint32 ip4 = ntohl(*bytes); - if ( ! child_pid ) Fork(); - Peer* p = AddPeer(ip4, port); + Peer* p = AddPeer(ip, port); p->orig = true; if ( our_class ) p->our_class = our_class; - if ( ! SendToChild(MSG_CONNECT_TO, p, 5, p->id, - ip4, port, uint32(retry), use_ssl) ) + uint32 bytes[4]; + ip.CopyIPv6(bytes, IPAddr::Host); + + if ( ! 
SendToChild(MSG_CONNECT_TO, p, 8, p->id, bytes[0], bytes[1], + bytes[2], bytes[3], port, uint32(retry), use_ssl) ) { RemovePeer(p); return false; @@ -1232,7 +1218,8 @@ bool RemoteSerializer::SendCapabilities(Peer* peer) return caps ? SendToChild(MSG_CAPS, peer, 3, caps, 0, 0) : true; } -bool RemoteSerializer::Listen(const IPAddr& ip, uint16 port, bool expect_ssl) +bool RemoteSerializer::Listen(const IPAddr& ip, uint16 port, bool expect_ssl, + bool ipv6, double retry) { if ( ! using_communication ) return true; @@ -1240,14 +1227,15 @@ bool RemoteSerializer::Listen(const IPAddr& ip, uint16 port, bool expect_ssl) if ( ! initialized ) reporter->InternalError("remote serializer not initialized"); - if ( ip.GetFamily() == IPv6 ) - Error("inter-Bro communication not supported over IPv6"); + if ( ! ipv6 && ip.GetFamily() == IPv6 && + ip != IPAddr("0.0.0.0") && ip != IPAddr("::") ) + reporter->FatalError("Attempt to listen on address %s, but IPv6 communication disabled", ip.AsString().c_str()); - const uint32* bytes; - ip.GetBytes(&bytes); - uint32 ip4 = ntohl(*bytes); + uint32 bytes[4]; + ip.CopyIPv6(bytes, IPAddr::Host); - if ( ! SendToChild(MSG_LISTEN, 0, 3, ip4, port, expect_ssl) ) + if ( ! SendToChild(MSG_LISTEN, 0, 8, bytes[0], bytes[1], bytes[2], bytes[3], + port, expect_ssl, ipv6, (uint32) retry) ) return false; listening = true; @@ -1784,7 +1772,7 @@ RecordVal* RemoteSerializer::MakePeerVal(Peer* peer) RecordVal* v = new RecordVal(::peer); v->Assign(0, new Val(uint32(peer->id), TYPE_COUNT)); // Sic! Network order for AddrVal, host order for PortVal. - v->Assign(1, new AddrVal(htonl(peer->ip))); + v->Assign(1, new AddrVal(peer->ip)); v->Assign(2, new PortVal(peer->port, TRANSPORT_TCP)); v->Assign(3, new Val(false, TYPE_BOOL)); v->Assign(4, new StringVal("")); // set when received @@ -1793,8 +1781,8 @@ RecordVal* RemoteSerializer::MakePeerVal(Peer* peer) return v; } -RemoteSerializer::Peer* RemoteSerializer::AddPeer(uint32 ip, uint16 port, - PeerID id) +RemoteSerializer::Peer* RemoteSerializer::AddPeer(const IPAddr& ip, uint16 port, + PeerID id) { Peer* peer = new Peer; peer->id = id != PEER_NONE ? id : id_counter++; @@ -1960,8 +1948,8 @@ bool RemoteSerializer::ProcessConnected() { // IP and port follow. uint32* args = (uint32*) current_args->data; - uint32 host = ntohl(args[0]); // ### Fix: only works for IPv4 - uint16 port = (uint16) ntohl(args[1]); + IPAddr host = IPAddr(IPv6, args, IPAddr::Network); + uint16 port = (uint16) ntohl(args[4]); if ( ! current_peer ) { @@ -2980,7 +2968,8 @@ void RemoteSerializer::Log(LogLevel level, const char* msg, Peer* peer, if ( peer ) len += snprintf(buffer + len, sizeof(buffer) - len, "[#%d/%s:%d] ", - int(peer->id), ip2a(peer->ip), peer->port); + int(peer->id), peer->ip.AsURIString().c_str(), + peer->port); len += safe_snprintf(buffer + len, sizeof(buffer) - len, "%s", msg); @@ -3266,8 +3255,10 @@ SocketComm::SocketComm() terminating = false; killing = false; - listen_fd_clear = -1; - listen_fd_ssl = -1; + listen_port = 0; + listen_ssl = false; + enable_ipv6 = false; + bind_retry_interval = 0; listen_next_try = 0; // We don't want to use the signal handlers of our parent. 
@@ -3290,8 +3281,7 @@ SocketComm::~SocketComm() delete peers[i]->io; delete io; - close(listen_fd_clear); - close(listen_fd_ssl); + CloseListenFDs(); } static unsigned int first_rtime = 0; @@ -3340,20 +3330,13 @@ void SocketComm::Run() } if ( listen_next_try && time(0) > listen_next_try ) - Listen(listen_if, listen_port, listen_ssl); + Listen(); - if ( listen_fd_clear >= 0 ) + for ( size_t i = 0; i < listen_fds.size(); ++i ) { - FD_SET(listen_fd_clear, &fd_read); - if ( listen_fd_clear > max_fd ) - max_fd = listen_fd_clear; - } - - if ( listen_fd_ssl >= 0 ) - { - FD_SET(listen_fd_ssl, &fd_read); - if ( listen_fd_ssl > max_fd ) - max_fd = listen_fd_ssl; + FD_SET(listen_fds[i], &fd_read); + if ( listen_fds[i] > max_fd ) + max_fd = listen_fds[i]; } if ( io->IsFillingUp() && ! shutting_conns_down ) @@ -3442,12 +3425,9 @@ void SocketComm::Run() } } - if ( listen_fd_clear >= 0 && - FD_ISSET(listen_fd_clear, &fd_read) ) - AcceptConnection(listen_fd_clear); - - if ( listen_fd_ssl >= 0 && FD_ISSET(listen_fd_ssl, &fd_read) ) - AcceptConnection(listen_fd_ssl); + for ( size_t i = 0; i < listen_fds.size(); ++i ) + if ( FD_ISSET(listen_fds[i], &fd_read) ) + AcceptConnection(listen_fds[i]); // Hack to display CPU usage of the child, triggered via // SIGPROF. @@ -3571,13 +3551,8 @@ bool SocketComm::DoParentMessage() case MSG_LISTEN_STOP: { - if ( listen_fd_ssl >= 0 ) - close(listen_fd_ssl); + CloseListenFDs(); - if ( listen_fd_clear >= 0 ) - close(listen_fd_clear); - - listen_fd_clear = listen_fd_ssl = -1; Log("stopped listening"); return true; @@ -3721,10 +3696,10 @@ bool SocketComm::ProcessConnectTo() Peer* peer = new Peer; peer->id = ntohl(args[0]); - peer->ip = ntohl(args[1]); - peer->port = ntohl(args[2]); - peer->retry = ntohl(args[3]); - peer->ssl = ntohl(args[4]); + peer->ip = IPAddr(IPv6, &args[1], IPAddr::Network); + peer->port = ntohl(args[5]); + peer->retry = ntohl(args[6]); + peer->ssl = ntohl(args[7]); return Connect(peer); } @@ -3734,11 +3709,13 @@ bool SocketComm::ProcessListen() assert(parent_args); uint32* args = (uint32*) parent_args->data; - uint32 addr = ntohl(args[0]); - uint16 port = uint16(ntohl(args[1])); - uint32 ssl = ntohl(args[2]); + listen_if = IPAddr(IPv6, args, IPAddr::Network); + listen_port = uint16(ntohl(args[4])); + listen_ssl = ntohl(args[5]) != 0; + enable_ipv6 = ntohl(args[6]) != 0; + bind_retry_interval = ntohl(args[7]); - return Listen(addr, port, ssl); + return Listen(); } bool SocketComm::ProcessParentCompress() @@ -3900,29 +3877,53 @@ bool SocketComm::ProcessPeerCompress(Peer* peer) bool SocketComm::Connect(Peer* peer) { - struct sockaddr_in server; + int status; + addrinfo hints, *res, *res0; + bzero(&hints, sizeof(hints)); - int sockfd = socket(PF_INET, SOCK_STREAM, 0); - if ( sockfd < 0 ) + hints.ai_family = PF_UNSPEC; + hints.ai_protocol = IPPROTO_TCP; + hints.ai_socktype = SOCK_STREAM; + hints.ai_flags = AI_NUMERICHOST; + + char port_str[16]; + modp_uitoa10(peer->port, port_str); + + // TODO: better to accept string arguments from the user to pass into + // getaddrinfo? This might make it easier to explicitly connect to + // non-global IPv6 addresses with a scope zone identifier (RFC 4007). 
+ status = getaddrinfo(peer->ip.AsString().c_str(), port_str, &hints, &res0); + if ( status != 0 ) { - Error(fmt("can't create socket, %s", strerror(errno))); + Error(fmt("getaddrinfo error: %s", gai_strerror(status))); return false; } - bzero(&server, sizeof(server)); - server.sin_family = AF_INET; - server.sin_port = htons(peer->port); - server.sin_addr.s_addr = htonl(peer->ip); - - bool connected = true; - - if ( connect(sockfd, (sockaddr*) &server, sizeof(server)) < 0 ) + int sockfd = -1; + for ( res = res0; res; res = res->ai_next ) { - Error(fmt("connect failed: %s", strerror(errno)), peer); - close(sockfd); - connected = false; + sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol); + if ( sockfd < 0 ) + { + Error(fmt("can't create connect socket, %s", strerror(errno))); + continue; + } + + if ( connect(sockfd, res->ai_addr, res->ai_addrlen) < 0 ) + { + Error(fmt("connect failed: %s", strerror(errno)), peer); + close(sockfd); + sockfd = -1; + continue; + } + + break; } + freeaddrinfo(res0); + + bool connected = sockfd != -1; + if ( ! (connected || peer->retry) ) { CloseConnection(peer, false); @@ -3947,9 +3948,7 @@ bool SocketComm::Connect(Peer* peer) if ( connected ) { if ( peer->ssl ) - { peer->io = new ChunkedIOSSL(sockfd, false); - } else peer->io = new ChunkedIOFd(sockfd, "child->peer"); @@ -3964,7 +3963,12 @@ bool SocketComm::Connect(Peer* peer) if ( connected ) { Log("connected", peer); - if ( ! SendToParent(MSG_CONNECTED, peer, 2, peer->ip, peer->port) ) + + uint32 bytes[4]; + peer->ip.CopyIPv6(bytes, IPAddr::Host); + + if ( ! SendToParent(MSG_CONNECTED, peer, 5, bytes[0], bytes[1], + bytes[2], bytes[3], peer->port) ) return false; } @@ -4001,86 +4005,139 @@ bool SocketComm::CloseConnection(Peer* peer, bool reconnect) return true; } -bool SocketComm::Listen(uint32 ip, uint16 port, bool expect_ssl) +bool SocketComm::Listen() { - int* listen_fd = expect_ssl ? &listen_fd_ssl : &listen_fd_clear; + int status, on = 1; + addrinfo hints, *res, *res0; + bzero(&hints, sizeof(hints)); - if ( *listen_fd >= 0 ) - close(*listen_fd); - - struct sockaddr_in server; - - *listen_fd = socket(PF_INET, SOCK_STREAM, 0); - if ( *listen_fd < 0 ) + if ( enable_ipv6 ) { - Error(fmt("can't create listen socket, %s", - strerror(errno))); + if ( listen_if == IPAddr("0.0.0.0") || listen_if == IPAddr("::") ) + hints.ai_family = PF_UNSPEC; + else + hints.ai_family = listen_if.GetFamily() == IPv4 ? PF_INET : PF_INET6; + } + else + hints.ai_family = PF_INET; + + hints.ai_protocol = IPPROTO_TCP; + hints.ai_socktype = SOCK_STREAM; + hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG | AI_NUMERICHOST; + + char port_str[16]; + modp_uitoa10(listen_port, port_str); + + const char* addr_str = 0; + if ( listen_if != IPAddr("0.0.0.0") && listen_if != IPAddr("::") ) + addr_str = listen_if.AsString().c_str(); + + CloseListenFDs(); + + // TODO: better to accept string arguments from the user to pass into + // getaddrinfo? This might make it easier to explicitly bind to a + // non-global IPv6 address with a scope zone identifier (RFC 4007). + if ( (status = getaddrinfo(addr_str, port_str, &hints, &res0)) != 0 ) + { + Error(fmt("getaddrinfo error: %s", gai_strerror(status))); return false; } - // Set SO_REUSEADDR. 
- int turn_on = 1; - if ( setsockopt(*listen_fd, SOL_SOCKET, SO_REUSEADDR, - &turn_on, sizeof(turn_on)) < 0 ) + for ( res = res0; res; res = res->ai_next ) { - Error(fmt("can't set SO_REUSEADDR, %s", - strerror(errno))); - return false; - } - - bzero(&server, sizeof(server)); - server.sin_family = AF_INET; - server.sin_port = htons(port); - server.sin_addr.s_addr = htonl(ip); - - if ( bind(*listen_fd, (sockaddr*) &server, sizeof(server)) < 0 ) - { - Error(fmt("can't bind to port %d, %s", port, strerror(errno))); - close(*listen_fd); - *listen_fd = -1; - - if ( errno == EADDRINUSE ) + if ( res->ai_family != AF_INET && res->ai_family != AF_INET6 ) { - listen_if = ip; - listen_port = port; - listen_ssl = expect_ssl; - // FIXME: Make this timeout configurable. - listen_next_try = time(0) + 30; + Error(fmt("can't create listen socket: unknown address family, %d", + res->ai_family)); + continue; } - return false; + + IPAddr a = res->ai_family == AF_INET ? + IPAddr(((sockaddr_in*)res->ai_addr)->sin_addr) : + IPAddr(((sockaddr_in6*)res->ai_addr)->sin6_addr); + + int fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol); + if ( fd < 0 ) + { + Error(fmt("can't create listen socket, %s", strerror(errno))); + continue; + } + + if ( setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0 ) + Error(fmt("can't set SO_REUSEADDR, %s", strerror(errno))); + + // For IPv6 listening sockets, we don't want do dual binding to also + // get IPv4-mapped addresses because that's not as portable. e.g. + // many BSDs don't allow that. + if ( res->ai_family == AF_INET6 && + setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)) < 0 ) + Error(fmt("can't set IPV6_V6ONLY, %s", strerror(errno))); + + if ( bind(fd, res->ai_addr, res->ai_addrlen) < 0 ) + { + Error(fmt("can't bind to %s:%s, %s", a.AsURIString().c_str(), + port_str, strerror(errno))); + close(fd); + + if ( errno == EADDRINUSE ) + { + // Abandon completely this attempt to set up listening sockets, + // try again later. + CloseListenFDs(); + listen_next_try = time(0) + bind_retry_interval; + return false; + } + continue; + } + + if ( listen(fd, 50) < 0 ) + { + Error(fmt("can't listen on %s:%s, %s", a.AsURIString().c_str(), + port_str, strerror(errno))); + close(fd); + continue; + } + + listen_fds.push_back(fd); + Log(fmt("listening on %s:%s (%s)", a.AsURIString().c_str(), port_str, + listen_ssl ? "ssl" : "clear")); } - if ( listen(*listen_fd, 50) < 0 ) - { - Error(fmt("can't listen, %s", strerror(errno))); - return false; - } + freeaddrinfo(res0); listen_next_try = 0; - Log(fmt("listening on %s:%d (%s)", - ip2a(ip), port, expect_ssl ? "ssl" : "clear")); - return true; + return listen_fds.size() > 0; } bool SocketComm::AcceptConnection(int fd) { - sockaddr_in client; + sockaddr_storage client; socklen_t len = sizeof(client); int clientfd = accept(fd, (sockaddr*) &client, &len); if ( clientfd < 0 ) { - Error(fmt("accept failed, %s %d", - strerror(errno), errno)); + Error(fmt("accept failed, %s %d", strerror(errno), errno)); + return false; + } + + if ( client.ss_family != AF_INET && client.ss_family != AF_INET6 ) + { + Error(fmt("accept fail, unknown address family %d", client.ss_family)); + close(clientfd); return false; } Peer* peer = new Peer; peer->id = id_counter++; - peer->ip = ntohl(client.sin_addr.s_addr); - peer->port = ntohs(client.sin_port); + peer->ip = client.ss_family == AF_INET ? + IPAddr(((sockaddr_in*)&client)->sin_addr) : + IPAddr(((sockaddr_in6*)&client)->sin6_addr); + peer->port = client.ss_family == AF_INET ? 
+ ntohs(((sockaddr_in*)&client)->sin_port) : + ntohs(((sockaddr_in6*)&client)->sin6_port); peer->connected = true; - peer->ssl = (fd == listen_fd_ssl); + peer->ssl = listen_ssl; peer->compressor = false; if ( peer->ssl ) @@ -4090,8 +4147,7 @@ bool SocketComm::AcceptConnection(int fd) if ( ! peer->io->Init() ) { - Error(fmt("can't init peer io: %s", - peer->io->Error()), false); + Error(fmt("can't init peer io: %s", peer->io->Error()), false); return false; } @@ -4099,7 +4155,11 @@ bool SocketComm::AcceptConnection(int fd) Log(fmt("accepted %s connection", peer->ssl ? "SSL" : "clear"), peer); - if ( ! SendToParent(MSG_CONNECTED, peer, 2, peer->ip, peer->port) ) + uint32 bytes[4]; + peer->ip.CopyIPv6(bytes, IPAddr::Host); + + if ( ! SendToParent(MSG_CONNECTED, peer, 5, bytes[0], bytes[1], bytes[2], + bytes[3], peer->port) ) return false; return true; @@ -4117,12 +4177,19 @@ const char* SocketComm::MakeLogString(const char* msg, Peer* peer) if ( peer ) len = snprintf(buffer, BUFSIZE, "[#%d/%s:%d] ", int(peer->id), - ip2a(peer->ip), peer->port); + peer->ip.AsURIString().c_str(), peer->port); len += safe_snprintf(buffer + len, BUFSIZE - len, "%s", msg); return buffer; } +void SocketComm::CloseListenFDs() + { + for ( size_t i = 0; i < listen_fds.size(); ++i ) + close(listen_fds[i]); + listen_fds.clear(); + } + void SocketComm::Error(const char* msg, bool kill_me) { if ( kill_me ) @@ -4165,7 +4232,7 @@ void SocketComm::Log(const char* msg, Peer* peer) void SocketComm::InternalError(const char* msg) { - fprintf(stderr, "interal error in child: %s\n", msg); + fprintf(stderr, "internal error in child: %s\n", msg); Kill(); } @@ -4180,8 +4247,7 @@ void SocketComm::Kill() LogProf(); Log("terminating"); - close(listen_fd_clear); - close(listen_fd_ssl); + CloseListenFDs(); kill(getpid(), SIGTERM); diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index 05d25ca525..f6f94f53d3 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -10,8 +10,7 @@ #include "Stats.h" #include "File.h" -// All IP arguments are in host byte-order. -// FIXME: Change this to network byte order +#include class IncrementalSendTimer; @@ -63,7 +62,8 @@ public: bool CompleteHandshake(PeerID peer); // Start to listen. - bool Listen(const IPAddr& ip, uint16 port, bool expect_ssl); + bool Listen(const IPAddr& ip, uint16 port, bool expect_ssl, bool ipv6, + double retry); // Stop it. bool StopListening(); @@ -179,9 +179,7 @@ protected: struct Peer { PeerID id; // Unique ID (non-zero) per peer. - // ### Fix: currently, we only work for IPv4. 
- // addr_type ip; - uint32 ip; + IPAddr ip; uint16 port; handler_list handlers; @@ -277,7 +275,7 @@ protected: bool ProcessLogWrite(); bool ProcessRequestLogs(); - Peer* AddPeer(uint32 ip, uint16 port, PeerID id = PEER_NONE); + Peer* AddPeer(const IPAddr& ip, uint16 port, PeerID id = PEER_NONE); Peer* LookupPeer(PeerID id, bool only_if_connected); void RemovePeer(Peer* peer); bool IsConnectedPeer(PeerID id); @@ -412,7 +410,6 @@ protected: { id = 0; io = 0; - ip = 0; port = 0; state = 0; connected = false; @@ -424,7 +421,7 @@ protected: RemoteSerializer::PeerID id; ChunkedIO* io; - uint32 ip; + IPAddr ip; uint16 port; char state; bool connected; @@ -437,7 +434,7 @@ protected: bool compressor; }; - bool Listen(uint32 ip, uint16 port, bool expect_ssl); + bool Listen(); bool AcceptConnection(int listen_fd); bool Connect(Peer* peer); bool CloseConnection(Peer* peer, bool reconnect); @@ -482,6 +479,9 @@ protected: bool ForwardChunkToPeer(); const char* MakeLogString(const char* msg, Peer *peer); + // Closes all file descriptors associated with listening sockets. + void CloseListenFDs(); + // Peers we are communicating with: declare(PList, Peer); typedef PList(Peer) peer_list; @@ -498,14 +498,15 @@ protected: char parent_msgtype; ChunkedIO::Chunk* parent_args; - int listen_fd_clear; - int listen_fd_ssl; + vector listen_fds; // If the port we're trying to bind to is already in use, we will retry // it regularly. - uint32 listen_if; // Fix: only supports IPv4 + IPAddr listen_if; uint16 listen_port; bool listen_ssl; + bool enable_ipv6; // allow IPv6 listen sockets + uint32 bind_retry_interval; time_t listen_next_try; bool shutting_conns_down; bool terminating; diff --git a/src/bro.bif b/src/bro.bif index 15740a83c7..3f4215dc13 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -5402,12 +5402,17 @@ function set_compression_level%(p: event_peer, level: count%) : bool ## ## ssl: If true, Bro uses SSL to encrypt the session. ## +## ipv6: If true, enable listening on IPv6 addresses. +## +## retry_interval: If address *ip* is found to be already in use, this is +## the interval at which to automatically retry binding. +## ## Returns: True on success. ## ## .. bro:see:: connect disconnect -function listen%(ip: addr, p: port, ssl: bool %) : bool +function listen%(ip: addr, p: port, ssl: bool, ipv6: bool, retry_interval: interval%) : bool %{ - return new Val(remote_serializer->Listen(ip->AsAddr(), p->Port(), ssl), TYPE_BOOL); + return new Val(remote_serializer->Listen(ip->AsAddr(), p->Port(), ssl, ipv6, retry_interval), TYPE_BOOL); %} ## Checks whether the last raised event came from a remote peer. 
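For illustration, a minimal sketch of calling the extended bif directly from a script could look like the following. The port 47757/tcp and the 30-second retry interval are arbitrary example values, and in practice listening is normally enabled via the Communication framework's listen script (which ends up invoking this bif), so treat this only as a demonstration of the new signature documented above:

    event bro_init()
        {
        # Listen on every address ([::] is handled as a wildcard by the new
        # Listen() code), without SSL, with IPv6 sockets enabled, and retry
        # the bind every 30 seconds if the port is already in use.
        listen([::], 47757/tcp, F, T, 30 secs);
        }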
diff --git a/testing/btest/Baseline/istate.bro-ipv6/recv..stdout b/testing/btest/Baseline/istate.bro-ipv6/recv..stdout new file mode 100644 index 0000000000..673af68234 --- /dev/null +++ b/testing/btest/Baseline/istate.bro-ipv6/recv..stdout @@ -0,0 +1 @@ +handshake done with peer: ::1 diff --git a/testing/btest/Baseline/istate.bro-ipv6/send..stdout b/testing/btest/Baseline/istate.bro-ipv6/send..stdout new file mode 100644 index 0000000000..fbc855464d --- /dev/null +++ b/testing/btest/Baseline/istate.bro-ipv6/send..stdout @@ -0,0 +1,2 @@ +handshake done with peer: ::1 +my_event: hello world diff --git a/testing/btest/Baseline/istate.events-ssl/events.rec.log b/testing/btest/Baseline/istate.events-ssl/events.rec.log new file mode 100644 index 0000000000..04993fb84a --- /dev/null +++ b/testing/btest/Baseline/istate.events-ssl/events.rec.log @@ -0,0 +1,33 @@ +http_request +http_begin_entity +http_header +http_header +http_header +http_header +http_all_headers +http_content_type +http_end_entity +http_message_done +http_signature_found +http_reply +http_begin_entity +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_all_headers +http_content_type +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_end_entity +http_message_done diff --git a/testing/btest/Baseline/istate.events-ssl/events.snd.log b/testing/btest/Baseline/istate.events-ssl/events.snd.log new file mode 100644 index 0000000000..04993fb84a --- /dev/null +++ b/testing/btest/Baseline/istate.events-ssl/events.snd.log @@ -0,0 +1,33 @@ +http_request +http_begin_entity +http_header +http_header +http_header +http_header +http_all_headers +http_content_type +http_end_entity +http_message_done +http_signature_found +http_reply +http_begin_entity +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_all_headers +http_content_type +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_end_entity +http_message_done diff --git a/testing/btest/Baseline/istate.events-ssl/receiver.http.log b/testing/btest/Baseline/istate.events-ssl/receiver.http.log index 1601f8ad3c..5a7912d23d 100644 --- a/testing/btest/Baseline/istate.events-ssl/receiver.http.log +++ b/testing/btest/Baseline/istate.events-ssl/receiver.http.log @@ -5,4 +5,4 @@ #path http #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1324314406.995958 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +1336588614.060989 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - diff --git a/testing/btest/Baseline/istate.events-ssl/sender.http.log b/testing/btest/Baseline/istate.events-ssl/sender.http.log index 1601f8ad3c..5a7912d23d 100644 --- a/testing/btest/Baseline/istate.events-ssl/sender.http.log +++ b/testing/btest/Baseline/istate.events-ssl/sender.http.log @@ -5,4 
+5,4 @@ #path http #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1324314406.995958 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +1336588614.060989 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - diff --git a/testing/btest/Baseline/istate.events/events.rec.log b/testing/btest/Baseline/istate.events/events.rec.log new file mode 100644 index 0000000000..04993fb84a --- /dev/null +++ b/testing/btest/Baseline/istate.events/events.rec.log @@ -0,0 +1,33 @@ +http_request +http_begin_entity +http_header +http_header +http_header +http_header +http_all_headers +http_content_type +http_end_entity +http_message_done +http_signature_found +http_reply +http_begin_entity +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_all_headers +http_content_type +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_end_entity +http_message_done diff --git a/testing/btest/Baseline/istate.events/events.snd.log b/testing/btest/Baseline/istate.events/events.snd.log new file mode 100644 index 0000000000..04993fb84a --- /dev/null +++ b/testing/btest/Baseline/istate.events/events.snd.log @@ -0,0 +1,33 @@ +http_request +http_begin_entity +http_header +http_header +http_header +http_header +http_all_headers +http_content_type +http_end_entity +http_message_done +http_signature_found +http_reply +http_begin_entity +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_header +http_all_headers +http_content_type +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_entity_data +http_end_entity +http_message_done diff --git a/testing/btest/Baseline/istate.events/receiver.http.log b/testing/btest/Baseline/istate.events/receiver.http.log index 25a7f289c0..55a0189cec 100644 --- a/testing/btest/Baseline/istate.events/receiver.http.log +++ b/testing/btest/Baseline/istate.events/receiver.http.log @@ -5,4 +5,4 @@ #path http #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1324314415.616486 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +1336587178.164598 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - diff --git a/testing/btest/Baseline/istate.events/sender.http.log b/testing/btest/Baseline/istate.events/sender.http.log index 25a7f289c0..55a0189cec 100644 --- 
a/testing/btest/Baseline/istate.events/sender.http.log +++ b/testing/btest/Baseline/istate.events/sender.http.log @@ -5,4 +5,4 @@ #path http #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1324314415.616486 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +1336587178.164598 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - diff --git a/testing/btest/istate/bro-ipv6.bro b/testing/btest/istate/bro-ipv6.bro new file mode 100644 index 0000000000..6230018636 --- /dev/null +++ b/testing/btest/istate/bro-ipv6.bro @@ -0,0 +1,52 @@ +# @TEST-GROUP: comm +# +# @TEST-REQUIRES: ifconfig | grep -q "inet6 ::1" +# +# @TEST-EXEC: btest-bg-run recv bro -b ../recv.bro +# @TEST-EXEC: btest-bg-run send bro -b ../send.bro +# @TEST-EXEC: btest-bg-wait -k 20 +# +# @TEST-EXEC: btest-diff recv/.stdout +# @TEST-EXEC: btest-diff send/.stdout + +@TEST-START-FILE send.bro + +@load base/frameworks/communication + +redef Communication::nodes += { + ["foo"] = [$host=[::1], $connect=T, $events=/my_event/] +}; + +global my_event: event(s: string); + +event remote_connection_handshake_done(p: event_peer) + { + print fmt("handshake done with peer: %s", p$host); + } + +event my_event(s: string) + { + print fmt("my_event: %s", s); + terminate(); + } + +@TEST-END-FILE + +############# + +@TEST-START-FILE recv.bro + +@load frameworks/communication/listen + +redef Communication::listen_ipv6=T; + +global my_event: event(s: string); + +event remote_connection_handshake_done(p: event_peer) + { + print fmt("handshake done with peer: %s", p$host); + event my_event("hello world"); + terminate(); + } + +@TEST-END-FILE diff --git a/testing/btest/istate/events-ssl.bro b/testing/btest/istate/events-ssl.bro index 25aa2dc8fb..c86087df81 100644 --- a/testing/btest/istate/events-ssl.bro +++ b/testing/btest/istate/events-ssl.bro @@ -8,8 +8,10 @@ # @TEST-EXEC: btest-diff receiver/http.log # @TEST-EXEC: cmp sender/http.log receiver/http.log # -# @TEST-EXEC: bro -x sender/events.bst http/base | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.snd.log -# @TEST-EXEC: bro -x receiver/events.bst http/base | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.rec.log +# @TEST-EXEC: bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.snd.log +# @TEST-EXEC: bro -x receiver/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.rec.log +# @TEST-EXEC: btest-diff events.rec.log +# @TEST-EXEC: btest-diff events.snd.log # @TEST-EXEC: cmp events.rec.log events.snd.log # # We don't compare the transmitted event paramerters anymore. 
With the dynamic diff --git a/testing/btest/istate/events.bro b/testing/btest/istate/events.bro index 81a1d765db..6d8227c810 100644 --- a/testing/btest/istate/events.bro +++ b/testing/btest/istate/events.bro @@ -10,6 +10,8 @@ # # @TEST-EXEC: bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.snd.log # @TEST-EXEC: bro -x receiver/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.rec.log +# @TEST-EXEC: btest-diff events.rec.log +# @TEST-EXEC: btest-diff events.snd.log # @TEST-EXEC: cmp events.rec.log events.snd.log # # We don't compare the transmitted event paramerters anymore. With the dynamic From 8bb62eaaa2df13adf1de74081ca4f8b1dfc66423 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 9 May 2012 16:09:16 -0500 Subject: [PATCH 277/651] Undo communication protocol version bump. Looks like it wasn't necessary because no message between remote peers needed to be changed to support IPv6, just messages between Bro parent and child processes were changed. --- src/RemoteSerializer.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 3abec00f59..0383977de1 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -196,7 +196,7 @@ extern "C" { // Gets incremented each time there's an incompatible change // to the communication internals. -static const unsigned short PROTOCOL_VERSION = 0x08; +static const unsigned short PROTOCOL_VERSION = 0x07; static const char MSG_NONE = 0x00; static const char MSG_VERSION = 0x01; From 2338a322882e966c9f1ce673ae3f180b153f73bd Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 10 May 2012 10:47:39 -0500 Subject: [PATCH 278/651] Remove AI_ADDRCONFIG getaddrinfo hints flag for listening sockets. Because, according to RFC 3493, that will cause getaddrinfo to overlook the ::1 loopback if there's not some other interface with a global IPv6 address. The rationale being that the flag helps prevent unnecessary AAAA lookups, but since I set AI_NUMERICHOST, lookups aren't going to happen anyway. Also update the IPv6 Bro communication test to get it to work more reliably. 
--- src/RemoteSerializer.cc | 2 +- .../recv..stdout | 0 .../send..stdout | 0 .../btest/istate/{bro-ipv6.bro => bro-ipv6-socket.bro} | 8 ++++++-- 4 files changed, 7 insertions(+), 3 deletions(-) rename testing/btest/Baseline/{istate.bro-ipv6 => istate.bro-ipv6-socket}/recv..stdout (100%) rename testing/btest/Baseline/{istate.bro-ipv6 => istate.bro-ipv6-socket}/send..stdout (100%) rename testing/btest/istate/{bro-ipv6.bro => bro-ipv6-socket.bro} (88%) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 0383977de1..9123e99ef4 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -4023,7 +4023,7 @@ bool SocketComm::Listen() hints.ai_protocol = IPPROTO_TCP; hints.ai_socktype = SOCK_STREAM; - hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG | AI_NUMERICHOST; + hints.ai_flags = AI_PASSIVE | AI_NUMERICHOST; char port_str[16]; modp_uitoa10(listen_port, port_str); diff --git a/testing/btest/Baseline/istate.bro-ipv6/recv..stdout b/testing/btest/Baseline/istate.bro-ipv6-socket/recv..stdout similarity index 100% rename from testing/btest/Baseline/istate.bro-ipv6/recv..stdout rename to testing/btest/Baseline/istate.bro-ipv6-socket/recv..stdout diff --git a/testing/btest/Baseline/istate.bro-ipv6/send..stdout b/testing/btest/Baseline/istate.bro-ipv6-socket/send..stdout similarity index 100% rename from testing/btest/Baseline/istate.bro-ipv6/send..stdout rename to testing/btest/Baseline/istate.bro-ipv6-socket/send..stdout diff --git a/testing/btest/istate/bro-ipv6.bro b/testing/btest/istate/bro-ipv6-socket.bro similarity index 88% rename from testing/btest/istate/bro-ipv6.bro rename to testing/btest/istate/bro-ipv6-socket.bro index 6230018636..ae77a42c54 100644 --- a/testing/btest/istate/bro-ipv6.bro +++ b/testing/btest/istate/bro-ipv6-socket.bro @@ -1,11 +1,11 @@ # @TEST-GROUP: comm # -# @TEST-REQUIRES: ifconfig | grep -q "inet6 ::1" +# @TEST-REQUIRES: ifconfig | grep -q -E "inet6 ::1|inet6 addr: ::1" # # @TEST-EXEC: btest-bg-run recv bro -b ../recv.bro # @TEST-EXEC: btest-bg-run send bro -b ../send.bro # @TEST-EXEC: btest-bg-wait -k 20 -# +# # @TEST-EXEC: btest-diff recv/.stdout # @TEST-EXEC: btest-diff send/.stdout @@ -46,6 +46,10 @@ event remote_connection_handshake_done(p: event_peer) { print fmt("handshake done with peer: %s", p$host); event my_event("hello world"); + } + +event remote_connection_closed(p: event_peer) + { terminate(); } From 98394a698d4a9963718fcbf22994aebd0970b6cf Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 10 May 2012 15:50:19 -0500 Subject: [PATCH 279/651] Generate icmp_error_message event for ICMPv6 error msgs Use the (previously unused) icmp_error_message event for ICMPv6 error messages that don't have a dedicated event. Previously, icmp_sent was being generated, but icmp_error_message contains more info (icmp_sent is still being used as a fallback for other icmp messages that don't have a dedicated event). Also improved documentation comments for all icmp-related events. 
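As a concrete illustration of the new fallback, a minimal handler sketch for the event could look like the following; the print format is arbitrary, the signature matches the event.bif declaration in the diff below, and (as the documentation below notes) the context fields may be left empty if the encapsulated header was truncated:

    event icmp_error_message(c: connection, icmp: icmp_conn, code: count, context: icmp_context)
        {
        # Report the ICMPv6 error code together with the ICMP flow's
        # endpoints and the connection tuple of the encapsulated packet.
        print fmt("ICMPv6 error code %d between %s and %s, context %s",
                  code, c$id$orig_h, c$id$resp_h, context$id);
        }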
--- src/ICMP.cc | 9 ++++- src/event.bif | 93 +++++++++++++++++++++++++++------------------------ 2 files changed, 57 insertions(+), 45 deletions(-) diff --git a/src/ICMP.cc b/src/ICMP.cc index dd2108ebf0..2f11337d8a 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -181,7 +181,10 @@ void ICMP_Analyzer::NextICMP6(double t, const struct icmp* icmpp, int len, int c case MLD_LISTENER_REDUCTION: #endif default: - ICMPEvent(icmp_sent, icmpp, len, 1, ip_hdr); + if ( icmpp->icmp_type < 128 ) + Context6(t, icmpp, len, caplen, data, ip_hdr); + else + ICMPEvent(icmp_sent, icmpp, len, 1, ip_hdr); break; } } @@ -663,6 +666,10 @@ void ICMP_Analyzer::Context6(double t, const struct icmp* icmpp, case ICMP6_PACKET_TOO_BIG: f = icmp_packet_too_big; break; + + default: + f = icmp_error_message; + break; } if ( f ) diff --git a/src/event.bif b/src/event.bif index 5ef3e8f04b..ded054dd53 100644 --- a/src/event.bif +++ b/src/event.bif @@ -762,10 +762,9 @@ event udp_contents%(u: connection, is_orig: bool, contents: string%); ## .. bro:see:: udp_contents udp_reply udp_request event udp_session_done%(u: connection%); -## Generated for all ICMP messages that are not handled separetely with dedicated -## ICMP events. Bro's ICMP analyzer handles a number of ICMP messages directly -## with dedicated events. This handlers acts as a fallback for those it doesn't. -## The *icmp* record provides more information about the message. +## Generated for all ICMP messages that are not handled separately with dedicated +## ICMP events. Bro's ICMP analyzer handles a number of ICMP messages directly +## with dedicated events. This event acts as a fallback for those it doesn't. ## ## See `Wikipedia ## `__ for more @@ -776,8 +775,7 @@ event udp_session_done%(u: connection%); ## icmp: Additional ICMP-specific information augmenting the standard ## connection record *c*. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect -## icmp_time_exceeded icmp_unreachable +## .. bro:see:: icmp_error_message event icmp_sent%(c: connection, icmp: icmp_conn%); ## Generated for ICMP *echo request* messages. @@ -798,8 +796,7 @@ event icmp_sent%(c: connection, icmp: icmp_conn%); ## payload: The message-specific data of the packet payload, i.e., everything after ## the first 8 bytes of the ICMP header. ## -## .. bro:see:: icmp_echo_reply icmp_redirect icmp_sent -## icmp_time_exceeded icmp_unreachable +## .. bro:see:: icmp_echo_reply event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); ## Generated for ICMP *echo reply* messages. @@ -820,26 +817,30 @@ event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, ## payload: The message-specific data of the packet payload, i.e., everything after ## the first 8 bytes of the ICMP header. ## -## .. bro:see:: icmp_echo_request icmp_redirect icmp_sent -## icmp_time_exceeded icmp_unreachable +## .. bro:see:: icmp_echo_request event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); -## Generated for all ICMP error messages that are not handled separately with dedicated -## ICMP events. Bro's ICMP analyzer handles a number of ICMP messages directly -## with dedicated events. This handler acts as a fallback for those it doesn't. -## The *icmp* record provides more information about the message. +## Generated for all ICMPv6 error messages that are not handled +## separately with dedicated events. Bro's ICMP analyzer handles a number +## of ICMP error messages directly with dedicated events. 
This event acts +## as a fallback for those it doesn't. ## ## See `Wikipedia -## `__ for more -## information about the ICMP protocol. +## `__ for more +## information about the ICMPv6 protocol. ## ## c: The connection record for the corresponding ICMP flow. ## ## icmp: Additional ICMP-specific information augmenting the standard ## connection record *c*. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect -## icmp_time_exceeded icmp_unreachable +## code: The ICMP code of the error message. +## +## context: A record with specifics of the original packet that the message refers +## to. +## +## .. bro:see:: icmp_unreachable icmp_packet_too_big +## icmp_time_exceeded icmp_parameter_problem event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); ## Generated for ICMP *destination unreachable* messages. @@ -861,15 +862,15 @@ event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: ## that if the *unreachable* includes only a partial IP header for some reason, no ## fields of *context* will be filled out. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent -## icmp_time_exceeded +## .. bro:see:: icmp_error_message icmp_packet_too_big +## icmp_time_exceeded icmp_parameter_problem event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); -## Generated for ICMP *packet too big* messages. +## Generated for ICMPv6 *packet too big* messages. ## ## See `Wikipedia -## `__ for more -## information about the ICMP protocol. +## `__ for more +## information about the ICMPv6 protocol. ## ## c: The connection record for the corresponding ICMP flow. ## @@ -884,8 +885,8 @@ event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: ic ## that if the *too big* includes only a partial IP header for some reason, no ## fields of *context* will be filled out. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent -## icmp_time_exceeded +## .. bro:see:: icmp_error_message icmp_unreachable +## icmp_time_exceeded icmp_parameter_problem event icmp_packet_too_big%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); ## Generated for ICMP *time exceeded* messages. @@ -907,15 +908,15 @@ event icmp_packet_too_big%(c: connection, icmp: icmp_conn, code: count, context: ## if the *exceeded* includes only a partial IP header for some reason, no fields ## of *context* will be filled out. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent -## icmp_unreachable +## .. bro:see:: icmp_error_message icmp_unreachable icmp_packet_too_big +## icmp_parameter_problem event icmp_time_exceeded%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); -## Generated for ICMP *parameter problem* messages. +## Generated for ICMPv6 *parameter problem* messages. ## ## See `Wikipedia -## `__ for more -## information about the ICMP protocol. +## `__ for more +## information about the ICMPv6 protocol. ## ## c: The connection record for the corresponding ICMP flow. ## @@ -930,8 +931,8 @@ event icmp_time_exceeded%(c: connection, icmp: icmp_conn, code: count, context: ## if the *parameter problem* includes only a partial IP header for some reason, no fields ## of *context* will be filled out. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent -## icmp_unreachable +## .. 
bro:see:: icmp_error_message icmp_unreachable icmp_packet_too_big +## icmp_time_exceeded event icmp_parameter_problem%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); ## Generated for ICMP *router solicitation* messages. @@ -945,8 +946,8 @@ event icmp_parameter_problem%(c: connection, icmp: icmp_conn, code: count, conte ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent -## icmp_time_exceeded icmp_unreachable +## .. bro:see:: icmp_router_advertisement +## icmp_neighbor_solicitation icmp_neighbor_advertisement icmp_redirect event icmp_router_solicitation%(c: connection, icmp: icmp_conn%); ## Generated for ICMP *router advertisement* messages. @@ -975,8 +976,14 @@ event icmp_router_solicitation%(c: connection, icmp: icmp_conn%); ## ## rsv: Remaining two reserved bits of router advertisement flags. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent -## icmp_time_exceeded icmp_unreachable +## router_lifetime: How long this router should be used as a default router. +## +## reachable_time: How long a neighbor should be considered reachable. +## +## retrans_timer: How long a host should wait before retransmitting. +## +## .. bro:see:: icmp_router_solicitation +## icmp_neighbor_solicitation icmp_neighbor_advertisement icmp_redirect event icmp_router_advertisement%(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval%); ## Generated for ICMP *neighbor solicitation* messages. @@ -992,8 +999,8 @@ event icmp_router_advertisement%(c: connection, icmp: icmp_conn, cur_hop_limit: ## ## tgt: The IP address of the target of the solicitation. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent -## icmp_time_exceeded icmp_unreachable +## .. bro:see:: icmp_router_solicitation icmp_router_advertisement +## icmp_neighbor_advertisement icmp_redirect event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt:addr%); ## Generated for ICMP *neighbor advertisement* messages. @@ -1016,8 +1023,8 @@ event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt:addr%); ## tgt: the Target Address in the soliciting message or the address whose ## link-layer address has changed for unsolicited adverts. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent -## icmp_time_exceeded icmp_unreachable +## .. bro:see:: icmp_router_solicitation icmp_router_advertisement +## icmp_neighbor_solicitation icmp_redirect event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt:addr%); ## Generated for ICMP *redirect* messages. @@ -1036,10 +1043,8 @@ event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, router: bool, ## ## dest: The address of the destination which is redirected to the target. ## -## a: The new destination address the message is redirecting to. -## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent -## icmp_time_exceeded icmp_unreachable +## .. 
bro:see:: icmp_router_solicitation icmp_router_advertisement +## icmp_neighbor_solicitation icmp_neighbor_advertisement event icmp_redirect%(c: connection, icmp: icmp_conn, tgt: addr, dest: addr%); ## Generated when a TCP connection terminated, passing on statistics about the From 508d39457a7b9ea1c63acb71f753520c0b01eac6 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 11 May 2012 17:09:01 -0500 Subject: [PATCH 280/651] Update tests (use weird.log instead of stderr) --- testing/btest/Baseline/core.checksums/bad.out | 96 ++++++++++++++++--- .../btest/Baseline/core.checksums/good.out | 59 +++++++++++- .../Baseline/core.disable-mobile-ipv6/output | 1 - .../core.disable-mobile-ipv6/weird.log | 8 ++ testing/btest/Baseline/core.truncation/output | 27 +++++- testing/btest/core/checksums.test | 57 +++++++---- testing/btest/core/disable-mobile-ipv6.test | 4 +- testing/btest/core/truncation.test | 9 +- 8 files changed, 217 insertions(+), 44 deletions(-) delete mode 100644 testing/btest/Baseline/core.disable-mobile-ipv6/output create mode 100644 testing/btest/Baseline/core.disable-mobile-ipv6/weird.log diff --git a/testing/btest/Baseline/core.checksums/bad.out b/testing/btest/Baseline/core.checksums/bad.out index 57089a72a6..44a27f7f0f 100644 --- a/testing/btest/Baseline/core.checksums/bad.out +++ b/testing/btest/Baseline/core.checksums/bad.out @@ -1,13 +1,83 @@ -1332784981.078396 weird: bad_IP_checksum -1332784885.686428 weird: bad_TCP_checksum -1332784933.501023 weird: bad_UDP_checksum -1334075363.536871 weird: bad_ICMP_checksum -1332785210.013051 weird: routing0_hdr -1332785210.013051 weird: bad_TCP_checksum -1332782580.798420 weird: routing0_hdr -1332782580.798420 weird: bad_UDP_checksum -1334075111.800086 weird: routing0_hdr -1334075111.800086 weird: bad_ICMP_checksum -1332785250.469132 weird: bad_TCP_checksum -1332781342.923813 weird: bad_UDP_checksum -1334074939.467194 weird: bad_ICMP_checksum +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332784981.078396 - - - - - bad_IP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332784885.686428 UWkUyAuUGXf 127.0.0.1 30000 127.0.0.1 80 bad_TCP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332784933.501023 UWkUyAuUGXf 127.0.0.1 30000 127.0.0.1 13000 bad_UDP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075363.536871 UWkUyAuUGXf 192.168.1.100 8 192.168.1.101 0 bad_ICMP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332785210.013051 - - - - - routing0_hdr - F bro +1332785210.013051 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 80 bad_TCP_checksum - F 
bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332782580.798420 - - - - - routing0_hdr - F bro +1332782580.798420 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 13000 bad_UDP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075111.800086 - - - - - routing0_hdr - F bro +1334075111.800086 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:78:1:32::1 129 bad_ICMP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332785250.469132 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 80 bad_TCP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332781342.923813 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 13000 bad_UDP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334074939.467194 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F bro diff --git a/testing/btest/Baseline/core.checksums/good.out b/testing/btest/Baseline/core.checksums/good.out index 4330967d8d..0010974b7f 100644 --- a/testing/btest/Baseline/core.checksums/good.out +++ b/testing/btest/Baseline/core.checksums/good.out @@ -1,3 +1,56 @@ -1332785125.596793 weird: routing0_hdr -1332782508.592037 weird: routing0_hdr -1334075027.053380 weird: routing0_hdr +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334074939.467194 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332785125.596793 - - - - - routing0_hdr - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332782508.592037 - - - - - routing0_hdr - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075027.053380 - - - - - routing0_hdr - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts 
uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075027.053380 - - - - - routing0_hdr - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075027.053380 - - - - - routing0_hdr - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075027.053380 - - - - - routing0_hdr - F bro diff --git a/testing/btest/Baseline/core.disable-mobile-ipv6/output b/testing/btest/Baseline/core.disable-mobile-ipv6/output deleted file mode 100644 index b156353f74..0000000000 --- a/testing/btest/Baseline/core.disable-mobile-ipv6/output +++ /dev/null @@ -1 +0,0 @@ -1333663011.602839 weird: unknown_protocol_135 diff --git a/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log b/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log new file mode 100644 index 0000000000..478cfe8667 --- /dev/null +++ b/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1333663011.602839 - - - - - unknown_protocol_135 - F bro diff --git a/testing/btest/Baseline/core.truncation/output b/testing/btest/Baseline/core.truncation/output index ba8d3eedee..f3d64b8b28 100644 --- a/testing/btest/Baseline/core.truncation/output +++ b/testing/btest/Baseline/core.truncation/output @@ -1,3 +1,24 @@ -1334160095.895421 weird: truncated_IP -1334156241.519125 weird: truncated_IP -1334094648.590126 weird: truncated_IP +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334160095.895421 - - - - - truncated_IP - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334156241.519125 - - - - - truncated_IP - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334094648.590126 - - - - - truncated_IP - F bro diff --git a/testing/btest/core/checksums.test b/testing/btest/core/checksums.test index f5b3230686..77fe2a62d3 100644 --- a/testing/btest/core/checksums.test +++ b/testing/btest/core/checksums.test @@ -1,23 +1,42 @@ -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-tcp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-icmp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-bad-chksum.pcap >>bad.out 2>&1 -# 
@TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-icmp6-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-icmp6-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-bad-chksum.pcap +# @TEST-EXEC: mv weird.log bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-tcp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-udp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-icmp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-tcp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-udp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-icmp6-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-tcp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-udp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-icmp6-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-tcp-good-chksum.pcap +# @TEST-EXEC: mv weird.log good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-udp-good-chksum.pcap +# @TEST-EXEC: test ! -e weird.log +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-icmp-good-chksum.pcap +# @TEST-EXEC: test ! -e weird.log +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-tcp-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-udp-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-icmp6-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-tcp-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-udp-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-tcp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-icmp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-icmp6-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: btest-diff bad.out # @TEST-EXEC: btest-diff good.out diff --git a/testing/btest/core/disable-mobile-ipv6.test b/testing/btest/core/disable-mobile-ipv6.test index 84dc43dae8..5151a12b38 100644 --- a/testing/btest/core/disable-mobile-ipv6.test +++ b/testing/btest/core/disable-mobile-ipv6.test @@ -1,6 +1,6 @@ # @TEST-REQUIRES: grep -q "#undef ENABLE_MOBILE_IPV6" $BUILD/config.h -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_back.trace %INPUT >output 2>&1 -# 
@TEST-EXEC: btest-diff output +# @TEST-EXEC: bro -r $TRACES/mobile-ipv6/mip6_back.trace %INPUT +# @TEST-EXEC: btest-diff weird.log event mobile_ipv6_message(p: pkt_hdr) { diff --git a/testing/btest/core/truncation.test b/testing/btest/core/truncation.test index 16a60fe6db..ee8bdd5bf9 100644 --- a/testing/btest/core/truncation.test +++ b/testing/btest/core/truncation.test @@ -1,6 +1,9 @@ # Truncated IP packet's should not be analyzed, and generate truncated_IP weird -# @TEST-EXEC: bro -b -r $TRACES/trunc/ip4-trunc.pcap >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/trunc/ip6-trunc.pcap >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/trunc/ip6-ext-trunc.pcap >>output 2>&1 +# @TEST-EXEC: bro -r $TRACES/trunc/ip4-trunc.pcap +# @TEST-EXEC: mv weird.log output +# @TEST-EXEC: bro -r $TRACES/trunc/ip6-trunc.pcap +# @TEST-EXEC: cat weird.log >> output +# @TEST-EXEC: bro -r $TRACES/trunc/ip6-ext-trunc.pcap +# @TEST-EXEC: cat weird.log >> output # @TEST-EXEC: btest-diff output From d3ea3127822e2c51edbba02a88dc49a591ef9d11 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 11 May 2012 17:16:57 -0500 Subject: [PATCH 281/651] Add unit tests for Broccoli SSL and Broccoli IPv6 connectivity. --- .../istate.broccoli-ipv6-socket/bro..stdout | 9 +++ .../broccoli..stdout | 6 ++ .../Baseline/istate.broccoli-ssl/bro..stdout | 9 +++ .../istate.broccoli-ssl/broccoli..stdout | 6 ++ testing/btest/istate/broccoli-ipv6-socket.bro | 10 +++ testing/btest/istate/broccoli-ipv6.bro | 8 +-- testing/btest/istate/broccoli-ssl.bro | 68 +++++++++++++++++++ 7 files changed, 109 insertions(+), 7 deletions(-) create mode 100644 testing/btest/Baseline/istate.broccoli-ipv6-socket/bro..stdout create mode 100644 testing/btest/Baseline/istate.broccoli-ipv6-socket/broccoli..stdout create mode 100644 testing/btest/Baseline/istate.broccoli-ssl/bro..stdout create mode 100644 testing/btest/Baseline/istate.broccoli-ssl/broccoli..stdout create mode 100644 testing/btest/istate/broccoli-ipv6-socket.bro create mode 100644 testing/btest/istate/broccoli-ssl.bro diff --git a/testing/btest/Baseline/istate.broccoli-ipv6-socket/bro..stdout b/testing/btest/Baseline/istate.broccoli-ipv6-socket/bro..stdout new file mode 100644 index 0000000000..0a7bac52c5 --- /dev/null +++ b/testing/btest/Baseline/istate.broccoli-ipv6-socket/bro..stdout @@ -0,0 +1,9 @@ +handshake done with peer +bro_addr(1.2.3.4) +bro_subnet(10.0.0.0/16) +bro_addr(2607:f8b0:4009:802::1014) +bro_subnet(2607:f8b0::/32) +broccoli_addr(1.2.3.4) +broccoli_subnet(10.0.0.0/16) +broccoli_addr(2607:f8b0:4009:802::1014) +broccoli_subnet(2607:f8b0::/32) diff --git a/testing/btest/Baseline/istate.broccoli-ipv6-socket/broccoli..stdout b/testing/btest/Baseline/istate.broccoli-ipv6-socket/broccoli..stdout new file mode 100644 index 0000000000..dba9318891 --- /dev/null +++ b/testing/btest/Baseline/istate.broccoli-ipv6-socket/broccoli..stdout @@ -0,0 +1,6 @@ +Connected to Bro instance at: ::1:47757 +Received bro_addr(1.2.3.4) +Received bro_subnet(10.0.0.0/16) +Received bro_addr(2607:f8b0:4009:802::1014) +Received bro_subnet(2607:f8b0::/32) +Terminating diff --git a/testing/btest/Baseline/istate.broccoli-ssl/bro..stdout b/testing/btest/Baseline/istate.broccoli-ssl/bro..stdout new file mode 100644 index 0000000000..0a7bac52c5 --- /dev/null +++ b/testing/btest/Baseline/istate.broccoli-ssl/bro..stdout @@ -0,0 +1,9 @@ +handshake done with peer +bro_addr(1.2.3.4) +bro_subnet(10.0.0.0/16) +bro_addr(2607:f8b0:4009:802::1014) +bro_subnet(2607:f8b0::/32) +broccoli_addr(1.2.3.4) +broccoli_subnet(10.0.0.0/16) 
+broccoli_addr(2607:f8b0:4009:802::1014) +broccoli_subnet(2607:f8b0::/32) diff --git a/testing/btest/Baseline/istate.broccoli-ssl/broccoli..stdout b/testing/btest/Baseline/istate.broccoli-ssl/broccoli..stdout new file mode 100644 index 0000000000..481778c98a --- /dev/null +++ b/testing/btest/Baseline/istate.broccoli-ssl/broccoli..stdout @@ -0,0 +1,6 @@ +Connected to Bro instance at: localhost:47757 +Received bro_addr(1.2.3.4) +Received bro_subnet(10.0.0.0/16) +Received bro_addr(2607:f8b0:4009:802::1014) +Received bro_subnet(2607:f8b0::/32) +Terminating diff --git a/testing/btest/istate/broccoli-ipv6-socket.bro b/testing/btest/istate/broccoli-ipv6-socket.bro new file mode 100644 index 0000000000..e36ac9e9f7 --- /dev/null +++ b/testing/btest/istate/broccoli-ipv6-socket.bro @@ -0,0 +1,10 @@ +# @TEST-GROUP: comm +# +# @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib +# @TEST-REQUIRES: ifconfig | grep -q -E "inet6 ::1|inet6 addr: ::1" +# +# @TEST-EXEC: btest-bg-run bro bro $DIST/aux/broccoli/test/broccoli-v6addrs.bro "Communication::listen_ipv6=T" +# @TEST-EXEC: btest-bg-run broccoli $BUILD/aux/broccoli/test/broccoli-v6addrs -6 ::1 +# @TEST-EXEC: btest-bg-wait -k 20 +# @TEST-EXEC: btest-diff bro/.stdout +# @TEST-EXEC: btest-diff broccoli/.stdout diff --git a/testing/btest/istate/broccoli-ipv6.bro b/testing/btest/istate/broccoli-ipv6.bro index b7ab5bdb05..415c8bb2d2 100644 --- a/testing/btest/istate/broccoli-ipv6.bro +++ b/testing/btest/istate/broccoli-ipv6.bro @@ -2,14 +2,8 @@ # # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # -# @TEST-EXEC: btest-bg-run bro bro %INPUT $DIST/aux/broccoli/test/broccoli-v6addrs.bro +# @TEST-EXEC: btest-bg-run bro bro $DIST/aux/broccoli/test/broccoli-v6addrs.bro # @TEST-EXEC: btest-bg-run broccoli $BUILD/aux/broccoli/test/broccoli-v6addrs # @TEST-EXEC: btest-bg-wait -k 20 # @TEST-EXEC: btest-diff bro/.stdout # @TEST-EXEC: btest-diff broccoli/.stdout - -event remote_connection_closed(p: event_peer) - { - terminate(); - } - diff --git a/testing/btest/istate/broccoli-ssl.bro b/testing/btest/istate/broccoli-ssl.bro new file mode 100644 index 0000000000..61401c483a --- /dev/null +++ b/testing/btest/istate/broccoli-ssl.bro @@ -0,0 +1,68 @@ +# @TEST-GROUP: comm +# +# @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib +# +# @TEST-EXEC: chmod 600 broccoli.conf +# @TEST-EXEC: btest-bg-run bro bro $DIST/aux/broccoli/test/broccoli-v6addrs.bro "Communication::listen_ssl=T" "ssl_ca_certificate=../ca_cert.pem" "ssl_private_key=../bro.pem" +# @TEST-EXEC: btest-bg-run broccoli BROCCOLI_CONFIG_FILE=../broccoli.conf $BUILD/aux/broccoli/test/broccoli-v6addrs +# @TEST-EXEC: btest-bg-wait -k 20 +# @TEST-EXEC: btest-diff bro/.stdout +# @TEST-EXEC: btest-diff broccoli/.stdout + +@TEST-START-FILE broccoli.conf +/broccoli/use_ssl yes +/broccoli/ca_cert ../ca_cert.pem +/broccoli/host_cert ../bro.pem +/broccoli/host_key ../bro.pem +@TEST-END-FILE + +@TEST-START-FILE bro.pem +-----BEGIN RSA PRIVATE KEY----- +MIICXgIBAAKBgQD17FE8UVaO224Y8UL2bH1okCYxr5dVytTQ93uE5J9caGADzPZe +qYPuvtPt9ivhBtf2L9odK7unQU60v6RsO3bb9bQktQbEdh0FEjnso2UHe/nLreYn +VyLCEp9Sh1OFQnMhJNYuzNwVzWOqH/TYNy3ODueZTS4YBsRyEkpEfgeoaQIDAQAB +AoGAJ/S1Xi94+Mz+Hl9UmeUWmx6QlhIJbI7/9NPA5d6fZcwvjW6HuOmh3fBzTn5o +sq8B96Xesk6gtpQNzaA1fsBKlzDSpGRDVg2odN9vIT3jd0Dub2F47JHdFCqtMUIV 
+rCsO+fpGtavv1zJ/rzlJz7rx4cRP+/Gwd5YlH0q5cFuHhAECQQD9q328Ye4A7o2e +cLOhzuWUZszqdIY7ZTgDtk06F57VrjLVERrZjrtAwbs77m+ybw4pDKKU7H5inhQQ +03PU40ARAkEA+C6cCM6E4hRwuR+QyIqpNC4CzgPaKlF+VONZLYYvHEwFvx2/EPtX +zOZdE4HdJwnXBYx7+AGFeq8uHhrN2Tq62QJBAMory2JAinejqKsGF6R2SPMlm1ug +0vqziRksShBqkuSqmUjHASczYnoR7S+usMb9S8PblhgrA++FHWjrnf2lwIECQQCj ++/AfpY2J8GWW/HNm/q/UiX5S75qskZI+tsXK3bmtIdI+OIJxzxFxktj3NbyRud+4 +i92xvhebO7rmK2HOYg7pAkEA2wrwY1E237twoYXuUInv9F9kShKLQs19nup/dfmF +xfoVqYjJwidzPfgngowJZij7SoTaIBKv/fKp5Tq6xW3AEg== +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICZDCCAc2gAwIBAgIJAKoxR9yFGsk8MA0GCSqGSIb3DQEBBQUAMCsxKTAnBgNV +BAMTIEJybyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MCAXDTExMDYxNTIx +MjgxNVoYDzIxMTEwNTIyMjEyODE1WjArMSkwJwYDVQQDEyBCcm8gUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA +9exRPFFWjttuGPFC9mx9aJAmMa+XVcrU0Pd7hOSfXGhgA8z2XqmD7r7T7fYr4QbX +9i/aHSu7p0FOtL+kbDt22/W0JLUGxHYdBRI57KNlB3v5y63mJ1ciwhKfUodThUJz +ISTWLszcFc1jqh/02Dctzg7nmU0uGAbEchJKRH4HqGkCAwEAAaOBjTCBijAdBgNV +HQ4EFgQU2vIsKYuGhHP8c7GeJLfWAjbKCFgwWwYDVR0jBFQwUoAU2vIsKYuGhHP8 +c7GeJLfWAjbKCFihL6QtMCsxKTAnBgNVBAMTIEJybyBSb290IENlcnRpZmljYXRp +b24gQXV0aG9yaXR5ggkAqjFH3IUayTwwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B +AQUFAAOBgQAF2oceL61dA7WxA9lxcxsA/Fccr7+J6sO+pLXoZtx5tpknEuIUebkm +UfMGAiyYIenHi8u0Sia8KrIfuCDc2dG3DYmfX7/faCEbtSx8KtNQFIs3aXr1zhsw +3sX9fLS0gp/qHoPMuhbhlvTlMFSE/Mih3KDsZEGcifzI6ooLF0YP5A== +-----END CERTIFICATE----- +@TEST-END-FILE + +@TEST-START-FILE ca_cert.pem +-----BEGIN CERTIFICATE----- +MIICZDCCAc2gAwIBAgIJAKoxR9yFGsk8MA0GCSqGSIb3DQEBBQUAMCsxKTAnBgNV +BAMTIEJybyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MCAXDTExMDYxNTIx +MjgxNVoYDzIxMTEwNTIyMjEyODE1WjArMSkwJwYDVQQDEyBCcm8gUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA +9exRPFFWjttuGPFC9mx9aJAmMa+XVcrU0Pd7hOSfXGhgA8z2XqmD7r7T7fYr4QbX +9i/aHSu7p0FOtL+kbDt22/W0JLUGxHYdBRI57KNlB3v5y63mJ1ciwhKfUodThUJz +ISTWLszcFc1jqh/02Dctzg7nmU0uGAbEchJKRH4HqGkCAwEAAaOBjTCBijAdBgNV +HQ4EFgQU2vIsKYuGhHP8c7GeJLfWAjbKCFgwWwYDVR0jBFQwUoAU2vIsKYuGhHP8 +c7GeJLfWAjbKCFihL6QtMCsxKTAnBgNVBAMTIEJybyBSb290IENlcnRpZmljYXRp +b24gQXV0aG9yaXR5ggkAqjFH3IUayTwwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B +AQUFAAOBgQAF2oceL61dA7WxA9lxcxsA/Fccr7+J6sO+pLXoZtx5tpknEuIUebkm +UfMGAiyYIenHi8u0Sia8KrIfuCDc2dG3DYmfX7/faCEbtSx8KtNQFIs3aXr1zhsw +3sX9fLS0gp/qHoPMuhbhlvTlMFSE/Mih3KDsZEGcifzI6ooLF0YP5A== +-----END CERTIFICATE----- +@TEST-END-FILE From 751cc1cd5105014c307d15602493e1801d636803 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Mon, 14 May 2012 18:34:25 -0500 Subject: [PATCH 282/651] Fix typos and improve INSTALL document Updated the list of required and optional libraries and tools. Rewrote the paragraph about the "aux" tools to more accurately describe what actually gets installed. 
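The build sequence that the revised INSTALL text describes boils down to the
following minimal sketch; the install prefix and the ``--disable-*`` flag are
only examples here, adjust them for your own setup::

    ./configure --prefix=/usr/local/bro --disable-ruby
    make
    make install
    # the programs in aux/bro-aux are installed separately, on demand:
    make install-aux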
--- INSTALL | 56 ++++++++++++++++++++++++++++++++----------------------- configure | 2 +- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/INSTALL b/INSTALL index d4aa93d11f..d9f7963ec4 100644 --- a/INSTALL +++ b/INSTALL @@ -5,34 +5,44 @@ Installing Bro Prerequisites ============= -Bro relies on the following libraries and tools, which need to be installed +Bro requires the following libraries and tools to be installed before you begin: * CMake 2.6.3 or greater http://www.cmake.org - * Libpcap (headers and libraries) http://www.tcpdump.org + * Perl (used only during the Bro build process) - * OpenSSL (headers and libraries) http://www.openssl.org + * Libpcap headers and libraries http://www.tcpdump.org - * SWIG http://www.swig.org + * OpenSSL headers and libraries http://www.openssl.org + + * BIND8 headers and libraries * Libmagic * Libz -Bro can make uses of some optional libraries if they are found at -installation time: + * SWIG http://www.swig.org - * LibGeoIP For geo-locating IP addresses. - -Bro also needs the following tools, but on most systems they will -already come preinstalled: - - * Bash (For Bro Control). - * BIND8 (headers and libraries) * Bison (GNU Parser Generator) + * Flex (Fast Lexical Analyzer) - * Perl (Used only during the Bro build process) + + * Bash (for BroControl) + + +Bro can make use of some optional libraries and tools if they are found at +build time: + + * LibGeoIP (for geo-locating IP addresses) + + * gperftools (tcmalloc is used to improve memory and CPU usage) + + * sendmail (for BroControl) + + * ipsumdump (for trace-summary) http://www.cs.ucla.edu/~kohler/ipsumdump + + * Ruby executable, library, and headers (for Broccoli Ruby bindings) Installation @@ -44,7 +54,7 @@ To build and install into ``/usr/local/bro``:: make make install -This will first build Bro into a directory inside the distribution +This will first build Bro in a directory inside the distribution called ``build/``, using default build options. It then installs all required files into ``/usr/local/bro``, including the Bro binary in ``/usr/local/bro/bin/bro``. @@ -60,22 +70,22 @@ choices unless you are creating such a package. Run ``./configure --help`` for more options. Depending on the Bro package you downloaded, there may be auxiliary -tools and libraries available in the ``aux/`` directory. All of them -except for ``aux/bro-aux`` will also be built and installed by doing -``make install``. To install the programs that come in the -``aux/bro-aux`` directory, use ``make install-aux``. There are +tools and libraries available in the ``aux/`` directory. Some of them +will be automatically built and installed along with Bro. There are ``--disable-*`` options that can be given to the configure script to -turn off unwanted auxiliary projects. +turn off unwanted auxiliary projects that would otherwise be installed +automatically. Finally, use ``make install-aux`` to install some of +the other programs that are in the ``aux/bro-aux`` directory. -OpenBSD users, please see our `FAQ -` if you are having +OpenBSD users, please see our FAQ at +http://www.bro-ids.org/documentation/faq.html if you are having problems installing Bro. Running Bro =========== Bro is a complex program and it takes a bit of time to get familiar -with it. A good place for newcomers to start is the Quickstart Guide +with it. A good place for newcomers to start is the Quick Start Guide at http://www.bro-ids.org/documentation/quickstart.html. 
For developers that wish to run Bro directly from the ``build/`` diff --git a/configure b/configure index b6af505ffe..85b6af2d7d 100755 --- a/configure +++ b/configure @@ -32,7 +32,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --enable-perftools-debug use Google's perftools for debugging --disable-broccoli don't build or install the Broccoli library --disable-broctl don't install Broctl - --disable-auxtools don't build or install auxilliary tools + --disable-auxtools don't build or install auxiliary tools --disable-python don't try to build python bindings for broccoli --disable-ruby don't try to build ruby bindings for broccoli From c0257c55213237caa1fe5972f5db7ecb8a0b8308 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 14 May 2012 16:50:48 -0700 Subject: [PATCH 283/651] Switching to new btest command TEST-SERIALIZE for communication tests. --- testing/btest/istate/broccoli-ipv6.bro | 2 +- testing/btest/istate/broccoli.bro | 2 +- testing/btest/istate/events-ssl.bro | 2 +- testing/btest/istate/events.bro | 2 +- testing/btest/istate/pybroccoli.py | 2 +- testing/btest/istate/sync.bro | 2 +- testing/btest/scripts/base/frameworks/cluster/start-it-up.bro | 2 +- .../frameworks/communication/communication_log_baseline.bro | 2 +- .../scripts/base/frameworks/control/configuration_update.bro | 2 +- testing/btest/scripts/base/frameworks/control/id_value.bro | 2 +- testing/btest/scripts/base/frameworks/control/shutdown.bro | 2 +- testing/btest/scripts/base/frameworks/logging/remote-types.bro | 2 +- testing/btest/scripts/base/frameworks/logging/remote.bro | 2 +- testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro | 2 +- .../base/frameworks/metrics/cluster-intermediate-update.bro | 2 +- testing/btest/scripts/base/frameworks/notice/cluster.bro | 2 +- .../scripts/base/frameworks/notice/suppression-cluster.bro | 2 +- 17 files changed, 17 insertions(+), 17 deletions(-) diff --git a/testing/btest/istate/broccoli-ipv6.bro b/testing/btest/istate/broccoli-ipv6.bro index b7ab5bdb05..cd0b546ce7 100644 --- a/testing/btest/istate/broccoli-ipv6.bro +++ b/testing/btest/istate/broccoli-ipv6.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # diff --git a/testing/btest/istate/broccoli.bro b/testing/btest/istate/broccoli.bro index 235ff9119c..2bae5dc080 100644 --- a/testing/btest/istate/broccoli.bro +++ b/testing/btest/istate/broccoli.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # diff --git a/testing/btest/istate/events-ssl.bro b/testing/btest/istate/events-ssl.bro index 25aa2dc8fb..03784addef 100644 --- a/testing/btest/istate/events-ssl.bro +++ b/testing/btest/istate/events-ssl.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run sender bro -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro # @TEST-EXEC: btest-bg-run receiver bro ../receiver.bro diff --git a/testing/btest/istate/events.bro b/testing/btest/istate/events.bro index 81a1d765db..81d9cc61b6 100644 --- a/testing/btest/istate/events.bro +++ b/testing/btest/istate/events.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run sender bro -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro # @TEST-EXEC: btest-bg-run receiver bro ../receiver.bro diff --git 
a/testing/btest/istate/pybroccoli.py b/testing/btest/istate/pybroccoli.py index 1a5830b41a..9f26efca31 100644 --- a/testing/btest/istate/pybroccoli.py +++ b/testing/btest/istate/pybroccoli.py @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/bindings/broccoli-python/_broccoli_intern.so diff --git a/testing/btest/istate/sync.bro b/testing/btest/istate/sync.bro index db5ea0bbb4..776ddfd2fa 100644 --- a/testing/btest/istate/sync.bro +++ b/testing/btest/istate/sync.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run sender bro %INPUT ../sender.bro # @TEST-EXEC: btest-bg-run receiver bro %INPUT ../receiver.bro diff --git a/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro b/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro index d7b552d962..a1069d1bd0 100644 --- a/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro +++ b/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT diff --git a/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro b/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro index 85b23dbdc0..3d80ef7777 100644 --- a/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro +++ b/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run receiver bro -b ../receiver.bro # @TEST-EXEC: btest-bg-run sender bro -b ../sender.bro diff --git a/testing/btest/scripts/base/frameworks/control/configuration_update.bro b/testing/btest/scripts/base/frameworks/control/configuration_update.bro index d09105ca7a..920a162503 100644 --- a/testing/btest/scripts/base/frameworks/control/configuration_update.bro +++ b/testing/btest/scripts/base/frameworks/control/configuration_update.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controllee Communication::listen_port=65531/tcp # @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. bro %INPUT test-redef frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65531/tcp Control::cmd=configuration_update diff --git a/testing/btest/scripts/base/frameworks/control/id_value.bro b/testing/btest/scripts/base/frameworks/control/id_value.bro index 7235521034..c5d1d063f5 100644 --- a/testing/btest/scripts/base/frameworks/control/id_value.bro +++ b/testing/btest/scripts/base/frameworks/control/id_value.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT only-for-controllee frameworks/control/controllee Communication::listen_port=65532/tcp # @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. 
bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65532/tcp Control::cmd=id_value Control::arg=test_var diff --git a/testing/btest/scripts/base/frameworks/control/shutdown.bro b/testing/btest/scripts/base/frameworks/control/shutdown.bro index ec1ca6da16..7b6e5713f8 100644 --- a/testing/btest/scripts/base/frameworks/control/shutdown.bro +++ b/testing/btest/scripts/base/frameworks/control/shutdown.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controllee Communication::listen_port=65530/tcp # @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65530/tcp Control::cmd=shutdown diff --git a/testing/btest/scripts/base/frameworks/logging/remote-types.bro b/testing/btest/scripts/base/frameworks/logging/remote-types.bro index ce93495bc8..f1ef4f0c31 100644 --- a/testing/btest/scripts/base/frameworks/logging/remote-types.bro +++ b/testing/btest/scripts/base/frameworks/logging/remote-types.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run sender bro --pseudo-realtime %INPUT ../sender.bro # @TEST-EXEC: btest-bg-run receiver bro --pseudo-realtime %INPUT ../receiver.bro diff --git a/testing/btest/scripts/base/frameworks/logging/remote.bro b/testing/btest/scripts/base/frameworks/logging/remote.bro index bb1e5b8ce3..8375d7915a 100644 --- a/testing/btest/scripts/base/frameworks/logging/remote.bro +++ b/testing/btest/scripts/base/frameworks/logging/remote.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run sender bro --pseudo-realtime %INPUT ../sender.bro # @TEST-EXEC: sleep 1 diff --git a/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro b/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro index b801074b33..09479b7a2f 100644 --- a/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro +++ b/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT diff --git a/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro b/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro index 701d2ea378..654e42976a 100644 --- a/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro +++ b/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT diff --git a/testing/btest/scripts/base/frameworks/notice/cluster.bro b/testing/btest/scripts/base/frameworks/notice/cluster.bro index 97470eaa7f..8d54a27eaf 100644 --- a/testing/btest/scripts/base/frameworks/notice/cluster.bro +++ b/testing/btest/scripts/base/frameworks/notice/cluster.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. 
CLUSTER_NODE=proxy-1 bro %INPUT diff --git a/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro b/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro index d56d940e8e..b812c6451d 100644 --- a/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro +++ b/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT From 0197a9a55144a1f5f2463b2da9038fd34fd669c7 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 14 May 2012 17:52:00 -0700 Subject: [PATCH 284/651] Updating submodule(s). [nomail] --- aux/broctl | 2 +- aux/btest | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aux/broctl b/aux/broctl index 76e6bd4b18..5137c63751 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 76e6bd4b182e9ff43456890e08aeaf451f9e4615 +Subproject commit 5137c6375162f121348095205aaaec04a86de632 diff --git a/aux/btest b/aux/btest index 054d656aa3..76876ce0e7 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 054d656aa3c3827762c07bf62f31e0930fb84a0c +Subproject commit 76876ce0e7da4888c91b3aea024c5cfd36405310 From 99f59dc5c0b100208e378a1cdfa7d38df0650f2f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 14 May 2012 18:17:03 -0700 Subject: [PATCH 285/651] Quieting external test output somehwat. --- VERSION | 2 +- testing/external/Makefile | 4 ++-- testing/external/scripts/update-traces | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/VERSION b/VERSION index 88c99ebbf5..b59cb1ff9b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-336 +2.0-338 diff --git a/testing/external/Makefile b/testing/external/Makefile index b705734003..9715b3d669 100644 --- a/testing/external/Makefile +++ b/testing/external/Makefile @@ -6,11 +6,11 @@ DIAG=diag.log all: @rm -f $(DIAG) - @for repo in $(REPOS); do (cd $$repo && make ); done + @for repo in $(REPOS); do (cd $$repo && make -s ); done brief: @rm -f $(DIAG) - @for repo in $(REPOS); do (cd $$repo && make brief ); done + @for repo in $(REPOS); do (cd $$repo && make -s brief ); done init: git clone $(PUBLIC_REPO) diff --git a/testing/external/scripts/update-traces b/testing/external/scripts/update-traces index 8c27fb055e..8dd8d09e9c 100755 --- a/testing/external/scripts/update-traces +++ b/testing/external/scripts/update-traces @@ -69,9 +69,9 @@ cat $cfg | while read line; do eval "$proxy curl $auth -f --anyauth $url -o $file" echo mv $fp.tmp $fp - else - echo "`basename $file` already available." - fi + #else + # echo "`basename $file` already available." + fi rm -f $fp.tmp From 00b592f933b69079afcab527c8be5387b625fddd Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 14 May 2012 21:38:27 -0700 Subject: [PATCH 286/651] Adding format specifier to DS spec to print out double as %.6f. 
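A quick way to check the new format is to convert a DataSeries log back to
text (a sketch only; the trace file and the resulting ``conn.ds`` name are
placeholders, and ``ds2txt`` comes with the DataSeries tools)::

    bro -r trace.pcap Log::default_writer=Log::WRITER_DATASERIES
    ds2txt --skip-all conn.ds
    # time columns now render as 1300475167.096535 rather than 1.300475e+09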
--- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- aux/btest | 2 +- cmake | 2 +- src/logging/writers/DataSeries.cc | 7 ++++--- 7 files changed, 10 insertions(+), 9 deletions(-) diff --git a/aux/binpac b/aux/binpac index 56ae73ab99..dd1a3a95f0 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 56ae73ab995dda665d8918d1a6b3af39b15991e3 +Subproject commit dd1a3a95f07082efcd5274b21104a038d523d132 diff --git a/aux/bro-aux b/aux/bro-aux index 12d32194c1..a59b35bdad 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 12d32194c19d2dce06818588a2aeccf234de1889 +Subproject commit a59b35bdada8f70fb1a59bf7bb2976534c86d378 diff --git a/aux/broccoli b/aux/broccoli index 60898666ba..a4046c2f79 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 60898666ba1df1913c08ad5045b1e56f974060cc +Subproject commit a4046c2f79b6ab0ac19ae8be94b79c6ce578bea7 diff --git a/aux/broctl b/aux/broctl index d50e0efe13..c86b7e990b 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit d50e0efe133c50d824753c86d068467e54a3c47d +Subproject commit c86b7e990b4d39cd48c0cb692077aa081b418149 diff --git a/aux/btest b/aux/btest index 1897d224ce..c8e8fe477b 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 1897d224ce295e91d20e458851759c99734a0a74 +Subproject commit c8e8fe477b5dec635e5ce00f3f764fad069c549c diff --git a/cmake b/cmake index d394eadf12..60b2873937 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit d394eadf123f9ff972be4508d34b9614ebcc32a4 +Subproject commit 60b28739379da75f26c5c2a312b7886f5209a1cc diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index bd1da57403..32a93a5dd4 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -194,12 +194,13 @@ std::string DataSeries::GetDSOptionsForType(const threading::Field *field) case TYPE_INTERVAL: { std::string s; - s += "pack_relative=\"" + std::string(field->name) + "\""; + s += "pack_relative=\"" + std::string(field->name) + "\" "; + s += "print_format=\"%.6f\" "; if ( ! ds_use_integer_for_time ) - s += " pack_scale=\"1000\" pack_scale_warn=\"no\""; + s += "pack_scale=\"1000\" pack_scale_warn=\"no\""; else - s += string(" units=\"") + TIME_UNIT() + "\" epoch=\"unix\""; + s += string("units=\"") + TIME_UNIT() + "\" epoch=\"unix\""; return s; } From fabe891d4fbff62831de1dba677d252e984e2b30 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 14 May 2012 21:58:58 -0700 Subject: [PATCH 287/651] Fixing pack_scale and time-as-int. Also removing now unneccessary canonifier script, and updating test baselines. 
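The difference between the two time encodings is easiest to see in ``ds2txt``
output (the values are taken from the updated test baselines further below;
the redef that switches the writer to integer time is not spelled out here,
see the ``time-as-int.bro`` test for the exact knob)::

    ds2txt --skip-all conn.ds
    # default: double seconds with pack_scale="1e-6", e.g. 1300475167.096535
    # integer time enabled: microseconds since the epoch, e.g. 1300475167096535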
--- src/logging/writers/DataSeries.cc | 7 +- .../ssh.ds.xml | 2 +- .../out | 140 +++++++++--------- .../ssh.ds.txt | 20 +-- .../conn.ds.txt | 80 +++++----- .../http.ds.txt | 38 ++--- .../logging/dataseries/test-logging.bro | 2 +- .../scripts/diff-remove-timestamps-dataseries | 6 - 8 files changed, 144 insertions(+), 151 deletions(-) delete mode 100755 testing/scripts/diff-remove-timestamps-dataseries diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index 32a93a5dd4..a7908a8e04 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -194,13 +194,12 @@ std::string DataSeries::GetDSOptionsForType(const threading::Field *field) case TYPE_INTERVAL: { std::string s; - s += "pack_relative=\"" + std::string(field->name) + "\" "; - s += "print_format=\"%.6f\" "; + s += "pack_relative=\"" + std::string(field->name) + "\""; if ( ! ds_use_integer_for_time ) - s += "pack_scale=\"1000\" pack_scale_warn=\"no\""; + s += " pack_scale=\"1e-6\" print_format=\"%.6f\" pack_scale_warn=\"no\""; else - s += string("units=\"") + TIME_UNIT() + "\" epoch=\"unix\""; + s += string(" units=\"") + TIME_UNIT() + "\" epoch=\"unix\""; return s; } diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml index 9862ae606f..cacc3b0ea4 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml @@ -1,5 +1,5 @@ - + diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out index 76e7e77c77..ed2aff0164 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out @@ -20,7 +20,7 @@ test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 dataserie - + @@ -34,17 +34,17 @@ test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 dataserie extent offset ExtentType 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +508 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299467e+09 10.0.0.1 20 10.0.0.2 1024 -1.299471e+09 10.0.0.2 20 10.0.0.3 0 +1299466805.000000 10.0.0.1 20 10.0.0.2 1024 +1299470395.000000 10.0.0.2 20 10.0.0.3 0 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +508 DataSeries: ExtentIndex > test.2011-03-07-04-00-05.ds # Extent Types ... @@ -57,7 +57,7 @@ offset extenttype - + @@ -71,17 +71,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.29947e+09 10.0.0.1 20 10.0.0.2 1025 -1.299474e+09 10.0.0.2 20 10.0.0.3 1 +1299470405.000000 10.0.0.1 20 10.0.0.2 1025 +1299473995.000000 10.0.0.2 20 10.0.0.3 1 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex > test.2011-03-07-05-00-05.ds # Extent Types ... 
@@ -94,7 +94,7 @@ offset extenttype - + @@ -108,17 +108,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299474e+09 10.0.0.1 20 10.0.0.2 1026 -1.299478e+09 10.0.0.2 20 10.0.0.3 2 +1299474005.000000 10.0.0.1 20 10.0.0.2 1026 +1299477595.000000 10.0.0.2 20 10.0.0.3 2 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex > test.2011-03-07-06-00-05.ds # Extent Types ... @@ -131,7 +131,7 @@ offset extenttype - + @@ -145,17 +145,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299478e+09 10.0.0.1 20 10.0.0.2 1027 -1.299482e+09 10.0.0.2 20 10.0.0.3 3 +1299477605.000000 10.0.0.1 20 10.0.0.2 1027 +1299481195.000000 10.0.0.2 20 10.0.0.3 3 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex > test.2011-03-07-07-00-05.ds # Extent Types ... @@ -168,7 +168,7 @@ offset extenttype - + @@ -182,17 +182,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +512 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299481e+09 10.0.0.1 20 10.0.0.2 1028 -1.299485e+09 10.0.0.2 20 10.0.0.3 4 +1299481205.000000 10.0.0.1 20 10.0.0.2 1028 +1299484795.000000 10.0.0.2 20 10.0.0.3 4 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +512 DataSeries: ExtentIndex > test.2011-03-07-08-00-05.ds # Extent Types ... @@ -205,7 +205,7 @@ offset extenttype - + @@ -219,17 +219,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299485e+09 10.0.0.1 20 10.0.0.2 1029 -1.299489e+09 10.0.0.2 20 10.0.0.3 5 +1299484805.000000 10.0.0.1 20 10.0.0.2 1029 +1299488395.000000 10.0.0.2 20 10.0.0.3 5 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex > test.2011-03-07-09-00-05.ds # Extent Types ... @@ -242,7 +242,7 @@ offset extenttype - + @@ -256,17 +256,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299488e+09 10.0.0.1 20 10.0.0.2 1030 -1.299492e+09 10.0.0.2 20 10.0.0.3 6 +1299488405.000000 10.0.0.1 20 10.0.0.2 1030 +1299491995.000000 10.0.0.2 20 10.0.0.3 6 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex > test.2011-03-07-10-00-05.ds # Extent Types ... 
@@ -279,7 +279,7 @@ offset extenttype - + @@ -293,17 +293,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299492e+09 10.0.0.1 20 10.0.0.2 1031 -1.299496e+09 10.0.0.2 20 10.0.0.3 7 +1299492005.000000 10.0.0.1 20 10.0.0.2 1031 +1299495595.000000 10.0.0.2 20 10.0.0.3 7 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex > test.2011-03-07-11-00-05.ds # Extent Types ... @@ -316,7 +316,7 @@ offset extenttype - + @@ -330,17 +330,17 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299496e+09 10.0.0.1 20 10.0.0.2 1032 -1.2995e+09 10.0.0.2 20 10.0.0.3 8 +1299495605.000000 10.0.0.1 20 10.0.0.2 1032 +1299499195.000000 10.0.0.2 20 10.0.0.3 8 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex > test.2011-03-07-12-00-05.ds # Extent Types ... @@ -353,7 +353,7 @@ offset extenttype - + @@ -367,14 +367,14 @@ offset extenttype extent offset ExtentType 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p -1.299499e+09 10.0.0.1 20 10.0.0.2 1033 -1.299503e+09 10.0.0.2 20 10.0.0.3 9 +1299499205.000000 10.0.0.1 20 10.0.0.2 1033 +1299502795.000000 10.0.0.2 20 10.0.0.3 9 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -372 test -484 DataSeries: ExtentIndex +392 test +516 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt index 8cb1293772..245bdcd9be 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt @@ -9,7 +9,7 @@ - + @@ -27,17 +27,17 @@ extent offset ExtentType 40 DataSeries: XmlType -416 ssh -624 DataSeries: ExtentIndex +436 ssh +644 DataSeries: ExtentIndex # Extent, type='ssh' t id.orig_h id.orig_p id.resp_h id.resp_p status country -X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 success unknown -X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 failure US -X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 failure UK -X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 success BR -X.XXXe+09 1.2.3.4 1234 2.3.4.5 80 failure MX +1337058239.030366 1.2.3.4 1234 2.3.4.5 80 success unknown +1337058239.030366 1.2.3.4 1234 2.3.4.5 80 failure US +1337058239.030366 1.2.3.4 1234 2.3.4.5 80 failure UK +1337058239.030366 1.2.3.4 1234 2.3.4.5 80 success BR +1337058239.030366 1.2.3.4 1234 2.3.4.5 80 failure MX # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -416 ssh -624 DataSeries: ExtentIndex +436 ssh +644 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt index 7a4af6776b..104831f027 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt +++ 
b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt @@ -9,7 +9,7 @@ - + @@ -17,7 +17,7 @@ - + @@ -51,46 +51,46 @@ extent offset ExtentType 40 DataSeries: XmlType -680 conn -2592 DataSeries: ExtentIndex +700 conn +2860 DataSeries: ExtentIndex # Extent, type='conn' ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes -1.300475e+09 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 -1.300475e+09 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 -1.300475e+09 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 -1.300475e+09 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 -1.300475e+09 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 -1.300475e+09 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 -1.300475e+09 3PKsZ2Uye21 141.142.220.118 32902 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 -1.300475e+09 VW0XPVINV8a 141.142.220.118 59816 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 -1.300475e+09 fRFu0wcOle6 141.142.220.118 59714 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 -1.300475e+09 qSsw6ESzHV4 141.142.220.118 58206 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 -1.300475e+09 iE6yhOq3SF 141.142.220.118 38911 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 -1.300475e+09 GSxOnSLghOa 141.142.220.118 59746 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 -1.300475e+09 qCaWGmzFtM5 141.142.220.118 45000 141.142.2.2 53 udp dns 0 0 89 SHR F 0 Cd 0 0 1 117 -1.300475e+09 70MGiRM1Qf4 141.142.220.118 48479 141.142.2.2 53 udp dns 0 0 99 SHR F 0 Cd 0 0 1 127 -1.300475e+09 h5DsfNtYzi1 141.142.220.118 48128 141.142.2.2 53 udp dns 0 0 183 SHR F 0 Cd 0 0 1 211 -1.300475e+09 P654jzLoe3a 141.142.220.118 56056 141.142.2.2 53 udp dns 0 0 131 SHR F 0 Cd 0 0 1 159 -1.300475e+09 Tw8jXtpTGu6 141.142.220.118 55092 141.142.2.2 53 udp dns 0 0 198 SHR F 0 Cd 0 0 1 226 -1.300475e+09 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 85 0 0 -1.300475e+09 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 0 350 0 S0 F 0 D 7 546 0 0 -1.300475e+09 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0 66 0 S0 F 0 D 2 162 0 0 -1.300475e+09 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 0 66 0 S0 F 0 D 2 122 0 0 -1.300475e+09 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0 66 0 S0 F 0 D 2 162 0 0 -1.300475e+09 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 0 66 0 S0 F 0 D 2 122 0 0 -1.300475e+09 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0 0 0 S0 F 0 D 1 78 0 0 -1.300475e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 tcp 0 1130 734 S1 F 1130 ShACad 4 216 4 950 -1.300475e+09 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 0 0 350 OTH F 0 CdA 1 52 1 402 -1.300475e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 tcp 0 1178 734 S1 F 1178 ShACad 4 216 4 950 -1.300475e+09 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 0 534 412 S1 F 534 ShACad 3 164 3 576 -1.300475e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 tcp 0 1148 734 S1 F 1148 ShACad 4 216 4 950 -1.300475e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 tcp 0 1171 733 S1 F 1171 ShACad 4 216 4 949 -1.300475e+09 slFea8xwSmb 141.142.220.118 49999 
208.80.152.3 80 tcp 0 1137 733 S1 F 1137 ShACad 4 216 4 949 -1.300475e+09 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0 0 0 OTH F 0 h 0 0 1 48 -1.300475e+09 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 0 525 232 S1 F 525 ShACad 3 164 3 396 -1.300475e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 0 1125 734 S1 F 1125 ShACad 4 216 4 950 +1300475167.096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 +1300475167.097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 +1300475167.099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0 +1300475168.853899 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.854378 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.854837 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.857956 3PKsZ2Uye21 141.142.220.118 32902 141.142.2.2 53 udp dns 0.000317 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.858306 VW0XPVINV8a 141.142.220.118 59816 141.142.2.2 53 udp dns 0.000343 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.858713 fRFu0wcOle6 141.142.220.118 59714 141.142.2.2 53 udp dns 0.000375 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.891644 qSsw6ESzHV4 141.142.220.118 58206 141.142.2.2 53 udp dns 0.000339 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.892037 iE6yhOq3SF 141.142.220.118 38911 141.142.2.2 53 udp dns 0.000335 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.892414 GSxOnSLghOa 141.142.220.118 59746 141.142.2.2 53 udp dns 0.000421 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.893988 qCaWGmzFtM5 141.142.220.118 45000 141.142.2.2 53 udp dns 0.000384 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.894422 70MGiRM1Qf4 141.142.220.118 48479 141.142.2.2 53 udp dns 0.000317 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.894787 h5DsfNtYzi1 141.142.220.118 48128 141.142.2.2 53 udp dns 0.000423 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.901749 P654jzLoe3a 141.142.220.118 56056 141.142.2.2 53 udp dns 0.000402 0 131 SHR F 0 Cd 0 0 1 159 +1300475168.902195 Tw8jXtpTGu6 141.142.220.118 55092 141.142.2.2 53 udp dns 0.000374 0 198 SHR F 0 Cd 0 0 1 226 +1300475169.899438 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 85 0 0 +1300475170.862384 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 2.613017 350 0 S0 F 0 D 7 546 0 0 +1300475171.675372 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0.100096 66 0 S0 F 0 D 2 162 0 0 +1300475171.677081 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 0.100021 66 0 S0 F 0 D 2 122 0 0 +1300475173.116749 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0.099801 66 0 S0 F 0 D 2 162 0 0 +1300475173.117362 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 0.099849 66 0 S0 F 0 D 2 122 0 0 +1300475173.153679 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0.000000 0 0 S0 F 0 D 1 78 0 0 +1300475168.859163 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 tcp 0.215893 1130 734 S1 F 1130 ShACad 4 216 4 950 +1300475168.652003 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 0.061329 0 350 OTH F 0 CdA 1 52 1 402 +1300475168.895267 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 tcp 0.227284 1178 734 S1 F 1178 ShACad 4 216 4 950 +1300475168.902635 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 0.120041 534 412 S1 F 534 ShACad 3 164 3 576 +1300475168.892936 0Q4FH8sESw5 
141.142.220.118 50000 208.80.152.3 80 tcp 0.229603 1148 734 S1 F 1148 ShACad 4 216 4 950 +1300475168.855305 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 tcp 0.218501 1171 733 S1 F 1171 ShACad 4 216 4 949 +1300475168.892913 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 tcp 0.220961 1137 733 S1 F 1137 ShACad 4 216 4 949 +1300475169.780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0.000000 0 0 OTH F 0 h 0 0 1 48 +1300475168.724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 0.119905 525 232 S1 F 525 ShACad 3 164 3 396 +1300475168.855330 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 0.219720 1125 734 S1 F 1125 ShACad 4 216 4 950 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -680 conn -2592 DataSeries: ExtentIndex +700 conn +2860 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt index 0b16a69a6f..0f1eebd251 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt @@ -9,7 +9,7 @@ - + @@ -65,26 +65,26 @@ extent offset ExtentType 40 DataSeries: XmlType -784 http -1172 DataSeries: ExtentIndex +804 http +1252 DataSeries: ExtentIndex # Extent, type='http' ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file -1.300475e+09 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 0 0 0 304 Not Modified 0 -1.300475e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 0 0 0 304 Not Modified 0 -1.300475e+09 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 -1.300475e+09 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475168.843894 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 0 0 0 304 Not Modified 0 +1300475168.975800 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475168.976327 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475168.979160 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.012666 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.012730 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.014860 UfGkYA2HI2g 141.142.220.118 50001 
208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.022665 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 0 0 0 304 Not Modified 0 +1300475169.036294 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.036798 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.039923 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.074793 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.074938 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.075065 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 # Extent, type='DataSeries: ExtentIndex' offset extenttype 40 DataSeries: XmlType -784 http -1172 DataSeries: ExtentIndex +804 http +1252 DataSeries: ExtentIndex diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro index d04b0acf44..0c5c52460b 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro @@ -3,7 +3,7 @@ # @TEST-GROUP: dataseries # # @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: ds2txt ssh.ds | ${SCRIPTS}/diff-remove-timestamps-dataseries >ssh.ds.txt +# @TEST-EXEC: ds2txt ssh.ds >ssh.ds.txt # @TEST-EXEC: btest-diff ssh.ds.txt module SSH; diff --git a/testing/scripts/diff-remove-timestamps-dataseries b/testing/scripts/diff-remove-timestamps-dataseries deleted file mode 100755 index 5b20f138af..0000000000 --- a/testing/scripts/diff-remove-timestamps-dataseries +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env bash -# -# Replace anything which looks like DataSeries timestamps (which is a double) with XXXs. - -sed 's/1\.[0-9]*e+09/X.XXXe+09/g' - From ac09bae7d59a6794a02ae40197c9ea0c7cb40f1a Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 14 May 2012 22:14:31 -0700 Subject: [PATCH 288/651] Updating doc. --- doc/logging-dataseries.rst | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst index 1a5f4ae520..5f1ad7f7c6 100644 --- a/doc/logging-dataseries.rst +++ b/doc/logging-dataseries.rst @@ -72,12 +72,15 @@ With that, Bro will now write all its output into DataSeries files tools, which its installation process will have installed into ``/bin``. For example, to convert a file back into an ASCII representation:: - # ds2txt conn .log + # ds2txt conn.log [... We skip a bunch of meta data here ...] 
- ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts res - 1.3e+09 9CqElRsB9Q 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 - 1.3e+09 3bNPfUWuIhb fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 - 1.3e+09 ZoDDN7YuYx3 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 + ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes + 1300475167.096535 CRCC5OdDlXe 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 + 1300475167.097012 o7XBsfvo3U1 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 + 1300475167.099816 pXPi1kPMgxb 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0 + 1300475168.853899 R7sOc16woCj 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF F 0 Dd 1 66 1 117 + 1300475168.854378 Z6dfHVmt0X7 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF F 0 Dd 1 80 1 127 + 1300475168.854837 k6T92WxgNAh 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211 [...] Note that is ASCII format is *not* equivalent to Bro's default format From d6fdc10242a409bc58829830dd48b0b6b5503f7b Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 15 May 2012 10:19:56 -0500 Subject: [PATCH 289/651] Add a comment to explain the ICMPv6 error message types --- src/ICMP.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/ICMP.cc b/src/ICMP.cc index 2f11337d8a..05a6b67dff 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -181,6 +181,9 @@ void ICMP_Analyzer::NextICMP6(double t, const struct icmp* icmpp, int len, int c case MLD_LISTENER_REDUCTION: #endif default: + // Error messages (i.e., ICMPv6 type < 128) all have + // the same structure for their context, and are + // handled by the same function. if ( icmpp->icmp_type < 128 ) Context6(t, icmpp, len, caplen, data, ip_hdr); else From 4fe11cf50361321597177e8d78ce25ffa5ea6f1b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 16 May 2012 17:54:38 -0700 Subject: [PATCH 290/651] Extending DS docs with some examples. --- doc/logging-dataseries.rst | 75 ++++++++++++++++++++++++++++++++++---- 1 file changed, 68 insertions(+), 7 deletions(-) diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst index 5f1ad7f7c6..8c797dd46c 100644 --- a/doc/logging-dataseries.rst +++ b/doc/logging-dataseries.rst @@ -62,7 +62,7 @@ Activating DataSeries The direct way to use DataSeries is to switch *all* log files over to the binary format. To do that, just add ``redef -Log::default_writer=Log::WRITER_DATASERIES;`` to your ``local.bro`. +Log::default_writer=Log::WRITER_DATASERIES;`` to your ``local.bro``. For testing, you can also just pass that on the command line:: bro -r trace.pcap Log::default_writer=Log::WRITER_DATASERIES @@ -72,7 +72,8 @@ With that, Bro will now write all its output into DataSeries files tools, which its installation process will have installed into ``/bin``. For example, to convert a file back into an ASCII representation:: - # ds2txt conn.log + + $ ds2txt conn.log [... We skip a bunch of meta data here ...] 
ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes 1300475167.096535 CRCC5OdDlXe 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 @@ -83,13 +84,22 @@ representation:: 1300475168.854837 k6T92WxgNAh 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211 [...] +(``--skip-all`` suppresses the meta data.) + Note that is ASCII format is *not* equivalent to Bro's default format as DataSeries uses a different internal representation. You can also switch only individual files over to DataSeries by adding code like this to your ``local.bro``:: - TODO +.. code:: bro + + event bro_init() + { + local f = Log::get_filter(Conn::LOG, "default"); # Get default filter for connection log. + f$writer = Log::WRITER_DATASERIES; # Change writer type. + Log::add_filter(Conn::LOG, f); # Replace filter with adapted version. + } Bro's DataSeries writer comes with a few tuning options, see :doc:`scripts/base/frameworks/logging/writers/dataseries`. @@ -100,9 +110,60 @@ Working with DataSeries Here are few examples of using DataSeries command line tools to work with the output files. -TODO. +* Printing CSV:: -TODO -==== + $ ds2txt --csv conn.log + ts,uid,id.orig_h,id.orig_p,id.resp_h,id.resp_p,proto,service,duration,orig_bytes,resp_bytes,conn_state,local_orig,missed_bytes,history,orig_pkts,orig_ip_bytes,resp_pkts,resp_ip_bytes + 1258790493.773208,ZTtgbHvf4s3,192.168.1.104,137,192.168.1.255,137,udp,dns,3.748891,350,0,S0,F,0,D,7,546,0,0 + 1258790451.402091,pOY6Rw7lhUd,192.168.1.106,138,192.168.1.255,138,udp,,0.000000,0,0,S0,F,0,D,1,229,0,0 + 1258790493.787448,pn5IiEslca9,192.168.1.104,138,192.168.1.255,138,udp,,2.243339,348,0,S0,F,0,D,2,404,0,0 + 1258790615.268111,D9slyIu3hFj,192.168.1.106,137,192.168.1.255,137,udp,dns,3.764626,350,0,S0,F,0,D,7,546,0,0 + [...] -* Do we have a leak? + Add ``--separator=X`` to set a different separator. + +* Extracting a subset of columns:: + + $ ds2txt --select '*' ts,id.resp_h,id.resp_p --skip-all conn.log + 1258790493.773208 192.168.1.255 137 + 1258790451.402091 192.168.1.255 138 + 1258790493.787448 192.168.1.255 138 + 1258790615.268111 192.168.1.255 137 + 1258790615.289842 192.168.1.255 138 + [...] + +* Filtering rows:: + + $ ds2txt --where '*' 'duration > 5 && id.resp_p > 1024' --skip-all conn.ds + 1258790631.532888 V8mV5WLITu5 192.168.1.105 55890 239.255.255.250 1900 udp 15.004568 798 0 S0 F 0 D 6 966 0 0 + 1258792413.439596 tMcWVWQptvd 192.168.1.105 55890 239.255.255.250 1900 udp 15.004581 798 0 S0 F 0 D 6 966 0 0 + 1258794195.346127 cQwQMRdBrKa 192.168.1.105 55890 239.255.255.250 1900 udp 15.005071 798 0 S0 F 0 D 6 966 0 0 + 1258795977.253200 i8TEjhWd2W8 192.168.1.105 55890 239.255.255.250 1900 udp 15.004824 798 0 S0 F 0 D 6 966 0 0 + 1258797759.160217 MsLsBA8Ia49 192.168.1.105 55890 239.255.255.250 1900 udp 15.005078 798 0 S0 F 0 D 6 966 0 0 + 1258799541.068452 TsOxRWJRGwf 192.168.1.105 55890 239.255.255.250 1900 udp 15.004082 798 0 S0 F 0 D 6 966 0 0 + [...] + +* Calculate some statistics: + + Mean/stdev/min/max over a column:: + + $ dsstatgroupby '*' basic duration from conn.ds + # Begin DSStatGroupByModule + # processed 2159 rows, where clause eliminated 0 rows + # count(*), mean(duration), stddev, min, max + 2159, 42.7938, 1858.34, 0, 86370 + [...] + + Quantiles of total connection volume:: + + > dsstatgroupby '*' quantile 'orig_bytes + resp_bytes' from conn.ds + [...] 
+ 2159 data points, mean 24616 +- 343295 [0,1.26615e+07] + quantiles about every 216 data points: + 10%: 0, 124, 317, 348, 350, 350, 601, 798, 1469 + tails: 90%: 1469, 95%: 7302, 99%: 242629, 99.5%: 1226262 + [...] + +The ``man`` pages for these tool show further options, and their +``-h`` option gives some more information (either can be a bit cryptic +unfortunately though). From 99db264775cfc2d1aae5f0c8cd264deb37f497e3 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 16 May 2012 18:00:44 -0700 Subject: [PATCH 291/651] Portability fixes. - Fix for time-as-int on 32-bit systems. - Skipping ds2txt's index output for test diffing, as it seems non-portable. --- src/logging/writers/DataSeries.cc | 2 +- .../out | 90 ------------------- .../ssh.ds.txt | 19 ++-- .../conn.ds.txt | 9 -- .../conn.ds.txt | 9 -- .../http.ds.txt | 9 -- .../btest/core/leaks/dataseries-rotate.bro | 1 + testing/btest/core/leaks/dataseries.bro | 1 + .../frameworks/logging/dataseries/rotate.bro | 2 +- .../logging/dataseries/test-logging.bro | 2 +- .../logging/dataseries/time-as-int.bro | 2 +- .../logging/dataseries/wikipedia.bro | 4 +- 12 files changed, 13 insertions(+), 137 deletions(-) diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index a7908a8e04..9f19028be3 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -61,7 +61,7 @@ std::string DataSeries::LogValueToString(threading::Value *val) if ( ds_use_integer_for_time ) { std::ostringstream ostr; - ostr << (unsigned long)(DataSeries::TIME_SCALE * val->val.double_val); + ostr << (uint64_t)(DataSeries::TIME_SCALE * val->val.double_val); return ostr.str(); } else diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out index ed2aff0164..1e5e1b05c6 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out @@ -32,19 +32,10 @@ test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 dataserie -extent offset ExtentType -40 DataSeries: XmlType -392 test -508 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p 1299466805.000000 10.0.0.1 20 10.0.0.2 1024 1299470395.000000 10.0.0.2 20 10.0.0.3 0 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -392 test -508 DataSeries: ExtentIndex > test.2011-03-07-04-00-05.ds # Extent Types ... @@ -69,19 +60,10 @@ offset extenttype -extent offset ExtentType -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p 1299470405.000000 10.0.0.1 20 10.0.0.2 1025 1299473995.000000 10.0.0.2 20 10.0.0.3 1 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex > test.2011-03-07-05-00-05.ds # Extent Types ... @@ -106,19 +88,10 @@ offset extenttype -extent offset ExtentType -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p 1299474005.000000 10.0.0.1 20 10.0.0.2 1026 1299477595.000000 10.0.0.2 20 10.0.0.3 2 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex > test.2011-03-07-06-00-05.ds # Extent Types ... 
@@ -143,19 +116,10 @@ offset extenttype -extent offset ExtentType -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p 1299477605.000000 10.0.0.1 20 10.0.0.2 1027 1299481195.000000 10.0.0.2 20 10.0.0.3 3 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex > test.2011-03-07-07-00-05.ds # Extent Types ... @@ -180,19 +144,10 @@ offset extenttype -extent offset ExtentType -40 DataSeries: XmlType -392 test -512 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p 1299481205.000000 10.0.0.1 20 10.0.0.2 1028 1299484795.000000 10.0.0.2 20 10.0.0.3 4 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -392 test -512 DataSeries: ExtentIndex > test.2011-03-07-08-00-05.ds # Extent Types ... @@ -217,19 +172,10 @@ offset extenttype -extent offset ExtentType -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p 1299484805.000000 10.0.0.1 20 10.0.0.2 1029 1299488395.000000 10.0.0.2 20 10.0.0.3 5 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex > test.2011-03-07-09-00-05.ds # Extent Types ... @@ -254,19 +200,10 @@ offset extenttype -extent offset ExtentType -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p 1299488405.000000 10.0.0.1 20 10.0.0.2 1030 1299491995.000000 10.0.0.2 20 10.0.0.3 6 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex > test.2011-03-07-10-00-05.ds # Extent Types ... @@ -291,19 +228,10 @@ offset extenttype -extent offset ExtentType -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p 1299492005.000000 10.0.0.1 20 10.0.0.2 1031 1299495595.000000 10.0.0.2 20 10.0.0.3 7 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex > test.2011-03-07-11-00-05.ds # Extent Types ... @@ -328,19 +256,10 @@ offset extenttype -extent offset ExtentType -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p 1299495605.000000 10.0.0.1 20 10.0.0.2 1032 1299499195.000000 10.0.0.2 20 10.0.0.3 8 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex > test.2011-03-07-12-00-05.ds # Extent Types ... 
@@ -365,16 +284,7 @@ offset extenttype -extent offset ExtentType -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex # Extent, type='test' t id.orig_h id.orig_p id.resp_h id.resp_p 1299499205.000000 10.0.0.1 20 10.0.0.2 1033 1299502795.000000 10.0.0.2 20 10.0.0.3 9 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -392 test -516 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt index 245bdcd9be..e9640dfd9d 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt @@ -25,19 +25,10 @@ -extent offset ExtentType -40 DataSeries: XmlType -436 ssh -644 DataSeries: ExtentIndex # Extent, type='ssh' t id.orig_h id.orig_p id.resp_h id.resp_p status country -1337058239.030366 1.2.3.4 1234 2.3.4.5 80 success unknown -1337058239.030366 1.2.3.4 1234 2.3.4.5 80 failure US -1337058239.030366 1.2.3.4 1234 2.3.4.5 80 failure UK -1337058239.030366 1.2.3.4 1234 2.3.4.5 80 success BR -1337058239.030366 1.2.3.4 1234 2.3.4.5 80 failure MX -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -436 ssh -644 DataSeries: ExtentIndex +1337216256.956476 1.2.3.4 1234 2.3.4.5 80 success unknown +1337216256.956476 1.2.3.4 1234 2.3.4.5 80 failure US +1337216256.956476 1.2.3.4 1234 2.3.4.5 80 failure UK +1337216256.956476 1.2.3.4 1234 2.3.4.5 80 success BR +1337216256.956476 1.2.3.4 1234 2.3.4.5 80 failure MX diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt index 65d4ba0a67..1d7cba3b3c 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt @@ -49,10 +49,6 @@ -extent offset ExtentType -40 DataSeries: XmlType -672 conn -2948 DataSeries: ExtentIndex # Extent, type='conn' ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes 1300475167096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 @@ -89,8 +85,3 @@ ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes 1300475169780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0 0 0 OTH F 0 h 0 0 1 48 1300475168724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 119904 525 232 S1 F 525 ShACad 3 164 3 396 1300475168855330 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 219720 1125 734 S1 F 1125 ShACad 4 216 4 950 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -672 conn -2948 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt index 104831f027..3cafa078de 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt @@ -49,10 +49,6 @@ -extent offset ExtentType -40 DataSeries: 
XmlType -700 conn -2860 DataSeries: ExtentIndex # Extent, type='conn' ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes 1300475167.096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 @@ -89,8 +85,3 @@ ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes 1300475169.780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0.000000 0 0 OTH F 0 h 0 0 1 48 1300475168.724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 0.119905 525 232 S1 F 525 ShACad 3 164 3 396 1300475168.855330 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 0.219720 1125 734 S1 F 1125 ShACad 4 216 4 950 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -700 conn -2860 DataSeries: ExtentIndex diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt index 0f1eebd251..adb7bb3f7b 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt @@ -63,10 +63,6 @@ -extent offset ExtentType -40 DataSeries: XmlType -804 http -1252 DataSeries: ExtentIndex # Extent, type='http' ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file 1300475168.843894 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 0 0 0 304 Not Modified 0 @@ -83,8 +79,3 @@ ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri refer 1300475169.074793 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 1300475169.074938 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 1300475169.075065 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 -# Extent, type='DataSeries: ExtentIndex' -offset extenttype -40 DataSeries: XmlType -804 http -1252 DataSeries: ExtentIndex diff --git a/testing/btest/core/leaks/dataseries-rotate.bro b/testing/btest/core/leaks/dataseries-rotate.bro index 188de9717b..1afc517d1a 100644 --- a/testing/btest/core/leaks/dataseries-rotate.bro +++ b/testing/btest/core/leaks/dataseries-rotate.bro @@ -3,6 +3,7 @@ # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks +# @TEST-GROUP: dataseries # # @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r %DIR/../rotation.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES diff --git a/testing/btest/core/leaks/dataseries.bro b/testing/btest/core/leaks/dataseries.bro index 886ee54dd9..01dc3ffd79 100644 --- a/testing/btest/core/leaks/dataseries.bro +++ b/testing/btest/core/leaks/dataseries.bro @@ -4,6 +4,7 @@ # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks +# @TEST-GROUP: dataseries # # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks # @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local bro -m -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro index 6a0cee5888..652a4596fb 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro @@ -3,7 +3,7 @@ # @TEST-GROUP: dataseries # # @TEST-EXEC: bro -b -r %DIR/../rotation.trace %INPUT 2>&1 Log::default_writer=Log::WRITER_DATASERIES | grep "test" >out -# @TEST-EXEC: for i in test.*.ds; do printf '> %s\n' $i; ds2txt $i; done >>out +# @TEST-EXEC: for i in test.*.ds; do printf '> %s\n' $i; ds2txt --skip-index $i; done >>out # @TEST-EXEC: btest-diff out module Test; diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro index 0c5c52460b..ee0426ae55 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro @@ -3,7 +3,7 @@ # @TEST-GROUP: dataseries # # @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: ds2txt ssh.ds >ssh.ds.txt +# @TEST-EXEC: ds2txt --skip-index ssh.ds >ssh.ds.txt # @TEST-EXEC: btest-diff ssh.ds.txt module SSH; diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro index e4dd6a5431..5e3f864b33 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro @@ -3,7 +3,7 @@ # @TEST-GROUP: dataseries # # @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: ds2txt conn.ds >conn.ds.txt +# @TEST-EXEC: ds2txt --skip-index conn.ds >conn.ds.txt # @TEST-EXEC: btest-diff conn.ds.txt redef LogDataSeries::use_integer_for_time = T; diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro index 38726a8b10..ee1342c470 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro @@ -3,7 +3,7 @@ # @TEST-GROUP: dataseries # # @TEST-EXEC: bro -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: ds2txt conn.ds >conn.ds.txt -# @TEST-EXEC: ds2txt http.ds >http.ds.txt +# @TEST-EXEC: ds2txt --skip-index conn.ds >conn.ds.txt +# @TEST-EXEC: ds2txt --skip-index http.ds >http.ds.txt # @TEST-EXEC: btest-diff conn.ds.txt # @TEST-EXEC: btest-diff http.ds.txt From 5dae925f670c1f9976ae6a344fb60293f34a6df2 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 16 May 2012 18:24:55 -0700 Subject: [PATCH 292/651] Fixing a rotation race condition at termination. Noticed with DS, but could just as well happen with ASCII. 
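For illustration only, the shape of the fix is a counter of in-flight rotations that Terminate() drains before shutting the writers down. Below is a minimal, self-contained C++ sketch of that pattern; the names are invented for the example and this is not Bro code. (In Bro, the wait loop additionally has to pump the thread manager via ForceProcessing() so that the writers' "finished rotation" messages are actually delivered while terminating.)

    #include <atomic>
    #include <chrono>
    #include <cstdio>
    #include <thread>

    // Number of rotations triggered but not yet confirmed as finished
    // (this plays the role of rotations_pending in the patch).
    static std::atomic<int> pending(0);

    // Stands in for a writer thread completing its rotation and
    // reporting back (the FinishedRotation() side).
    static void writer_thread()
        {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        --pending;
        }

    int main()
        {
        ++pending;  // Rotate() side: a rotation is now in flight.
        std::thread writer(writer_thread);

        // Terminate() side: don't shut down until every pending
        // rotation has been confirmed as finished.
        while ( pending > 0 )
            std::this_thread::sleep_for(std::chrono::milliseconds(1));

        writer.join();
        printf("all rotations finished, safe to terminate\n");
        return 0;
        }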
--- src/logging/Manager.cc | 13 +++++++++++++ src/logging/Manager.h | 1 + src/threading/Manager.h | 6 ++++++ 3 files changed, 20 insertions(+) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 7f785e1080..a4dea1c909 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -7,6 +7,7 @@ #include "../NetVar.h" #include "../Net.h" +#include "threading/Manager.h" #include "threading/SerialTypes.h" #include "Manager.h" @@ -124,6 +125,7 @@ Manager::Stream::~Stream() Manager::Manager() { + rotations_pending = 0; } Manager::~Manager() @@ -1127,6 +1129,13 @@ bool Manager::Flush(EnumVal* id) void Manager::Terminate() { + // Make sure we process all the pending rotations. + while ( rotations_pending ) + { + thread_mgr->ForceProcessing(); // A blatant layering violation ... + usleep(1000); + } + for ( vector::iterator s = streams.begin(); s != streams.end(); ++s ) { if ( ! *s ) @@ -1235,6 +1244,8 @@ void Manager::Rotate(WriterInfo* winfo) // Trigger the rotation. winfo->writer->Rotate(tmp, winfo->open_time, network_time, terminating); + + ++rotations_pending; } bool Manager::FinishedRotation(WriterFrontend* writer, string new_name, string old_name, @@ -1243,6 +1254,8 @@ bool Manager::FinishedRotation(WriterFrontend* writer, string new_name, string o DBG_LOG(DBG_LOGGING, "Finished rotating %s at %.6f, new name %s", writer->Path().c_str(), network_time, new_name.c_str()); + --rotations_pending; + WriterInfo* winfo = FindWriter(writer); if ( ! winfo ) return true; diff --git a/src/logging/Manager.h b/src/logging/Manager.h index 5af3e55b4a..f5e62b0683 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -200,6 +200,7 @@ private: WriterInfo* FindWriter(WriterFrontend* writer); vector streams; // Indexed by stream enum. + int rotations_pending; // Number of rotations not yet finished. }; } diff --git a/src/threading/Manager.h b/src/threading/Manager.h index 7d9ba766d4..ab8189f39d 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -77,6 +77,12 @@ public: */ int NumThreads() const { return all_threads.size(); } + /** Manually triggers processing of any thread input. This can be useful + * if the main thread is waiting for a specific message from a child. + * Usually, though, one should avoid using it. + */ + void ForceProcessing() { Process(); } + protected: friend class BasicThread; friend class MsgThread; From 122f6ee4c64b46cd5264ea0964ba366ddc73446c Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 16 May 2012 18:28:51 -0700 Subject: [PATCH 293/651] Moving trace for rotation test into traces directory. 
--- .../frameworks/logging => Traces}/rotation.trace | Bin testing/btest/core/leaks/dataseries-rotate.bro | 2 +- .../base/frameworks/logging/dataseries/rotate.bro | 2 +- .../scripts/base/frameworks/logging/rotate.bro | 2 +- 4 files changed, 3 insertions(+), 3 deletions(-) rename testing/btest/{scripts/base/frameworks/logging => Traces}/rotation.trace (100%) diff --git a/testing/btest/scripts/base/frameworks/logging/rotation.trace b/testing/btest/Traces/rotation.trace similarity index 100% rename from testing/btest/scripts/base/frameworks/logging/rotation.trace rename to testing/btest/Traces/rotation.trace diff --git a/testing/btest/core/leaks/dataseries-rotate.bro b/testing/btest/core/leaks/dataseries-rotate.bro index 1afc517d1a..f0a5f3079d 100644 --- a/testing/btest/core/leaks/dataseries-rotate.bro +++ b/testing/btest/core/leaks/dataseries-rotate.bro @@ -5,7 +5,7 @@ # @TEST-GROUP: leaks # @TEST-GROUP: dataseries # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r %DIR/../rotation.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r $TRACES/rotation.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES module Test; diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro index 652a4596fb..7b708473e3 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro @@ -2,7 +2,7 @@ # @TEST-REQUIRES: has-writer DataSeries && which ds2txt # @TEST-GROUP: dataseries # -# @TEST-EXEC: bro -b -r %DIR/../rotation.trace %INPUT 2>&1 Log::default_writer=Log::WRITER_DATASERIES | grep "test" >out +# @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT 2>&1 Log::default_writer=Log::WRITER_DATASERIES | grep "test" >out # @TEST-EXEC: for i in test.*.ds; do printf '> %s\n' $i; ds2txt --skip-index $i; done >>out # @TEST-EXEC: btest-diff out diff --git a/testing/btest/scripts/base/frameworks/logging/rotate.bro b/testing/btest/scripts/base/frameworks/logging/rotate.bro index 14123c56c6..212dba3bf7 100644 --- a/testing/btest/scripts/base/frameworks/logging/rotate.bro +++ b/testing/btest/scripts/base/frameworks/logging/rotate.bro @@ -1,5 +1,5 @@ # -# @TEST-EXEC: bro -b -r %DIR/rotation.trace %INPUT 2>&1 | grep "test" >out +# @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT 2>&1 | grep "test" >out # @TEST-EXEC: for i in test.*.log; do printf '> %s\n' $i; cat $i; done >>out # @TEST-EXEC: btest-diff out From e34f27b928667eefdd9b6aa92457d4a345c25f68 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 17 May 2012 09:54:30 -0700 Subject: [PATCH 294/651] Updating submodule(s). [nomail] --- CHANGES | 6 ++++++ VERSION | 2 +- aux/broctl | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index da527a60f6..6471dbe4be 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,10 @@ +2.0-341 | 2012-05-17 09:54:30 -0700 + + * Add a comment to explain the ICMPv6 error message types. (Daniel Thayer) + + * Quieting external test output somewhat. (Robin Sommer) + 2.0-336 | 2012-05-14 17:15:44 -0700 * Don't print the various "weird" events to stderr. Address #805.
diff --git a/VERSION b/VERSION index b59cb1ff9b..358412b7a2 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-338 +2.0-341 diff --git a/aux/broctl b/aux/broctl index 5137c63751..519d2e21ee 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 5137c6375162f121348095205aaaec04a86de632 +Subproject commit 519d2e21ee375833c89eb6f7dc95c1eac3de17ab From 74f3a32321010928cf380abec3df30640382b289 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 17 May 2012 12:59:20 -0500 Subject: [PATCH 295/651] Enable Bro to communicate with peers over non-global IPv6 addresses. This usually requires specifying an additional zone identifier (see RFC 4007). The connect() and listen() BIFs have been changed to accept this zone identifier as an argument. --- scripts/base/frameworks/cluster/main.bro | 3 + .../frameworks/cluster/setup-connections.bro | 25 ++- .../base/frameworks/communication/main.bro | 16 +- scripts/base/frameworks/control/main.bro | 4 + .../frameworks/communication/listen.bro | 2 +- .../policy/frameworks/control/controller.bro | 4 +- src/RemoteSerializer.cc | 197 +++++++++++++----- src/RemoteSerializer.h | 18 +- src/bro.bif | 19 +- src/util.cc | 2 + 10 files changed, 215 insertions(+), 75 deletions(-) diff --git a/scripts/base/frameworks/cluster/main.bro b/scripts/base/frameworks/cluster/main.bro index 1e89e9b2a7..766dea912f 100644 --- a/scripts/base/frameworks/cluster/main.bro +++ b/scripts/base/frameworks/cluster/main.bro @@ -77,6 +77,9 @@ export { node_type: NodeType; ## The IP address of the cluster node. ip: addr; + ## If the *ip* field is a non-global IPv6 address, this field + ## can specify a particular :rfc:`4007` ``zone_id``. + zone_id: string &default=""; ## The port to which the this local node can connect when ## establishing communication. p: port; diff --git a/scripts/base/frameworks/cluster/setup-connections.bro b/scripts/base/frameworks/cluster/setup-connections.bro index b5a0d25e1f..3d89e39f30 100644 --- a/scripts/base/frameworks/cluster/setup-connections.bro +++ b/scripts/base/frameworks/cluster/setup-connections.bro @@ -19,23 +19,26 @@ event bro_init() &priority=9 # Connections from the control node for runtime control and update events. # Every node in a cluster is eligible for control from this host. 
if ( n$node_type == CONTROL ) - Communication::nodes["control"] = [$host=n$ip, $connect=F, - $class="control", $events=control_events]; + Communication::nodes["control"] = [$host=n$ip, $zone_id=n$zone_id, + $connect=F, $class="control", + $events=control_events]; if ( me$node_type == MANAGER ) { if ( n$node_type == WORKER && n$manager == node ) Communication::nodes[i] = - [$host=n$ip, $connect=F, + [$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i, $events=worker2manager_events, $request_logs=T]; if ( n$node_type == PROXY && n$manager == node ) Communication::nodes[i] = - [$host=n$ip, $connect=F, + [$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i, $events=proxy2manager_events, $request_logs=T]; if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i ) - Communication::nodes["time-machine"] = [$host=nodes[i]$ip, $p=nodes[i]$p, + Communication::nodes["time-machine"] = [$host=nodes[i]$ip, + $zone_id=nodes[i]$zone_id, + $p=nodes[i]$p, $connect=T, $retry=1min, $events=tm2manager_events]; } @@ -44,7 +47,8 @@ event bro_init() &priority=9 { if ( n$node_type == WORKER && n$proxy == node ) Communication::nodes[i] = - [$host=n$ip, $connect=F, $class=i, $sync=T, $auth=T, $events=worker2proxy_events]; + [$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i, + $sync=T, $auth=T, $events=worker2proxy_events]; # accepts connections from the previous one. # (This is not ideal for setups with many proxies) @@ -53,16 +57,18 @@ event bro_init() &priority=9 { if ( n?$proxy ) Communication::nodes[i] - = [$host=n$ip, $p=n$p, + = [$host=n$ip, $zone_id=n$zone_id, $p=n$p, $connect=T, $auth=F, $sync=T, $retry=1mins]; else if ( me?$proxy && me$proxy == i ) Communication::nodes[me$proxy] - = [$host=nodes[i]$ip, $connect=F, $auth=T, $sync=T]; + = [$host=nodes[i]$ip, $zone_id=nodes[i]$zone_id, + $connect=F, $auth=T, $sync=T]; } # Finally the manager, to send it status updates. if ( n$node_type == MANAGER && me$manager == i ) Communication::nodes["manager"] = [$host=nodes[i]$ip, + $zone_id=nodes[i]$zone_id, $p=nodes[i]$p, $connect=T, $retry=1mins, $class=node, @@ -72,6 +78,7 @@ event bro_init() &priority=9 { if ( n$node_type == MANAGER && me$manager == i ) Communication::nodes["manager"] = [$host=nodes[i]$ip, + $zone_id=nodes[i]$zone_id, $p=nodes[i]$p, $connect=T, $retry=1mins, $class=node, @@ -79,6 +86,7 @@ event bro_init() &priority=9 if ( n$node_type == PROXY && me$proxy == i ) Communication::nodes["proxy"] = [$host=nodes[i]$ip, + $zone_id=nodes[i]$zone_id, $p=nodes[i]$p, $connect=T, $retry=1mins, $sync=T, $class=node, @@ -87,6 +95,7 @@ event bro_init() &priority=9 if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i ) Communication::nodes["time-machine"] = [$host=nodes[i]$ip, + $zone_id=nodes[i]$zone_id, $p=nodes[i]$p, $connect=T, $retry=1min, diff --git a/scripts/base/frameworks/communication/main.bro b/scripts/base/frameworks/communication/main.bro index 26ec9f41b8..b9b15bfd22 100644 --- a/scripts/base/frameworks/communication/main.bro +++ b/scripts/base/frameworks/communication/main.bro @@ -23,9 +23,14 @@ export { ## Defines if a listening socket can bind to IPv6 addresses. const listen_ipv6 = F &redef; + ## If :bro:id:`Communication::listen_interface` is a non-global + ## IPv6 address and requires a specific :rfc:`4007` ``zone_id``, + ## it can be specified here. + const listen_ipv6_zone_id = "" &redef; + ## Defines the interval at which to retry binding to - ## :bro:id:`listen_interface` on :bro:id:`listen_port` if it's already in - ## use. 
+ ## :bro:id:`Communication::listen_interface` on + ## :bro:id:`Communication::listen_port` if it's already in use. const listen_retry = 30 secs &redef; ## Default compression level. Compression level is 0-9, with 0 = no @@ -60,6 +65,10 @@ export { type Node: record { ## Remote address. host: addr; + + ## If the *host* field is a non-global IPv6 address, this field + ## can specify a particular :rfc:`4007` ``zone_id``. + zone_id: string &optional; ## Port of the remote Bro communication endpoint if we are initiating ## the connection based on the :bro:id:`connect` field. @@ -187,7 +196,8 @@ function connect_peer(peer: string) p = node$p; local class = node?$class ? node$class : ""; - local id = connect(node$host, p, class, node$retry, node$ssl); + local zone_id = node?$zone_id ? node$zone_id : ""; + local id = connect(node$host, zone_id, p, class, node$retry, node$ssl); if ( id == PEER_ID_NONE ) Log::write(Communication::LOG, [$ts = network_time(), diff --git a/scripts/base/frameworks/control/main.bro b/scripts/base/frameworks/control/main.bro index 4fe8872801..63e5f639a0 100644 --- a/scripts/base/frameworks/control/main.bro +++ b/scripts/base/frameworks/control/main.bro @@ -11,6 +11,10 @@ export { ## The port of the host that will be controlled. const host_port = 0/tcp &redef; + ## If :bro:id:`Control::host` is a non-global IPv6 address and + ## requires a specific :rfc:`4007` ``zone_id``, it can be set here. + const zone_id = "" &redef; + ## The command that is being done. It's typically set on the ## command line. const cmd = "" &redef; diff --git a/scripts/policy/frameworks/communication/listen.bro b/scripts/policy/frameworks/communication/listen.bro index 609e8c91d6..111bc64a23 100644 --- a/scripts/policy/frameworks/communication/listen.bro +++ b/scripts/policy/frameworks/communication/listen.bro @@ -9,5 +9,5 @@ event bro_init() &priority=-10 { enable_communication(); listen(listen_interface, listen_port, listen_ssl, listen_ipv6, - listen_retry); + listen_ipv6_zone_id, listen_retry); } diff --git a/scripts/policy/frameworks/control/controller.bro b/scripts/policy/frameworks/control/controller.bro index 39647095db..22b19bf973 100644 --- a/scripts/policy/frameworks/control/controller.bro +++ b/scripts/policy/frameworks/control/controller.bro @@ -25,8 +25,8 @@ event bro_init() &priority=5 # Establish the communication configuration and only request response # messages. - Communication::nodes["control"] = [$host=host, $p=host_port, - $sync=F, $connect=T, + Communication::nodes["control"] = [$host=host, $zone_id=zone_id, + $p=host_port, $sync=F, $connect=T, $class="control", $events=Control::controllee_events]; } diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 9123e99ef4..b73494204c 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -173,6 +173,9 @@ #include #include +#include +#include +#include #include "RemoteSerializer.h" #include "Func.h" @@ -323,6 +326,16 @@ static const char* msgToStr(int msg) } } +static vector tokenize(const string& s, char delim) + { + vector tokens; + stringstream ss(s); + string token; + while ( std::getline(ss, token, delim) ) + tokens.push_back(token); + return tokens; + } + // Start of every message between two processes. We do the low-level work // ourselves to make this 64-bit safe. (The actual layout is an artifact of // an earlier design that depended on how a 32-bit GCC lays out its structs ...) 
@@ -665,7 +678,8 @@ void RemoteSerializer::Fork() } RemoteSerializer::PeerID RemoteSerializer::Connect(const IPAddr& ip, - uint16 port, const char* our_class, double retry, bool use_ssl) + const string& zone_id, uint16 port, const char* our_class, double retry, + bool use_ssl) { if ( ! using_communication ) return true; @@ -682,11 +696,13 @@ RemoteSerializer::PeerID RemoteSerializer::Connect(const IPAddr& ip, if ( our_class ) p->our_class = our_class; - uint32 bytes[4]; - ip.CopyIPv6(bytes, IPAddr::Host); + const size_t BUFSIZE = 1024; + char* data = new char[BUFSIZE]; + snprintf(data, BUFSIZE, "%"PRIu64",%s,%s,%"PRIu16",%"PRIu32",%d", p->id, + ip.AsString().c_str(), zone_id.c_str(), port, uint32(retry), + use_ssl); - if ( ! SendToChild(MSG_CONNECT_TO, p, 8, p->id, bytes[0], bytes[1], - bytes[2], bytes[3], port, uint32(retry), use_ssl) ) + if ( ! SendToChild(MSG_CONNECT_TO, p, data) ) { RemovePeer(p); return false; @@ -1219,7 +1235,7 @@ bool RemoteSerializer::SendCapabilities(Peer* peer) } bool RemoteSerializer::Listen(const IPAddr& ip, uint16 port, bool expect_ssl, - bool ipv6, double retry) + bool ipv6, const string& zone_id, double retry) { if ( ! using_communication ) return true; @@ -1229,13 +1245,16 @@ bool RemoteSerializer::Listen(const IPAddr& ip, uint16 port, bool expect_ssl, if ( ! ipv6 && ip.GetFamily() == IPv6 && ip != IPAddr("0.0.0.0") && ip != IPAddr("::") ) - reporter->FatalError("Attempt to listen on address %s, but IPv6 communication disabled", ip.AsString().c_str()); + reporter->FatalError("Attempt to listen on address %s, but IPv6 " + "communication disabled", ip.AsString().c_str()); - uint32 bytes[4]; - ip.CopyIPv6(bytes, IPAddr::Host); + const size_t BUFSIZE = 1024; + char* data = new char[BUFSIZE]; + snprintf(data, BUFSIZE, "%s,%"PRIu16",%d,%d,%s,%"PRIu32, + ip.AsString().c_str(), port, expect_ssl, ipv6, zone_id.c_str(), + (uint32) retry); - if ( ! SendToChild(MSG_LISTEN, 0, 8, bytes[0], bytes[1], bytes[2], bytes[3], - port, expect_ssl, ipv6, (uint32) retry) ) + if ( ! SendToChild(MSG_LISTEN, 0, data) ) return false; listening = true; @@ -1947,9 +1966,22 @@ bool RemoteSerializer::EnterPhaseRunning(Peer* peer) bool RemoteSerializer::ProcessConnected() { // IP and port follow. - uint32* args = (uint32*) current_args->data; - IPAddr host = IPAddr(IPv6, args, IPAddr::Network); - uint16 port = (uint16) ntohl(args[4]); + vector args = tokenize(current_args->data, ','); + + if ( args.size() != 2 ) + { + InternalCommError("ProcessConnected() bad number of arguments"); + return false; + } + + IPAddr host = IPAddr(args[0]); + uint16 port; + + if ( ! atoi_n(args[1].size(), args[1].c_str(), 0, 10, port) ) + { + InternalCommError("ProcessConnected() bad peer port string"); + return false; + } if ( ! current_peer ) { @@ -3692,14 +3724,43 @@ bool SocketComm::ForwardChunkToPeer() bool SocketComm::ProcessConnectTo() { assert(parent_args); - uint32* args = (uint32*) parent_args->data; + vector args = tokenize(parent_args->data, ','); + + if ( args.size() != 6 ) + { + Error(fmt("ProcessConnectTo() bad number of arguments")); + return false; + } Peer* peer = new Peer; - peer->id = ntohl(args[0]); - peer->ip = IPAddr(IPv6, &args[1], IPAddr::Network); - peer->port = ntohl(args[5]); - peer->retry = ntohl(args[6]); - peer->ssl = ntohl(args[7]); + + if ( ! atoi_n(args[0].size(), args[0].c_str(), 0, 10, peer->id) ) + { + Error(fmt("ProccessConnectTo() bad peer id string")); + delete peer; + return false; + } + + peer->ip = IPAddr(args[1]); + peer->zone_id = args[2]; + + if ( ! 
atoi_n(args[3].size(), args[3].c_str(), 0, 10, peer->port) ) + { + Error(fmt("ProcessConnectTo() bad peer port string")); + delete peer; + return false; + } + + if ( ! atoi_n(args[4].size(), args[4].c_str(), 0, 10, peer->retry) ) + { + Error(fmt("ProcessConnectTo() bad peer retry string")); + delete peer; + return false; + } + + peer->ssl = false; + if ( args[5] != "0" ) + peer->ssl = true; return Connect(peer); } @@ -3707,13 +3768,37 @@ bool SocketComm::ProcessConnectTo() bool SocketComm::ProcessListen() { assert(parent_args); - uint32* args = (uint32*) parent_args->data; + vector args = tokenize(parent_args->data, ','); - listen_if = IPAddr(IPv6, args, IPAddr::Network); - listen_port = uint16(ntohl(args[4])); - listen_ssl = ntohl(args[5]) != 0; - enable_ipv6 = ntohl(args[6]) != 0; - bind_retry_interval = ntohl(args[7]); + if ( args.size() != 6 ) + { + Error(fmt("ProcessListen() bad number of arguments")); + return false; + } + + listen_if = args[0]; + + if ( ! atoi_n(args[1].size(), args[1].c_str(), 0, 10, listen_port) ) + { + Error(fmt("ProcessListen() bad peer port string")); + return false; + } + + listen_ssl = false; + if ( args[2] != "0" ) + listen_ssl = true; + + enable_ipv6 = false; + if ( args[3] != "0" ) + enable_ipv6 = true; + + listen_zone_id = args[4]; + + if ( ! atoi_n(args[5].size(), args[5].c_str(), 0, 10, bind_retry_interval) ) + { + Error(fmt("ProcessListen() bad peer port string")); + return false; + } return Listen(); } @@ -3889,10 +3974,11 @@ bool SocketComm::Connect(Peer* peer) char port_str[16]; modp_uitoa10(peer->port, port_str); - // TODO: better to accept string arguments from the user to pass into - // getaddrinfo? This might make it easier to explicitly connect to - // non-global IPv6 addresses with a scope zone identifier (RFC 4007). - status = getaddrinfo(peer->ip.AsString().c_str(), port_str, &hints, &res0); + string gaihostname(peer->ip.AsString()); + if ( peer->zone_id != "" ) + gaihostname.append("%").append(peer->zone_id); + + status = getaddrinfo(gaihostname.c_str(), port_str, &hints, &res0); if ( status != 0 ) { Error(fmt("getaddrinfo error: %s", gai_strerror(status))); @@ -3964,11 +4050,12 @@ bool SocketComm::Connect(Peer* peer) { Log("connected", peer); - uint32 bytes[4]; - peer->ip.CopyIPv6(bytes, IPAddr::Host); + const size_t BUFSIZE = 1024; + char* data = new char[BUFSIZE]; + snprintf(data, BUFSIZE, "%s,%"PRIu32, peer->ip.AsString().c_str(), + peer->port); - if ( ! SendToParent(MSG_CONNECTED, peer, 5, bytes[0], bytes[1], - bytes[2], bytes[3], peer->port) ) + if ( ! SendToParent(MSG_CONNECTED, peer, data) ) return false; } @@ -4011,12 +4098,14 @@ bool SocketComm::Listen() addrinfo hints, *res, *res0; bzero(&hints, sizeof(hints)); + IPAddr listen_ip(listen_if); + if ( enable_ipv6 ) { - if ( listen_if == IPAddr("0.0.0.0") || listen_if == IPAddr("::") ) + if ( listen_ip == IPAddr("0.0.0.0") || listen_ip == IPAddr("::") ) hints.ai_family = PF_UNSPEC; else - hints.ai_family = listen_if.GetFamily() == IPv4 ? PF_INET : PF_INET6; + hints.ai_family = listen_ip.GetFamily() == IPv4 ? 
PF_INET : PF_INET6; } else hints.ai_family = PF_INET; @@ -4028,15 +4117,15 @@ bool SocketComm::Listen() char port_str[16]; modp_uitoa10(listen_port, port_str); + string scoped_addr(listen_if); + if ( listen_zone_id != "" ) + scoped_addr.append("%").append(listen_zone_id); const char* addr_str = 0; - if ( listen_if != IPAddr("0.0.0.0") && listen_if != IPAddr("::") ) - addr_str = listen_if.AsString().c_str(); + if ( listen_ip != IPAddr("0.0.0.0") && listen_ip != IPAddr("::") ) + addr_str = scoped_addr.c_str(); CloseListenFDs(); - // TODO: better to accept string arguments from the user to pass into - // getaddrinfo? This might make it easier to explicitly bind to a - // non-global IPv6 address with a scope zone identifier (RFC 4007). if ( (status = getaddrinfo(addr_str, port_str, &hints, &res0)) != 0 ) { Error(fmt("getaddrinfo error: %s", gai_strerror(status))); @@ -4056,6 +4145,10 @@ bool SocketComm::Listen() IPAddr(((sockaddr_in*)res->ai_addr)->sin_addr) : IPAddr(((sockaddr_in6*)res->ai_addr)->sin6_addr); + string l_addr_str(a.AsURIString()); + if ( listen_zone_id != "") + l_addr_str.append("%").append(listen_zone_id); + int fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol); if ( fd < 0 ) { @@ -4075,7 +4168,7 @@ bool SocketComm::Listen() if ( bind(fd, res->ai_addr, res->ai_addrlen) < 0 ) { - Error(fmt("can't bind to %s:%s, %s", a.AsURIString().c_str(), + Error(fmt("can't bind to %s:%s, %s", l_addr_str.c_str(), port_str, strerror(errno))); close(fd); @@ -4092,14 +4185,14 @@ bool SocketComm::Listen() if ( listen(fd, 50) < 0 ) { - Error(fmt("can't listen on %s:%s, %s", a.AsURIString().c_str(), + Error(fmt("can't listen on %s:%s, %s", l_addr_str.c_str(), port_str, strerror(errno))); close(fd); continue; } listen_fds.push_back(fd); - Log(fmt("listening on %s:%s (%s)", a.AsURIString().c_str(), port_str, + Log(fmt("listening on %s:%s (%s)", l_addr_str.c_str(), port_str, listen_ssl ? "ssl" : "clear")); } @@ -4155,11 +4248,12 @@ bool SocketComm::AcceptConnection(int fd) Log(fmt("accepted %s connection", peer->ssl ? "SSL" : "clear"), peer); - uint32 bytes[4]; - peer->ip.CopyIPv6(bytes, IPAddr::Host); + const size_t BUFSIZE = 1024; + char* data = new char[BUFSIZE]; + snprintf(data, BUFSIZE, "%s,%"PRIu32, peer->ip.AsString().c_str(), + peer->port); - if ( ! SendToParent(MSG_CONNECTED, peer, 5, bytes[0], bytes[1], bytes[2], - bytes[3], peer->port) ) + if ( ! SendToParent(MSG_CONNECTED, peer, data) ) return false; return true; @@ -4176,8 +4270,13 @@ const char* SocketComm::MakeLogString(const char* msg, Peer* peer) int len = 0; if ( peer ) + { + string scoped_addr(peer->ip.AsURIString()); + if ( peer->zone_id != "" ) + scoped_addr.append("%").append(peer->zone_id); len = snprintf(buffer, BUFSIZE, "[#%d/%s:%d] ", int(peer->id), - peer->ip.AsURIString().c_str(), peer->port); + scoped_addr.c_str(), peer->port); + } len += safe_snprintf(buffer + len, BUFSIZE - len, "%s", msg); return buffer; diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index f6f94f53d3..4ebf15e68d 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -11,6 +11,7 @@ #include "File.h" #include +#include class IncrementalSendTimer; @@ -34,7 +35,8 @@ public: static const PeerID PEER_NONE = SOURCE_LOCAL; // Connect to host (returns PEER_NONE on error). - PeerID Connect(const IPAddr& ip, uint16 port, const char* our_class, double retry, bool use_ssl); + PeerID Connect(const IPAddr& ip, const string& zone_id, uint16 port, + const char* our_class, double retry, bool use_ssl); // Close connection to host. 
bool CloseConnection(PeerID peer); @@ -63,7 +65,7 @@ public: // Start to listen. bool Listen(const IPAddr& ip, uint16 port, bool expect_ssl, bool ipv6, - double retry); + const string& zone_id, double retry); // Stop it. bool StopListening(); @@ -422,6 +424,7 @@ protected: RemoteSerializer::PeerID id; ChunkedIO* io; IPAddr ip; + string zone_id; uint16 port; char state; bool connected; @@ -502,12 +505,13 @@ protected: // If the port we're trying to bind to is already in use, we will retry // it regularly. - IPAddr listen_if; + string listen_if; + string listen_zone_id; // RFC 4007 IPv6 zone_id uint16 listen_port; - bool listen_ssl; - bool enable_ipv6; // allow IPv6 listen sockets - uint32 bind_retry_interval; - time_t listen_next_try; + bool listen_ssl; // use SSL for IO + bool enable_ipv6; // allow IPv6 listen sockets + uint32 bind_retry_interval; // retry interval for already-in-use sockets + time_t listen_next_try; // time at which to try another bind bool shutting_conns_down; bool terminating; bool killing; diff --git a/src/bro.bif b/src/bro.bif index 3f4215dc13..f1e451bb03 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -5267,6 +5267,10 @@ function capture_state_updates%(filename: string%) : bool ## ## ip: The IP address of the remote peer. ## +## zone_id: If *ip* is a non-global IPv6 address, a particular :rfc:`4007` +## ``zone_id`` can be given here. An empty string, ``""``, means +## not to add any ``zone_id``. +## ## port: The port of the remote peer. ## ## our_class: If an non-empty string, the remote (listening) peer checks it @@ -5290,10 +5294,11 @@ function capture_state_updates%(filename: string%) : bool ## set_compression_level ## send_state ## send_id -function connect%(ip: addr, p: port, our_class: string, retry: interval, ssl: bool%) : count +function connect%(ip: addr, zone_id: string, p: port, our_class: string, retry: interval, ssl: bool%) : count %{ - return new Val(uint32(remote_serializer->Connect(ip->AsAddr(), p->Port(), - our_class->CheckString(), retry, ssl)), + return new Val(uint32(remote_serializer->Connect(ip->AsAddr(), + zone_id->CheckString(), p->Port(), our_class->CheckString(), + retry, ssl)), TYPE_COUNT); %} @@ -5404,15 +5409,19 @@ function set_compression_level%(p: event_peer, level: count%) : bool ## ## ipv6: If true, enable listening on IPv6 addresses. ## +## zone_id: If *ip* is a non-global IPv6 address, a particular :rfc:`4007` +## ``zone_id`` can be given here. An empty string, ``""``, means +## not to add any ``zone_id``. +## ## retry_interval: If address *ip* is found to be already in use, this is ## the interval at which to automatically retry binding. ## ## Returns: True on success. ## ## .. bro:see:: connect disconnect -function listen%(ip: addr, p: port, ssl: bool, ipv6: bool, retry_interval: interval%) : bool +function listen%(ip: addr, p: port, ssl: bool, ipv6: bool, zone_id: string, retry_interval: interval%) : bool %{ - return new Val(remote_serializer->Listen(ip->AsAddr(), p->Port(), ssl, ipv6, retry_interval), TYPE_BOOL); + return new Val(remote_serializer->Listen(ip->AsAddr(), p->Port(), ssl, ipv6, zone_id->CheckString(), retry_interval), TYPE_BOOL); %} ## Checks whether the last raised event came from a remote peer. diff --git a/src/util.cc b/src/util.cc index 90143923f1..798be400d1 100644 --- a/src/util.cc +++ b/src/util.cc @@ -376,6 +376,8 @@ template int atoi_n(int len, const char* s, const char** end, int base, // Instantiate the ones we need.
template int atoi_n(int len, const char* s, const char** end, int base, int& result); +template int atoi_n(int len, const char* s, const char** end, int base, uint16_t& result); +template int atoi_n(int len, const char* s, const char** end, int base, uint32_t& result); template int atoi_n(int len, const char* s, const char** end, int base, int64_t& result); template int atoi_n(int len, const char* s, const char** end, int base, uint64_t& result); From ea2bd659f3b206eb4d6001c8f5b290b9e0cd3e06 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 17 May 2012 12:41:10 -0700 Subject: [PATCH 296/651] Adding target update-doc-sources to top-level Makefile that runs genDocSourcesList.sh. --- DocSourcesList.cmake | 144 ++++++++++++++++++++++++++++++++ Makefile | 3 + testing/btest/coverage/doc.test | 5 +- 3 files changed, 151 insertions(+), 1 deletion(-) create mode 100644 DocSourcesList.cmake diff --git a/DocSourcesList.cmake b/DocSourcesList.cmake new file mode 100644 index 0000000000..1743b0258f --- /dev/null +++ b/DocSourcesList.cmake @@ -0,0 +1,144 @@ +# DO NOT EDIT +# This file is auto-generated from the genDocSourcesList.sh script. +# +# This is a list of Bro script sources for which to generate reST documentation. +# It will be included inline in the CMakeLists.txt found in the same directory +# in order to create Makefile targets that define how to generate reST from +# a given Bro script. +# +# Note: any path prefix of the script (2nd argument of rest_target macro) +# will be used to derive what path under scripts/ the generated documentation +# will be placed. + +set(psd ${PROJECT_SOURCE_DIR}/scripts) + +rest_target(${CMAKE_CURRENT_SOURCE_DIR} example.bro internal) +rest_target(${psd} base/init-default.bro internal) +rest_target(${psd} base/init-bare.bro internal) + +rest_target(${CMAKE_BINARY_DIR}/src base/bro.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/const.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/event.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/logging.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/reporter.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/strings.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/types.bif.bro) +rest_target(${psd} base/frameworks/cluster/main.bro) +rest_target(${psd} base/frameworks/cluster/nodes/manager.bro) +rest_target(${psd} base/frameworks/cluster/nodes/proxy.bro) +rest_target(${psd} base/frameworks/cluster/nodes/worker.bro) +rest_target(${psd} base/frameworks/cluster/setup-connections.bro) +rest_target(${psd} base/frameworks/communication/main.bro) +rest_target(${psd} base/frameworks/control/main.bro) +rest_target(${psd} base/frameworks/dpd/main.bro) +rest_target(${psd} base/frameworks/intel/main.bro) +rest_target(${psd} base/frameworks/logging/main.bro) +rest_target(${psd} base/frameworks/logging/postprocessors/scp.bro) +rest_target(${psd} base/frameworks/logging/postprocessors/sftp.bro) +rest_target(${psd} base/frameworks/logging/writers/ascii.bro) +rest_target(${psd} base/frameworks/logging/writers/dataseries.bro) +rest_target(${psd} base/frameworks/metrics/cluster.bro) +rest_target(${psd} base/frameworks/metrics/main.bro) +rest_target(${psd} base/frameworks/metrics/non-cluster.bro) +rest_target(${psd} base/frameworks/notice/actions/add-geodata.bro) +rest_target(${psd} base/frameworks/notice/actions/drop.bro) +rest_target(${psd} base/frameworks/notice/actions/email_admin.bro) +rest_target(${psd} base/frameworks/notice/actions/page.bro) +rest_target(${psd} base/frameworks/notice/actions/pp-alarms.bro) 
+rest_target(${psd} base/frameworks/notice/cluster.bro) +rest_target(${psd} base/frameworks/notice/extend-email/hostnames.bro) +rest_target(${psd} base/frameworks/notice/main.bro) +rest_target(${psd} base/frameworks/notice/weird.bro) +rest_target(${psd} base/frameworks/packet-filter/main.bro) +rest_target(${psd} base/frameworks/packet-filter/netstats.bro) +rest_target(${psd} base/frameworks/reporter/main.bro) +rest_target(${psd} base/frameworks/signatures/main.bro) +rest_target(${psd} base/frameworks/software/main.bro) +rest_target(${psd} base/protocols/conn/contents.bro) +rest_target(${psd} base/protocols/conn/inactivity.bro) +rest_target(${psd} base/protocols/conn/main.bro) +rest_target(${psd} base/protocols/dns/consts.bro) +rest_target(${psd} base/protocols/dns/main.bro) +rest_target(${psd} base/protocols/ftp/file-extract.bro) +rest_target(${psd} base/protocols/ftp/main.bro) +rest_target(${psd} base/protocols/ftp/utils-commands.bro) +rest_target(${psd} base/protocols/http/file-extract.bro) +rest_target(${psd} base/protocols/http/file-hash.bro) +rest_target(${psd} base/protocols/http/file-ident.bro) +rest_target(${psd} base/protocols/http/main.bro) +rest_target(${psd} base/protocols/http/utils.bro) +rest_target(${psd} base/protocols/irc/dcc-send.bro) +rest_target(${psd} base/protocols/irc/main.bro) +rest_target(${psd} base/protocols/smtp/entities-excerpt.bro) +rest_target(${psd} base/protocols/smtp/entities.bro) +rest_target(${psd} base/protocols/smtp/main.bro) +rest_target(${psd} base/protocols/ssh/main.bro) +rest_target(${psd} base/protocols/ssl/consts.bro) +rest_target(${psd} base/protocols/ssl/main.bro) +rest_target(${psd} base/protocols/ssl/mozilla-ca-list.bro) +rest_target(${psd} base/protocols/syslog/consts.bro) +rest_target(${psd} base/protocols/syslog/main.bro) +rest_target(${psd} base/utils/addrs.bro) +rest_target(${psd} base/utils/conn-ids.bro) +rest_target(${psd} base/utils/directions-and-hosts.bro) +rest_target(${psd} base/utils/files.bro) +rest_target(${psd} base/utils/numbers.bro) +rest_target(${psd} base/utils/paths.bro) +rest_target(${psd} base/utils/patterns.bro) +rest_target(${psd} base/utils/site.bro) +rest_target(${psd} base/utils/strings.bro) +rest_target(${psd} base/utils/thresholds.bro) +rest_target(${psd} policy/frameworks/communication/listen.bro) +rest_target(${psd} policy/frameworks/control/controllee.bro) +rest_target(${psd} policy/frameworks/control/controller.bro) +rest_target(${psd} policy/frameworks/dpd/detect-protocols.bro) +rest_target(${psd} policy/frameworks/dpd/packet-segment-logging.bro) +rest_target(${psd} policy/frameworks/metrics/conn-example.bro) +rest_target(${psd} policy/frameworks/metrics/http-example.bro) +rest_target(${psd} policy/frameworks/metrics/ssl-example.bro) +rest_target(${psd} policy/frameworks/software/version-changes.bro) +rest_target(${psd} policy/frameworks/software/vulnerable.bro) +rest_target(${psd} policy/integration/barnyard2/main.bro) +rest_target(${psd} policy/integration/barnyard2/types.bro) +rest_target(${psd} policy/misc/analysis-groups.bro) +rest_target(${psd} policy/misc/capture-loss.bro) +rest_target(${psd} policy/misc/loaded-scripts.bro) +rest_target(${psd} policy/misc/profiling.bro) +rest_target(${psd} policy/misc/stats.bro) +rest_target(${psd} policy/misc/trim-trace-file.bro) +rest_target(${psd} policy/protocols/conn/known-hosts.bro) +rest_target(${psd} policy/protocols/conn/known-services.bro) +rest_target(${psd} policy/protocols/conn/weirds.bro) +rest_target(${psd} policy/protocols/dns/auth-addl.bro) 
+rest_target(${psd} policy/protocols/dns/detect-external-names.bro) +rest_target(${psd} policy/protocols/ftp/detect.bro) +rest_target(${psd} policy/protocols/ftp/software.bro) +rest_target(${psd} policy/protocols/http/detect-MHR.bro) +rest_target(${psd} policy/protocols/http/detect-intel.bro) +rest_target(${psd} policy/protocols/http/detect-sqli.bro) +rest_target(${psd} policy/protocols/http/detect-webapps.bro) +rest_target(${psd} policy/protocols/http/header-names.bro) +rest_target(${psd} policy/protocols/http/software-browser-plugins.bro) +rest_target(${psd} policy/protocols/http/software.bro) +rest_target(${psd} policy/protocols/http/var-extraction-cookies.bro) +rest_target(${psd} policy/protocols/http/var-extraction-uri.bro) +rest_target(${psd} policy/protocols/smtp/blocklists.bro) +rest_target(${psd} policy/protocols/smtp/detect-suspicious-orig.bro) +rest_target(${psd} policy/protocols/smtp/software.bro) +rest_target(${psd} policy/protocols/ssh/detect-bruteforcing.bro) +rest_target(${psd} policy/protocols/ssh/geo-data.bro) +rest_target(${psd} policy/protocols/ssh/interesting-hostnames.bro) +rest_target(${psd} policy/protocols/ssh/software.bro) +rest_target(${psd} policy/protocols/ssl/cert-hash.bro) +rest_target(${psd} policy/protocols/ssl/expiring-certs.bro) +rest_target(${psd} policy/protocols/ssl/extract-certs-pem.bro) +rest_target(${psd} policy/protocols/ssl/known-certs.bro) +rest_target(${psd} policy/protocols/ssl/validate-certs.bro) +rest_target(${psd} policy/tuning/defaults/packet-fragments.bro) +rest_target(${psd} policy/tuning/defaults/warnings.bro) +rest_target(${psd} policy/tuning/track-all-assets.bro) +rest_target(${psd} site/local-manager.bro) +rest_target(${psd} site/local-proxy.bro) +rest_target(${psd} site/local-worker.bro) +rest_target(${psd} site/local.bro) +rest_target(${psd} test-all-policy.bro) diff --git a/Makefile b/Makefile index 455fa6ed88..8633c736a4 100644 --- a/Makefile +++ b/Makefile @@ -41,6 +41,9 @@ broxygen: configured broxygenclean: configured $(MAKE) -C $(BUILD) $@ +update-doc-sources: + ./doc/scripts/genDocSourcesList.sh ./doc/scripts/DocSourcesList.cmake + dist: @rm -rf $(VERSION_FULL) $(VERSION_FULL).tgz @rm -rf $(VERSION_MIN) $(VERSION_MIN).tgz diff --git a/testing/btest/coverage/doc.test b/testing/btest/coverage/doc.test index 18ed13e6fa..d99122575d 100644 --- a/testing/btest/coverage/doc.test +++ b/testing/btest/coverage/doc.test @@ -1,7 +1,10 @@ # This tests that we're generating bro script documentation for all the # available bro scripts. If this fails, then the genDocSources.sh needs # to be run to produce a new DocSourcesList.cmake or genDocSources.sh needs -# to be updated to blacklist undesired scripts. +# to be updated to blacklist undesired scripts. To update, run the +# top-level Makefile: +# +# make update-doc-sources # # @TEST-EXEC: $DIST/doc/scripts/genDocSourcesList.sh # @TEST-EXEC: cmp $DIST/doc/scripts/DocSourcesList.cmake ./DocSourcesList.cmake From 1824808dcc73aa4a63ff48cf4de4b606042d1dda Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 17 May 2012 12:42:30 -0700 Subject: [PATCH 297/651] Updating submodule(s). [nomail] --- CHANGES | 26 ++++++++++++++++++++++++++ VERSION | 2 +- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- 6 files changed, 31 insertions(+), 5 deletions(-) diff --git a/CHANGES b/CHANGES index 6471dbe4be..9ea16475af 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,30 @@ +2.0-367 | 2012-05-17 12:42:30 -0700 + + * Adding support for binary output via DataSeries. 
See + logging-dataseries.rst for more information. (Gilbert Clark and + Robin Sommer) + + * Adding target update-doc-sources to top-level Makefile that runs + genDocSourcesList.sh. (Robin Sommer) + + * Moving trace for rotation test into traces directory. (Robin Sommer) + + * Fixing a rotation race condition at termination. (Robin Sommer) + + * Extending log post-processor call to include the name of the + writer. (Robin Sommer) + + * In threads, an internal error now immediately aborts. Otherwise, + the error won't make it back to the main thread for a while and + subsequent code in the thread would still execute. (Robin Sommer) + + * DataSeries cleanup. (Robin Sommer) + + * Fixing threads' DoFinish() method. It wasn't called reliably. Now, + it's always called before the thread is destroyed (assuming + processing has went normally so far). (Robin Sommer) + 2.0-341 | 2012-05-17 09:54:30 -0700 * Add a comment to explain the ICMPv6 error message types. (Daniel Thayer) diff --git a/VERSION b/VERSION index 358412b7a2..c3d8a81658 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-341 +2.0-367 diff --git a/aux/binpac b/aux/binpac index 71c37019bc..b4094cb75e 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 71c37019bc371eb7863fb6aa47a7daa4540f4f1f +Subproject commit b4094cb75e0a7769123f7db1f5d73f3f9f1c3977 diff --git a/aux/bro-aux b/aux/bro-aux index d885987e79..2038e3de04 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit d885987e7968669e34504b0403ac89bd13928e9a +Subproject commit 2038e3de042115c3caa706426e16c830c1fd1e9e diff --git a/aux/broccoli b/aux/broccoli index 157c18427c..95c93494d7 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 157c18427cb9bb52564e65d8224b95f70dc79e66 +Subproject commit 95c93494d7192f69d30f208c4caa3bd38adda6fd diff --git a/aux/broctl b/aux/broctl index ed933502b4..ba9e1aa2f2 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit ed933502b4d2518f94b6cfa7a5b371e53fda5c3d +Subproject commit ba9e1aa2f2159deac0cf96863f54405643764df0 From 3fedd32f4de9dcdf430c7d2bd54d8a84a352f0fe Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 17 May 2012 14:47:09 -0500 Subject: [PATCH 298/651] Fix mobility checksums unit test. Was failing because it depended on weirds being sent to stderr. 
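The underlying cause is that weird events now end up in weird.log instead of on stderr (see the "Don't print the various weird events to stderr" entry, #805, in the CHANGES excerpt above). The test is therefore reworked to diff weird.log for the bad-checksum traces and to require that no weird.log appears for the good-checksum ones. Abbreviated, the pattern now looks like this (the full set of traces is in the diff below):

    # @TEST-EXEC: bro -r $TRACES/chksums/mip6-bad-mh-chksum.pcap
    # @TEST-EXEC: mv weird.log bad.out
    # @TEST-EXEC: bro -r $TRACES/chksums/mip6-good-mh-chksum.pcap
    # @TEST-EXEC: test ! -e weird.log
    # @TEST-EXEC: btest-diff bad.out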
--- .../Baseline/core.mobility-checksums/bad.out | 27 ++++++++++++++++--- .../Baseline/core.mobility-checksums/good.out | 0 testing/btest/core/mobility-checksums.test | 20 +++++++++----- 3 files changed, 37 insertions(+), 10 deletions(-) delete mode 100644 testing/btest/Baseline/core.mobility-checksums/good.out diff --git a/testing/btest/Baseline/core.mobility-checksums/bad.out b/testing/btest/Baseline/core.mobility-checksums/bad.out index 6ea9955402..dfbd5006a9 100644 --- a/testing/btest/Baseline/core.mobility-checksums/bad.out +++ b/testing/btest/Baseline/core.mobility-checksums/bad.out @@ -1,3 +1,24 @@ -1333988844.893456 weird: bad_MH_checksum -1333995733.276730 weird: bad_TCP_checksum -1333995701.656496 weird: bad_UDP_checksum +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1333988844.893456 - - - - - bad_MH_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1333640536.489921 UWkUyAuUGXf 2001:78:1:32::1 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 80 bad_TCP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1333640468.146461 UWkUyAuUGXf 2001:78:1:32::1 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 13000 bad_UDP_checksum - F bro diff --git a/testing/btest/Baseline/core.mobility-checksums/good.out b/testing/btest/Baseline/core.mobility-checksums/good.out deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/testing/btest/core/mobility-checksums.test b/testing/btest/core/mobility-checksums.test index 1d41daf543..8a88eb8194 100644 --- a/testing/btest/core/mobility-checksums.test +++ b/testing/btest/core/mobility-checksums.test @@ -1,9 +1,15 @@ # @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/config.h -# @TEST-EXEC: bro -b -r $TRACES/chksums/mip6-bad-mh-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-tcp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-udp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/mip6-good-mh-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-tcp-good-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-udp-good-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -r $TRACES/chksums/mip6-bad-mh-chksum.pcap +# @TEST-EXEC: mv weird.log bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-tcp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-udp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: rm weird.log +# @TEST-EXEC: bro -r $TRACES/chksums/mip6-good-mh-chksum.pcap +# @TEST-EXEC: test ! -e weird.log +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-tcp-good-chksum.pcap +# @TEST-EXEC: test ! -e weird.log +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-udp-good-chksum.pcap +# @TEST-EXEC: test ! 
-e weird.log # @TEST-EXEC: btest-diff bad.out -# @TEST-EXEC: btest-diff good.out From 90a1814a0a2257ca2aa8d04f7b07389bc207c00e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 17 May 2012 12:47:30 -0700 Subject: [PATCH 299/651] Linking in the DS docs. --- doc/logging.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/logging.rst b/doc/logging.rst index 30a793df7d..384996c28a 100644 --- a/doc/logging.rst +++ b/doc/logging.rst @@ -373,3 +373,13 @@ record, care must be given to when and how long data is stored. Normally data saved to the connection record will remain there for the duration of the connection and from a practical perspective it's not uncommon to need to delete that data before the end of the connection. + +Other Writers +------------- + +Bro support the following output formats other than ASCII: + +.. toctree:: + :maxdepth: 1 + + logging-dataseries From 5f3575425d6e485bc2fed9dfc295ae4fb191d0f7 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 17 May 2012 15:26:28 -0500 Subject: [PATCH 300/651] Fix compile errors. Preprocess out DataSeries.cc based on config.h's USE_DATASERIES value and one reference to threading::Field needed scoping. --- src/logging/Manager.cc | 2 +- src/logging/writers/DataSeries.cc | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index f78e47da73..34d10a1abf 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -466,7 +466,7 @@ bool Manager::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, filter->fields = (threading::Field**) realloc(filter->fields, - sizeof(Field) * ++filter->num_fields); + sizeof(threading::Field) * ++filter->num_fields); if ( ! filter->fields ) { diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index 9f19028be3..1d5a6ea4da 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -1,5 +1,9 @@ // See the file "COPYING" in the main distribution directory for copyright. +#include "config.h" + +#ifdef USE_DATASERIES + #include #include #include @@ -415,3 +419,5 @@ bool DataSeries::DoSetBuf(bool enabled) // DataSeries is *always* buffered to some degree. This option is ignored. return true; } + +#endif /* USE_DATASERIES */ From be65ddca375e906bd6d409d50fbe894c759bb32d Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 17 May 2012 16:03:17 -0500 Subject: [PATCH 301/651] Correct various errors in the BIF documentation --- src/bro.bif | 210 ++++++++++++++++++++++++------------------------ src/strings.bif | 50 ++++++------ 2 files changed, 131 insertions(+), 129 deletions(-) diff --git a/src/bro.bif b/src/bro.bif index 15740a83c7..212a27044f 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -963,7 +963,7 @@ function sha256_hash_finish%(index: any%): string ## Generates a random number. ## -## max: The maximum value the random number. +## max: The maximum value of the random number. ## ## Returns: a random positive integer in the interval *[0, max)*. ## @@ -1020,7 +1020,7 @@ extern "C" { ## data: The data to find the MIME type for. ## ## return_mime: If true, the function returns a short MIME type string (e.g., -## ``text/plain`` instead of a more elaborate textual description. +## ``text/plain`` instead of a more elaborate textual description). ## ## Returns: The MIME type of *data*. 
function identify_data%(data: string, return_mime: bool%): string @@ -1241,8 +1241,6 @@ function unique_id_from%(pool: int, prefix: string%) : string ## Removes all elements from a set or table. ## ## v: The set or table -## -## Returns: The cleared set/table or 0 if *v* is not a set/table type. function clear_table%(v: any%): any %{ if ( v->Type()->Tag() == TYPE_TABLE ) @@ -1290,7 +1288,7 @@ function same_object%(o1: any, o2: any%): bool return new Val(o1 == o2, TYPE_BOOL); %} -## Returns the number bytes that a value occupies in memory. +## Returns the number of bytes that a value occupies in memory. ## ## v: The value ## @@ -1306,7 +1304,7 @@ function val_size%(v: any%): count ## ## newsize: The new size of *aggr*. ## -## Returns: The old size of *aggr* and 0 if *aggr* is not a :bro:type:`vector`. +## Returns: The old size of *aggr*, or 0 if *aggr* is not a :bro:type:`vector`. function resize%(aggr: any, newsize: count%) : count %{ if ( aggr->Type()->Tag() != TYPE_VECTOR ) @@ -1423,7 +1421,7 @@ bool indirect_int_sort_function(int a, int b) %%} ## Sorts a vector in place. The second argument is a comparison function that -## takes two arguments: if the vector type is \verb|vector of T|, then the +## takes two arguments: if the vector type is ``vector of T``, then the ## comparison function must be ``function(a: T, b: T): bool``, which returns ## ``a < b`` for some type-specific notion of the less-than operator. ## @@ -1599,7 +1597,7 @@ function cat%(...%): string ## given argument. If any of the variable arguments is an empty string it is ## replaced by a given default string instead. ## -## sep: The separator to place betwen each argument. +## sep: The separator to place between each argument. ## ## def: The default string to use when an argument is the empty string. ## @@ -1657,7 +1655,7 @@ function cat_sep%(sep: string, def: string, ...%): string ## ## - ``[DT]``: ISO timestamp with microsecond precision ## -## - ``d``: Signed/Unsigned integer (using C-style ``%lld|``/``%llu`` +## - ``d``: Signed/Unsigned integer (using C-style ``%lld``/``%llu`` ## for ``int``/``count``) ## ## - ``x``: Unsigned hexadecimal (using C-style ``%llx``); @@ -1782,7 +1780,7 @@ function log10%(d: double%): double # =========================================================================== ## Determines whether *c* has been received externally. For example, -## Broccoli or the Time Machine can send packets to Bro via a mechanism that +## Broccoli or the Time Machine can send packets to Bro via a mechanism that is ## one step lower than sending events. This function checks whether the packets ## of a connection stem from one of these external *packet sources*. ## @@ -1796,7 +1794,7 @@ function is_external_connection%(c: connection%) : bool ## Returns the ID of the analyzer which raised the current event. ## -## Returns: The ID of the analyzer which raised hte current event, or 0 if +## Returns: The ID of the analyzer which raised the current event, or 0 if ## none. function current_analyzer%(%) : count %{ @@ -2053,7 +2051,7 @@ function get_gap_summary%(%): gap_info %} ## Generates a table of the size of all global variables. The table index is -## the variable name and the value the variable size in bytes. +## the variable name and the value is the variable size in bytes. ## ## Returns: A table that maps variable names to their sizes. ## @@ -2138,7 +2136,7 @@ function lookup_ID%(id: string%) : any return i->ID_Val()->Ref(); %} -## Generates meta data about a record fields. 
The returned information +## Generates metadata about a record's fields. The returned information ## includes the field name, whether it is logged, its value (if it has one), ## and its default value (if specified). ## @@ -2269,11 +2267,11 @@ function dump_rule_stats%(f: file%): bool return new Val(1, TYPE_BOOL); %} -## Checks wheter Bro is terminating. +## Checks if Bro is terminating. ## ## Returns: True if Bro is in the process of shutting down. ## -## .. bro:see: terminate +## .. bro:see:: terminate function bro_is_terminating%(%): bool %{ return new Val(terminating, TYPE_BOOL); @@ -2354,7 +2352,7 @@ function routing0_data_to_addrs%(s: string%): addr_vec return rval; %} -## Converts a :bro:type:`addr` to a :bro:type:`index_vec`. +## Converts an :bro:type:`addr` to an :bro:type:`index_vec`. ## ## a: The address to convert into a vector of counts. ## @@ -2374,7 +2372,7 @@ function addr_to_counts%(a: addr%): index_vec return rval; %} -## Converts a :bro:type:`index_vec` to a :bro:type:`addr`. +## Converts an :bro:type:`index_vec` to an :bro:type:`addr`. ## ## v: The vector containing host-order IP address representation, ## one element for IPv4 addresses, four elements for IPv6 addresses. @@ -2404,7 +2402,7 @@ function counts_to_addr%(v: index_vec%): addr } %} -## Converts a :bro:type:`string` to a :bro:type:`int`. +## Converts a :bro:type:`string` to an :bro:type:`int`. ## ## str: The :bro:type:`string` to convert. ## @@ -2434,7 +2432,7 @@ function to_int%(str: string%): int ## ## n: The :bro:type:`int` to convert. ## -## Returns: The :bro:type:`int` *n* as unsigned integer or 0 if *n* < 0. +## Returns: The :bro:type:`int` *n* as unsigned integer, or 0 if *n* < 0. function int_to_count%(n: int%): count %{ if ( n < 0 ) @@ -2449,7 +2447,7 @@ function int_to_count%(n: int%): count ## ## d: The :bro:type:`double` to convert. ## -## Returns: The :bro:type:`double` *d* as unsigned integer or 0 if *d* < 0.0. +## Returns: The :bro:type:`double` *d* as unsigned integer, or 0 if *d* < 0.0. ## ## .. bro:see:: double_to_time function double_to_count%(d: double%): count @@ -2464,8 +2462,8 @@ function double_to_count%(d: double%): count ## ## str: The :bro:type:`string` to convert. ## -## Returns: The :bro:type:`string` *str* as unsigned integer or if in invalid -## format. +## Returns: The :bro:type:`string` *str* as unsigned integer, or 0 if *str* has +## an invalid format. ## ## .. bro:see:: to_addr to_int to_port to_subnet function to_count%(str: string%): count @@ -2498,7 +2496,7 @@ function interval_to_double%(i: interval%): double ## Converts a :bro:type:`time` value to a :bro:type:`double`. ## -## t: The :bro:type:`interval` to convert. +## t: The :bro:type:`time` to convert. ## ## Returns: The :bro:type:`time` value *t* as :bro:type:`double`. ## @@ -2508,11 +2506,11 @@ function time_to_double%(t: time%): double return new Val(t, TYPE_DOUBLE); %} -## Converts a :bro:type:`time` value to a :bro:type:`double`. +## Converts a :bro:type:`double` value to a :bro:type:`time`. ## -## t: The :bro:type:`interval` to convert. +## d: The :bro:type:`double` to convert. ## -## Returns: The :bro:type:`time` value *t* as :bro:type:`double`. +## Returns: The :bro:type:`double` value *d* as :bro:type:`time`. ## ## .. bro:see:: time_to_double double_to_count function double_to_time%(d: double%): time @@ -2550,7 +2548,7 @@ function port_to_count%(p: port%): count ## ## proto: The transport protocol. ## -## Returns: The :bro:type:`count` *c* as :bro:type:`port`. 
+## Returns: The :bro:type:`count` *num* as :bro:type:`port`. ## ## .. bro:see:: port_to_count function count_to_port%(num: count, proto: transport_proto%): port @@ -2562,7 +2560,7 @@ function count_to_port%(num: count, proto: transport_proto%): port ## ## ip: The :bro:type:`string` to convert. ## -## Returns: The :bro:type:`string` *ip* as :bro:type:`addr` or the unspecified +## Returns: The :bro:type:`string` *ip* as :bro:type:`addr`, or the unspecified ## address ``::`` if the input string does not parse correctly. ## ## .. bro:see:: to_count to_int to_port count_to_v4_addr raw_bytes_to_v4_addr @@ -2579,7 +2577,7 @@ function to_addr%(ip: string%): addr ## ## sn: The subnet to convert. ## -## Returns: The *sn* string as a :bro:type:`subnet` or the unspecified subnet +## Returns: The *sn* string as a :bro:type:`subnet`, or the unspecified subnet ## ``::/0`` if the input string does not parse correctly. ## ## .. bro:see:: to_count to_int to_port count_to_v4_addr raw_bytes_to_v4_addr @@ -2616,7 +2614,7 @@ function count_to_v4_addr%(ip: count%): addr ## ## b: The raw bytes (:bro:type:`string`) to convert. ## -## Returns: The byte :bro:type:`string` *ip* as :bro:type:`addr`. +## Returns: The byte :bro:type:`string` *b* as :bro:type:`addr`. ## ## .. bro:see:: raw_bytes_to_v4_addr to_addr to_subnet function raw_bytes_to_v4_addr%(b: string%): addr @@ -2635,7 +2633,7 @@ function raw_bytes_to_v4_addr%(b: string%): addr return new AddrVal(htonl(a)); %} -## Converts a :bro:type:`string` to an :bro:type:`port`. +## Converts a :bro:type:`string` to a :bro:type:`port`. ## ## s: The :bro:type:`string` to convert. ## @@ -2885,7 +2883,7 @@ function parse_ftp_port%(s: string%): ftp_port %} ## Converts a string representation of the FTP EPRT command to an ``ftp_port``. -## (see `RFC 2428 `_). +## See `RFC 2428 `_. ## The format is ``EPRT``, ## where ```` is a delimiter in the ASCII range 33-126 (usually ``|``). ## @@ -2976,7 +2974,7 @@ function fmt_ftp_port%(a: addr, p: port%): string ## Decode a NetBIOS name. See http://support.microsoft.com/kb/194203. ## -## name: The encoded NetBIOS name, e.g., ``"FEEIEFCAEOEFFEECEJEPFDCAEOEBENEF:``. +## name: The encoded NetBIOS name, e.g., ``"FEEIEFCAEOEFFEECEJEPFDCAEOEBENEF"``. ## ## Returns: The decoded NetBIOS name, e.g., ``"THE NETBIOS NAME"``. ## @@ -3009,7 +3007,7 @@ function decode_netbios_name%(name: string%): string return new StringVal(i, result); %} -## Converts a NetBIOS name type to its corresonding numeric value. +## Converts a NetBIOS name type to its corresponding numeric value. ## See http://support.microsoft.com/kb/163409. ## ## name: The NetBIOS name type. @@ -3029,7 +3027,7 @@ function decode_netbios_name_type%(name: string%): count ## ## bytestring: The string of bytes. ## -## Returns: The hexadecimal reprsentation of *bytestring*. +## Returns: The hexadecimal representation of *bytestring*. ## ## .. bro:see:: hexdump function bytestring_to_hexstr%(bytestring: string%): string @@ -3069,7 +3067,7 @@ function decode_base64%(s: string%): string ## s: The Base64-encoded string. ## ## a: The custom alphabet. The empty string indicates the default alphabet. The -## lengh of *a* must bt 64. For example, a custom alphabet could be +## length of *a* must be 64. For example, a custom alphabet could be ## ``"!#$%&/(),-.:;<>@[]^ `_{|}~abcdefghijklmnopqrstuvwxyz0123456789+?"``. ## ## Returns: The decoded version of *s*. @@ -3138,7 +3136,7 @@ function uuid_to_string%(uuid: string%): string ## ## p2: The second pattern. 
## -## Returns: The compiled pattern of the concatentation of *p1* and *p2*. +## Returns: The compiled pattern of the concatenation of *p1* and *p2*. ## ## .. bro:see:: convert_for_pattern string_to_pattern ## @@ -3277,7 +3275,7 @@ function strftime%(fmt: string, d: time%) : string ## a: The address to mask. ## ## top_bits_to_keep: The number of top bits to keep in *a*; must be greater -## than 0 and less than 33. +## than 0 and less than 33 for IPv4, or 129 for IPv6. ## ## Returns: The address *a* masked down to *top_bits_to_keep* bits. ## @@ -3341,7 +3339,7 @@ function is_udp_port%(p: port%): bool ## ## p: The :bro:type:`port` to check. ## -## Returns: True iff *p* is a ICMP port. +## Returns: True iff *p* is an ICMP port. ## ## .. bro:see:: is_tcp_port is_udp_port function is_icmp_port%(p: port%): bool @@ -3383,7 +3381,7 @@ EnumVal* map_conn_type(TransportProto tp) ## ## cid: The connection identifier. ## -## Returns: The transport protocol of the connection identified by *id*. +## Returns: The transport protocol of the connection identified by *cid*. ## ## .. bro:see:: get_port_transport_proto ## get_orig_seq get_resp_seq @@ -3497,7 +3495,7 @@ const char* conn_id_string(Val* c) ## ## c: The HTTP connection. ## -## is_orig: If true, the client data is skipped and the server data otherwise. +## is_orig: If true, the client data is skipped, and the server data otherwise. ## ## .. bro:see:: skip_smtp_data function skip_http_entity_data%(c: connection, is_orig: bool%): any @@ -3572,7 +3570,7 @@ function dump_current_packet%(file_name: string%) : bool ## Returns the currently processed PCAP packet. ## -## Returns: The currently processed packet, which is as a record +## Returns: The currently processed packet, which is a record ## containing the timestamp, ``snaplen``, and packet data. ## ## .. bro:see:: dump_current_packet dump_packet send_current_packet @@ -3730,7 +3728,7 @@ function lookup_addr%(host: addr%) : string ## ## host: The hostname to lookup. ## -## Returns: A set of DNS A records associated with *host*. +## Returns: A set of DNS A and AAAA records associated with *host*. ## ## .. bro:see:: lookup_addr function lookup_hostname%(host: string%) : addr_set @@ -3897,6 +3895,7 @@ function lookup_location%(a: addr%) : geo_location %} ## Performs an AS lookup of an IP address. +## Requires Bro to be built with ``libgeoip``. ## ## a: The IP address to lookup. ## @@ -4096,7 +4095,7 @@ function x509_err2str%(err_num: count%): string ## Converts UNIX file permissions given by a mode to an ASCII string. ## -## mode: The permisssions, e.g., 644 or 755. +## mode: The permissions (an octal number like 0644 converted to decimal). ## ## Returns: A string representation of *mode* in the format ## ``rw[xsS]rw[xsS]rw[xtT]``. @@ -4273,7 +4272,7 @@ function analyzer_name%(aid: count%) : string ## ## cid: The connection ID. ## -## Returns: False if *id* does not point to an active connection and true +## Returns: False if *cid* does not point to an active connection, and true ## otherwise. ## ## .. note:: @@ -4295,10 +4294,10 @@ function skip_further_processing%(cid: conn_id%): bool ## ## cid: The connection identifier. ## -## do_record: True to enable packet contens and false to disable for the +## do_record: True to enable packet contents, and false to disable for the ## connection identified by *cid*. ## -## Returns: False if *id* does not point to an active connection and true +## Returns: False if *cid* does not point to an active connection, and true ## otherwise. ## ## .. 
bro:see:: skip_further_processing @@ -4309,7 +4308,7 @@ function skip_further_processing%(cid: conn_id%): bool ## connection, which is controlled separately by ## :bro:id:`skip_further_processing`. ## -## .. bro:see: get_contents_file set_contents_file +## .. bro:see:: get_contents_file set_contents_file function set_record_packets%(cid: conn_id, do_record: bool%): bool %{ Connection* c = sessions->FindConnection(cid); @@ -4326,7 +4325,7 @@ function set_record_packets%(cid: conn_id, do_record: bool%): bool ## cid: The connection ID. ## ## direction: Controls what sides of the connection to record. The argument can -## take one the four values: +## take one of the four values: ## ## - ``CONTENTS_NONE``: Stop recording the connection's content. ## - ``CONTENTS_ORIG``: Record the data sent by the connection @@ -4340,7 +4339,7 @@ function set_record_packets%(cid: conn_id, do_record: bool%): bool ## ## f: The file handle of the file to write the contents to. ## -## Returns: Returns false if *id* does not point to an active connection and +## Returns: Returns false if *cid* does not point to an active connection, and ## true otherwise. ## ## .. note:: @@ -4351,7 +4350,7 @@ function set_record_packets%(cid: conn_id, do_record: bool%): bool ## missing data; this can happen, e.g., due to an ## :bro:id:`ack_above_hole` event. ## -## .. bro:see: get_contents_file set_record_packets +## .. bro:see:: get_contents_file set_record_packets function set_contents_file%(cid: conn_id, direction: count, f: file%): bool %{ Connection* c = sessions->FindConnection(cid); @@ -4366,15 +4365,15 @@ function set_contents_file%(cid: conn_id, direction: count, f: file%): bool ## ## cid: The connection ID. ## -## direction: Controls what sides of the connection to record. SEe +## direction: Controls what sides of the connection to record. See ## :bro:id:`set_contents_file` for possible values. ## -## Returns: The :bro:type:`file` handle for the contentents file of the +## Returns: The :bro:type:`file` handle for the contents file of the ## connection identified by *cid*. If the connection exists -## but no contents file for *direction*, the function generates a -## error and returns a file handle to ``stderr``. +## but there is no contents file for *direction*, then the function +## generates an error and returns a file handle to ``stderr``. ## -## .. bro:see: set_contents_file set_record_packets +## .. bro:see:: set_contents_file set_record_packets function get_contents_file%(cid: conn_id, direction: count%): file %{ Connection* c = sessions->FindConnection(cid); @@ -4425,7 +4424,7 @@ function set_inactivity_timeout%(cid: conn_id, t: interval%): interval ## ## - ``LOGIN_STATE_AUTHENTICATE``: The connection is in its ## initial authentication dialog. -## - ``OGIN_STATE_LOGGED_IN``: The analyzer believes the user has +## - ``LOGIN_STATE_LOGGED_IN``: The analyzer believes the user has ## successfully authenticated. ## - ``LOGIN_STATE_SKIP``: The analyzer has skipped any further ## processing of the connection. @@ -4433,7 +4432,7 @@ function set_inactivity_timeout%(cid: conn_id, t: interval%): interval ## does not correctly know the state of the connection, and/or ## the username associated with it. ## -## .. bro:see: set_login_state +## .. bro:see:: set_login_state function get_login_state%(cid: conn_id%): count %{ Connection* c = sessions->FindConnection(cid); @@ -4456,9 +4455,9 @@ function get_login_state%(cid: conn_id%): count ## :bro:id:`get_login_state` for possible values. 
## ## Returns: Returns false if *cid* is not an active connection -## or does not tagged as login analyzer, and true otherwise. +## or is not tagged as a login analyzer, and true otherwise. ## -## .. bro:see: get_login_state +## .. bro:see:: get_login_state function set_login_state%(cid: conn_id, new_state: count%): bool %{ Connection* c = sessions->FindConnection(cid); @@ -4590,9 +4589,9 @@ function disable_event_group%(group: string%) : any ## ## f: The path to the file. ## -## Returns: A :bro:type:`file` handle for subsequent operations. +## Returns: A :bro:type:`file` handle for subsequent operations. ## -## .. bro:see;: active_file open_for_append close write_file +## .. bro:see:: active_file open_for_append close write_file ## get_file_name set_buf flush_all mkdir enable_raw_output function open%(f: string%): file %{ @@ -4609,9 +4608,9 @@ function open%(f: string%): file ## ## f: The path to the file. ## -## Returns: A :bro:type:`file` handle for subsequent operations. +## Returns: A :bro:type:`file` handle for subsequent operations. ## -## .. bro:see;: active_file open close write_file +## .. bro:see:: active_file open close write_file ## get_file_name set_buf flush_all mkdir enable_raw_output function open_for_append%(f: string%): file %{ @@ -4619,13 +4618,12 @@ function open_for_append%(f: string%): file %} ## Closes an open file and flushes any buffered content. -## exists, this function appends to it (as opposed to :bro:id:`open`). ## ## f: A :bro:type:`file` handle to an open file. ## -## Returns: True on success. +## Returns: True on success. ## -## .. bro:see;: active_file open open_for_append write_file +## .. bro:see:: active_file open open_for_append write_file ## get_file_name set_buf flush_all mkdir enable_raw_output function close%(f: file%): bool %{ @@ -4638,9 +4636,9 @@ function close%(f: file%): bool ## ## data: The data to write to *f*. ## -## Returns: True on success. +## Returns: True on success. ## -## .. bro:see;: active_file open open_for_append close +## .. bro:see:: active_file open open_for_append close ## get_file_name set_buf flush_all mkdir enable_raw_output function write_file%(f: file, data: string%): bool %{ @@ -4656,11 +4654,11 @@ function write_file%(f: file, data: string%): bool ## f: A :bro:type:`file` handle to an open file. ## ## buffered: When true, *f* is fully buffered, i.e., bytes are saved in a -## buffered until the block size has been reached. When +## buffer until the block size has been reached. When ## false, *f* is line buffered, i.e., bytes are saved up until a ## newline occurs. ## -## .. bro:see;: active_file open open_for_append close +## .. bro:see:: active_file open open_for_append close ## get_file_name write_file flush_all mkdir enable_raw_output function set_buf%(f: file, buffered: bool%): any %{ @@ -4670,9 +4668,9 @@ function set_buf%(f: file, buffered: bool%): any ## Flushes all open files to disk. ## -## Returns: True on success. +## Returns: True on success. ## -## .. bro:see;: active_file open open_for_append close +## .. bro:see:: active_file open open_for_append close ## get_file_name write_file set_buf mkdir enable_raw_output function flush_all%(%): bool %{ @@ -4683,10 +4681,10 @@ function flush_all%(%): bool ## ## f: The directory name. ## -## Returns: Returns true if the operation succeeds and false if the +## Returns: Returns true if the operation succeeds, or false if the ## creation fails or if *f* exists already. ## -## .. bro:see;: active_file open_for_append close write_file +## .. 
bro:see:: active_file open_for_append close write_file ## get_file_name set_buf flush_all enable_raw_output function mkdir%(f: string%): bool %{ @@ -4731,7 +4729,7 @@ function get_file_name%(f: file%): string ## ## f: An open file handle. ## -## Returns: Rotations statistics which include the original file name, the name +## Returns: Rotation statistics which include the original file name, the name ## after the rotation, and the time when *f* was opened/closed. ## ## .. bro:see:: rotate_file_by_name calc_next_rotate @@ -4755,7 +4753,7 @@ function rotate_file%(f: file%): rotate_info ## ## f: The name of the file to rotate ## -## Returns: Rotations statistics which include the original file name, the name +## Returns: Rotation statistics which include the original file name, the name ## after the rotation, and the time when *f* was opened/closed. ## ## .. bro:see:: rotate_file calc_next_rotate @@ -4851,7 +4849,7 @@ function disable_print_hook%(f: file%): any return 0; %} -## Prevents escaping of non-ASCII character when writing to a file. +## Prevents escaping of non-ASCII characters when writing to a file. ## This function is equivalent to :bro:attr:`&disable_print_hook`. ## ## f: The file to disable raw output for. @@ -5213,9 +5211,9 @@ function checkpoint_state%(%) : bool return new Val(persistence_serializer->WriteState(true), TYPE_BOOL); %} -## Reads persistent state from the \texttt{.state} directory and populates the -## in-memory data structures accordingly. This function is the dual to -## :bro:id:`checkpoint_state`. +## Reads persistent state and populates the in-memory data structures +## accordingly. Persistent state is read from the ``.state`` directory. +## This function is the dual to :bro:id:`checkpoint_state`. ## ## Returns: True on success. ## @@ -5267,16 +5265,16 @@ function capture_state_updates%(filename: string%) : bool ## ## ip: The IP address of the remote peer. ## -## port: The port of the remote peer. +## p: The port of the remote peer. ## -## our_class: If an non-empty string, the remote (listening) peer checks it +## our_class: If a non-empty string, then the remote (listening) peer checks it ## against its class name in its peer table and terminates the ## connection if they don't match. ## ## retry: If the connection fails, try to reconnect with the peer after this ## time interval. ## -## ssl: If true, uses SSL to encrypt the session. +## ssl: If true, use SSL to encrypt the session. ## ## Returns: A locally unique ID of the new peer. ## @@ -5299,7 +5297,7 @@ function connect%(ip: addr, p: port, our_class: string, retry: interval, ssl: bo ## Terminate the connection with a peer. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## Returns: True on success. ## @@ -5313,7 +5311,7 @@ function disconnect%(p: event_peer%) : bool ## Subscribes to all events from a remote peer whose names match a given ## pattern. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## handlers: The pattern describing the events to request from peer *p*. ## @@ -5331,7 +5329,7 @@ function request_remote_events%(p: event_peer, handlers: pattern%) : bool ## Requests synchronization of IDs with a remote peer. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## auth: If true, the local instance considers its current state authoritative ## and sends it to *p* right after the handshake. 
@@ -5349,7 +5347,7 @@ function request_remote_sync%(p: event_peer, auth: bool%) : bool ## Requests logs from a remote peer. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## Returns: True on success. ## @@ -5361,9 +5359,11 @@ function request_remote_logs%(p: event_peer%) : bool return new Val(remote_serializer->RequestLogs(id), TYPE_BOOL); %} -## Sets a boolean flag whether Bro accepts state from a remote peer. +## Sets a boolean flag indicating whether Bro accepts state from a remote peer. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. +## +## accept: True if Bro accepts state from peer *p*, or false otherwise. ## ## Returns: True on success. ## @@ -5379,7 +5379,7 @@ function set_accept_state%(p: event_peer, accept: bool%) : bool ## Sets the compression level of the session with a remote peer. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## level: Allowed values are in the range *[0, 9]*, where 0 is the default and ## means no compression. @@ -5394,11 +5394,11 @@ function set_compression_level%(p: event_peer, level: count%) : bool TYPE_BOOL); %} -## Listens on address a given IP address and port for remote connections. +## Listens on a given IP address and port for remote connections. ## ## ip: The IP address to bind to. ## -## p: The TCP port to listen to. +## p: The TCP port to listen on. ## ## ssl: If true, Bro uses SSL to encrypt the session. ## @@ -5420,7 +5420,7 @@ function is_remote_event%(%) : bool ## Sends all persistent state to a remote peer. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## Returns: True on success. ## @@ -5431,10 +5431,10 @@ function send_state%(p: event_peer%) : bool return new Val(persistence_serializer->SendState(id, true), TYPE_BOOL); %} -## Sends a global identifier to a remote peer, which them might install it +## Sends a global identifier to a remote peer, which then might install it ## locally. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## id: The identifier to send. ## @@ -5468,7 +5468,7 @@ function terminate_communication%(%) : bool ## Signals a remote peer that the local Bro instance finished the initial ## handshake. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## Returns: True on success. function complete_handshake%(p: event_peer%) : bool @@ -5481,7 +5481,7 @@ function complete_handshake%(p: event_peer%) : bool ## for :bro:id:`remote_pong`, this function can be used to measure latency ## between two peers. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## seq: A sequence number (also included by :bro:id:`remote_pong`). ## @@ -5496,7 +5496,7 @@ function send_ping%(p: event_peer, seq: count%) : bool ## Sends the currently processed packet to a remote peer. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## Returns: True if sending the packet succeeds. ## @@ -5522,7 +5522,7 @@ function send_current_packet%(p: event_peer%) : bool ## Returns the peer who generated the last event. ## -## Returns: The ID of the peer who genereated the last event. +## Returns: The ID of the peer who generated the last event. ## ## .. 
bro:see:: get_local_event_peer function get_event_peer%(%) : event_peer @@ -5565,7 +5565,7 @@ function get_local_event_peer%(%) : event_peer ## Sends a capture filter to a remote peer. ## -## p: The peer ID return from :bro:id:`connect`. +## p: The peer ID returned from :bro:id:`connect`. ## ## s: The capture filter. ## @@ -5582,7 +5582,7 @@ function send_capture_filter%(p: event_peer, s: string%) : bool ## distributed trace processing with communication enabled ## (*pseudo-realtime* mode). ## -## .. bro:see: continue_processing suspend_state_updates resume_state_updates +## .. bro:see:: continue_processing suspend_state_updates resume_state_updates function suspend_processing%(%) : any %{ net_suspend_processing(); @@ -5591,7 +5591,7 @@ function suspend_processing%(%) : any ## Resumes Bro's packet processing. ## -## .. bro:see: suspend_processing suspend_state_updates resume_state_updates +## .. bro:see:: suspend_processing suspend_state_updates resume_state_updates function continue_processing%(%) : any %{ net_continue_processing(); @@ -5600,7 +5600,7 @@ function continue_processing%(%) : any ## Stops propagating :bro:attr:`&synchronized` accesses. ## -## .. bro:see: suspend_processing continue_processing resume_state_updates +## .. bro:see:: suspend_processing continue_processing resume_state_updates function suspend_state_updates%(%) : any %{ if ( remote_serializer ) @@ -5610,7 +5610,7 @@ function suspend_state_updates%(%) : any ## Resumes propagating :bro:attr:`&synchronized` accesses. ## -## .. bro:see: suspend_processing continue_processing suspend_state_updates +## .. bro:see:: suspend_processing continue_processing suspend_state_updates function resume_state_updates%(%) : any %{ if ( remote_serializer ) diff --git a/src/strings.bif b/src/strings.bif index ebe16529ea..27c11b4013 100644 --- a/src/strings.bif +++ b/src/strings.bif @@ -11,8 +11,8 @@ using namespace std; %%} -## Concates all arguments into a single string. The function takes a variable -## number of arguments of type string and stiches them together. +## Concatenates all arguments into a single string. The function takes a +## variable number of arguments of type string and stitches them together. ## ## Returns: The concatenation of all (string) arguments. ## @@ -157,9 +157,9 @@ function join_string_array%(sep: string, a: string_array%): string ## ## sep: The separator to place between each element. ## -## a: The :bro:type:`string_vec` (``vector of string``). +## vec: The :bro:type:`string_vec` (``vector of string``). ## -## Returns: The concatenation of all elements in *a*, with *sep* placed +## Returns: The concatenation of all elements in *vec*, with *sep* placed ## between each element. ## ## .. bro:see:: cat cat_sep string_cat cat_string_array cat_string_array_n @@ -219,7 +219,7 @@ function sort_string_array%(a: string_array%): string_array ## Returns an edited version of a string that applies a special ## "backspace character" (usually ``\x08`` for backspace or ``\x7f`` for DEL). -## For ## example, ``edit("hello there", "e")`` returns ``"llo t"``. +## For example, ``edit("hello there", "e")`` returns ``"llo t"``. ## ## arg_s: The string to edit. ## @@ -229,7 +229,7 @@ function sort_string_array%(a: string_array%): string_array ## the string. ## ## Returns: An edited version of *arg_s* where *arg_edit_char* triggers the -## deletetion of the last character. +## deletion of the last character. ## ## .. 
bro:see:: clean ## to_string_literal @@ -278,7 +278,7 @@ function byte_len%(s: string%): count return new Val(s->Len(), TYPE_COUNT); %} -## Get a substring of from a string, given a starting position length. +## Get a substring from a string, given a starting position and length. ## ## s: The string to obtain a substring from. ## @@ -486,10 +486,10 @@ function split%(str: string, re: pattern%): string_array return do_split(str, re, 0, 0, 0); %} -## Splits a string *once* into a a two-element array of strings according to a -## pattern. This function is the same as :bro:id:`split`, but * is only split -## once (if possible) at the earliest position and an array of two strings is -## returned. +## Splits a string *once* into a two-element array of strings according to a +## pattern. This function is the same as :bro:id:`split`, but *str* is only +## split once (if possible) at the earliest position and an array of two strings +## is returned. ## ## str: The string to split. ## @@ -518,7 +518,7 @@ function split1%(str: string, re: pattern%): string_array ## ## Returns: An array of strings where each two successive elements correspond ## to a substring in *str* of the part not matching *re* (odd-indexed) -## and thei part that matches *re* (even-indexed). +## and the part that matches *re* (even-indexed). ## ## .. bro:see:: split split1 split_n str_split function split_all%(str: string, re: pattern%): string_array @@ -568,7 +568,7 @@ function split_complete%(str: string, ## ## re: The pattern being replaced with *repl*. ## -## repl: The string that replacs *re*. +## repl: The string that replaces *re*. ## ## Returns: A copy of *str* with the first occurence of *re* replaced with ## *repl*. @@ -579,16 +579,16 @@ function sub%(str: string, re: pattern, repl: string%): string return do_sub(str, re, repl, 0); %} -## Substitutes a given replacement string for the all occurrences of a pattern +## Substitutes a given replacement string for all occurrences of a pattern ## in a given string. ## ## str: The string to perform the substitution in. ## ## re: The pattern being replaced with *repl*. ## -## repl: The string that replacs *re*. +## repl: The string that replaces *re*. ## -## Returns: A copy of *str* with all occurences of *re* replaced with *repl*. +## Returns: A copy of *str* with all occurrences of *re* replaced with *repl*. ## ## .. bro:see:: sub subst_string function gsub%(str: string, re: pattern, repl: string%): string @@ -597,7 +597,7 @@ function gsub%(str: string, re: pattern, repl: string%): string %} -## Lexicographically compares two string. +## Lexicographically compares two strings. ## ## s1: The first string. ## @@ -616,7 +616,7 @@ function strcmp%(s1: string, s2: string%): int ## ## little: The (smaller) string to find inside *big*. ## -## Returns: The location of *little* in *big* or 0 if *little* is not found in +## Returns: The location of *little* in *big*, or 0 if *little* is not found in ## *big*. ## ## .. bro:see:: find_all find_last @@ -685,7 +685,7 @@ function subst_string%(s: string, from: string, to: string%): string ## str: The string to convert to lowercase letters. ## ## Returns: A copy of the given string with the uppercase letters (as indicated -## by ``isascii`` and \verb|isupper|``) folded to lowercase +## by ``isascii`` and ``isupper``) folded to lowercase ## (via ``tolower``). ## ## .. bro:see:: to_upper is_ascii @@ -714,7 +714,7 @@ function to_lower%(str: string%): string ## str: The string to convert to uppercase letters. 
## ## Returns: A copy of the given string with the lowercase letters (as indicated -## by ``isascii`` and \verb|islower|``) folded to uppercase +## by ``isascii`` and ``islower``) folded to uppercase ## (via ``toupper``). ## ## .. bro:see:: to_lower is_ascii @@ -744,7 +744,7 @@ function to_upper%(str: string%): string ## - ``NUL`` to ``\0`` ## - ``DEL`` to ``^?`` ## - values <= 26 to ``^[A-Z]`` -## - values not in *[32, 126]** to ``%XX`` +## - values not in *[32, 126]* to ``%XX`` ## ## If the string does not yet have a trailing NUL, one is added. ## @@ -765,7 +765,7 @@ function clean%(str: string%): string ## - ``NUL`` to ``\0`` ## - ``DEL`` to ``^?`` ## - values <= 26 to ``^[A-Z]`` -## - values not in *[32, 126]** to ``%XX`` +## - values not in *[32, 126]* to ``%XX`` ## ## str: The string to escape. ## @@ -831,14 +831,16 @@ function string_to_ascii_hex%(s: string%): string return new StringVal(new BroString(1, (u_char*) x, s->Len() * 2)); %} -## Uses the Smith Waterman algorithm to find similar/overlapping substrings. +## Uses the Smith-Waterman algorithm to find similar/overlapping substrings. ## See `Wikipedia `_. ## ## s1: The first string. ## ## s2: The second string. ## -## Returns: The result of the Smit Waterman algorithm calculation. +## params: Parameters for the Smith-Waterman algorithm. +## +## Returns: The result of the Smith-Waterman algorithm calculation. function str_smith_waterman%(s1: string, s2: string, params: sw_params%) : sw_substring_vec %{ SWParams sw_params(params->AsRecordVal()->Lookup(0)->AsCount(), From 5ab765b4b6643fa872889ef03f56604ba7748a2a Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 18 May 2012 11:23:09 -0500 Subject: [PATCH 302/651] Replace ip6_hdr_chain with ip6_ext_hdr in comments This fixes some warnings that were appearing when building the documentation. --- scripts/base/init-bare.bro | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 20ce7b8ff5..73f7d725d4 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -967,7 +967,7 @@ const IPPROTO_MOBILITY = 135; ##< IPv6 mobility header. ## Values extracted from an IPv6 extension header's (e.g. hop-by-hop or ## destination option headers) option field. ## -## .. bro:see:: ip6_hdr ip6_hdr_chain ip6_hopopts ip6_dstopts +## .. bro:see:: ip6_hdr ip6_ext_hdr ip6_hopopts ip6_dstopts type ip6_option: record { otype: count; ##< Option type. len: count; ##< Option data length. @@ -976,7 +976,7 @@ type ip6_option: record { ## Values extracted from an IPv6 Hop-by-Hop options extension header. ## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain ip6_option +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr ip6_option type ip6_hopopts: record { ## Protocol number of the next header (RFC 1700 et seq., IANA assigned ## number), e.g. :bro:id:`IPPROTO_ICMP`. @@ -989,7 +989,7 @@ type ip6_hopopts: record { ## Values extracted from an IPv6 Destination options extension header. ## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain ip6_option +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr ip6_option type ip6_dstopts: record { ## Protocol number of the next header (RFC 1700 et seq., IANA assigned ## number), e.g. :bro:id:`IPPROTO_ICMP`. @@ -1002,7 +1002,7 @@ type ip6_dstopts: record { ## Values extracted from an IPv6 Routing extension header. ## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain +## .. 
bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr type ip6_routing: record { ## Protocol number of the next header (RFC 1700 et seq., IANA assigned ## number), e.g. :bro:id:`IPPROTO_ICMP`. @@ -1019,7 +1019,7 @@ type ip6_routing: record { ## Values extracted from an IPv6 Fragment extension header. ## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr type ip6_fragment: record { ## Protocol number of the next header (RFC 1700 et seq., IANA assigned ## number), e.g. :bro:id:`IPPROTO_ICMP`. @@ -1038,7 +1038,7 @@ type ip6_fragment: record { ## Values extracted from an IPv6 Authentication extension header. ## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr type ip6_ah: record { ## Protocol number of the next header (RFC 1700 et seq., IANA assigned ## number), e.g. :bro:id:`IPPROTO_ICMP`. @@ -1057,7 +1057,7 @@ type ip6_ah: record { ## Values extracted from an IPv6 ESP extension header. ## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr type ip6_esp: record { ## Security Parameters Index. spi: count; @@ -1067,7 +1067,7 @@ type ip6_esp: record { ## Values extracted from an IPv6 Mobility Binding Refresh Request message. ## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg type ip6_mobility_brr: record { ## Reserved. rsv: count; @@ -1077,7 +1077,7 @@ type ip6_mobility_brr: record { ## Values extracted from an IPv6 Mobility Home Test Init message. ## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg type ip6_mobility_hoti: record { ## Reserved. rsv: count; @@ -1089,7 +1089,7 @@ type ip6_mobility_hoti: record { ## Values extracted from an IPv6 Mobility Care-of Test Init message. ## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg type ip6_mobility_coti: record { ## Reserved. rsv: count; @@ -1101,7 +1101,7 @@ type ip6_mobility_coti: record { ## Values extracted from an IPv6 Mobility Home Test message. ## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg type ip6_mobility_hot: record { ## Home Nonce Index. nonce_idx: count; @@ -1115,7 +1115,7 @@ type ip6_mobility_hot: record { ## Values extracted from an IPv6 Mobility Care-of Test message. ## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg type ip6_mobility_cot: record { ## Care-of Nonce Index. nonce_idx: count; @@ -1129,7 +1129,7 @@ type ip6_mobility_cot: record { ## Values extracted from an IPv6 Mobility Binding Update message. ## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg type ip6_mobility_bu: record { ## Sequence number. seq: count; @@ -1149,7 +1149,7 @@ type ip6_mobility_bu: record { ## Values extracted from an IPv6 Mobility Binding Acknowledgement message. ## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg type ip6_mobility_back: record { ## Status. 
status: count; @@ -1165,7 +1165,7 @@ type ip6_mobility_back: record { ## Values extracted from an IPv6 Mobility Binding Error message. ## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain ip6_mobility_msg +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg type ip6_mobility_be: record { ## Status. status: count; @@ -1177,7 +1177,7 @@ type ip6_mobility_be: record { ## Values extracted from an IPv6 Mobility header's message data. ## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_hdr_chain +## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr type ip6_mobility_msg: record { ## The type of message from the header's MH Type field. id: count; @@ -1201,7 +1201,7 @@ type ip6_mobility_msg: record { ## Values extracted from an IPv6 Mobility header. ## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_hdr_chain +## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr type ip6_mobility_hdr: record { ## Protocol number of the next header (RFC 1700 et seq., IANA assigned ## number), e.g. :bro:id:`IPPROTO_ICMP`. @@ -1244,7 +1244,7 @@ type ip6_ext_hdr: record { ## Values extracted from an IPv6 header. ## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr_chain ip6_hopopts ip6_dstopts +## .. bro:see:: pkt_hdr ip4_hdr ip6_ext_hdr ip6_hopopts ip6_dstopts ## ip6_routing ip6_fragment ip6_ah ip6_esp type ip6_hdr: record { class: count; ##< Traffic class. From 5312b21d7bd4e19f7fbd8dffa6e0f6277014fb01 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 22 May 2012 15:18:33 -0500 Subject: [PATCH 303/651] Improve availability of IPv6 flow label in connection records. Without this change, flow labeling of connections over IPv6 are only available in the per-packet types of events (e.g. new_packet) in which header fields can be inspected, but now minimal tracking of the most recent flow label is done internally and that's available per-connection for all events that use connection record arguments. Specifically, this adds a "flow_label" field to the "endpoint" record type, which is used for both the "orig" and "resp" fields of "connection" records. The new "connection_flow_label_changed" event also allows tracking of changes in flow labels: it's raised each time one direction of the connection starts using a different label. --- scripts/base/init-bare.bro | 7 +- src/Conn.cc | 49 +++++++++++- src/Conn.h | 7 +- src/IP.h | 6 ++ src/Sessions.cc | 10 ++- src/Sessions.h | 2 +- src/event.bif | 14 ++++ .../Baseline/core.ipv6-flow-labels/output | 74 +++++++++++++++++++ testing/btest/core/ipv6-flow-labels.test | 32 ++++++++ 9 files changed, 192 insertions(+), 9 deletions(-) create mode 100644 testing/btest/Baseline/core.ipv6-flow-labels/output create mode 100644 testing/btest/core/ipv6-flow-labels.test diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 20ce7b8ff5..dadeab734a 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -178,9 +178,9 @@ type endpoint_stats: record { ## use ``count``. That should be changed. type AnalyzerID: count; -## Statistics about an endpoint. +## Statistics about a :bro:type:`connection` endpoint. ## -## todo::Where is this used? +## .. bro:see:: connection type endpoint: record { size: count; ##< Logical size of data sent (for TCP: derived from sequence numbers). ## Endpoint state. For TCP connection, one of the constants: @@ -194,6 +194,9 @@ type endpoint: record { ## Number of IP-level bytes sent. Only set if :bro:id:`use_conn_size_analyzer` is ## true. 
num_bytes_ip: count &optional; + ## The current IPv6 flow label that the connection endpoint is using. + ## Always 0 if the connection is over IPv4. + flow_label: count; }; # A connection. This is Bro's basic connection type describing IP- and diff --git a/src/Conn.cc b/src/Conn.cc index acf17fab3a..3835097b6a 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -111,7 +111,8 @@ unsigned int Connection::external_connections = 0; IMPLEMENT_SERIAL(Connection, SER_CONNECTION); -Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id) +Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, + uint32 flow) { sessions = s; key = k; @@ -122,6 +123,10 @@ Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id) orig_port = id->src_port; resp_port = id->dst_port; proto = TRANSPORT_UNKNOWN; + orig_flow_label = flow; + resp_flow_label = 0; + saw_first_orig_packet = 1; + saw_first_resp_packet = 0; conn_val = 0; login_conn = 0; @@ -323,10 +328,12 @@ RecordVal* Connection::BuildConnVal() RecordVal *orig_endp = new RecordVal(endpoint); orig_endp->Assign(0, new Val(0, TYPE_COUNT)); orig_endp->Assign(1, new Val(0, TYPE_COUNT)); + orig_endp->Assign(4, new Val(orig_flow_label, TYPE_COUNT)); RecordVal *resp_endp = new RecordVal(endpoint); resp_endp->Assign(0, new Val(0, TYPE_COUNT)); resp_endp->Assign(1, new Val(0, TYPE_COUNT)); + resp_endp->Assign(4, new Val(resp_flow_label, TYPE_COUNT)); conn_val->Assign(0, id_val); conn_val->Assign(1, orig_endp); @@ -675,6 +682,14 @@ void Connection::FlipRoles() resp_port = orig_port; orig_port = tmp_port; + bool tmp_bool = saw_first_resp_packet; + saw_first_resp_packet = saw_first_orig_packet; + saw_first_orig_packet = tmp_bool; + + uint32 tmp_flow = resp_flow_label; + resp_flow_label = orig_flow_label; + orig_flow_label = tmp_flow; + Unref(conn_val); conn_val = 0; @@ -882,3 +897,35 @@ void Connection::SetRootAnalyzer(TransportLayerAnalyzer* analyzer, PIA* pia) root_analyzer = analyzer; primary_PIA = pia; } + +void Connection::CheckFlowLabel(bool is_orig, uint32 flow_label) + { + uint32& my_flow_label = is_orig ? orig_flow_label : resp_flow_label; + + if ( my_flow_label != flow_label ) + { + if ( conn_val ) + { + RecordVal *endp = conn_val->Lookup(is_orig ? 1 : 2)->AsRecordVal(); + endp->Assign(4, new Val(flow_label, TYPE_COUNT)); + } + + if ( connection_flow_label_changed && + (is_orig ? saw_first_orig_packet : saw_first_resp_packet) ) + { + val_list* vl = new val_list(4); + vl->append(BuildConnVal()); + vl->append(new Val(is_orig, TYPE_BOOL)); + vl->append(new Val(my_flow_label, TYPE_COUNT)); + vl->append(new Val(flow_label, TYPE_COUNT)); + ConnectionEvent(connection_flow_label_changed, 0, vl); + } + + my_flow_label = flow_label; + } + + if ( is_orig ) + saw_first_orig_packet = 1; + else + saw_first_resp_packet = 1; + } diff --git a/src/Conn.h b/src/Conn.h index b3eb9013d0..7404721968 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -50,7 +50,8 @@ class Analyzer; class Connection : public BroObj { public: - Connection(NetSessions* s, HashKey* k, double t, const ConnID* id); + Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, + uint32 flow); virtual ~Connection(); // Invoked when connection is about to be removed. 
Use Ref(this) @@ -241,6 +242,8 @@ public: void SetUID(uint64 arg_uid) { uid = arg_uid; } + void CheckFlowLabel(bool is_orig, uint32 flow_label); + protected: Connection() { persistent = 0; } @@ -271,6 +274,7 @@ protected: IPAddr resp_addr; uint32 orig_port, resp_port; // in network order TransportProto proto; + uint32 orig_flow_label, resp_flow_label; // most recent IPv6 flow labels double start_time, last_time; double inactivity_timeout; RecordVal* conn_val; @@ -286,6 +290,7 @@ protected: unsigned int record_packets:1, record_contents:1; unsigned int persistent:1; unsigned int record_current_packet:1, record_current_content:1; + unsigned int saw_first_orig_packet:1, saw_first_resp_packet:1; // Count number of connections. static unsigned int total_connections; diff --git a/src/IP.h b/src/IP.h index 502ae857c0..c3a74b4a01 100644 --- a/src/IP.h +++ b/src/IP.h @@ -524,6 +524,12 @@ public: int DF() const { return ip4 ? ((ntohs(ip4->ip_off) & 0x4000) != 0) : 0; } + /** + * Returns value of an IPv6 header's flow label field or 0 if it's IPv4. + */ + uint32 FlowLabel() const + { return ip4 ? 0 : (ntohl(ip6->ip6_flow) & 0x000fffff); } + /** * Returns number of IP headers in packet (includes IPv6 extension headers). */ diff --git a/src/Sessions.cc b/src/Sessions.cc index 7da1f088de..4419936fbd 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -602,7 +602,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, conn = (Connection*) d->Lookup(h); if ( ! conn ) { - conn = NewConn(h, t, &id, data, proto); + conn = NewConn(h, t, &id, data, proto, ip_hdr->FlowLabel()); if ( conn ) d->Insert(h, conn); } @@ -623,7 +623,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, conn->Event(connection_reused, 0); Remove(conn); - conn = NewConn(h, t, &id, data, proto); + conn = NewConn(h, t, &id, data, proto, ip_hdr->FlowLabel()); if ( conn ) d->Insert(h, conn); } @@ -644,6 +644,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, int is_orig = (id.src_addr == conn->OrigAddr()) && (id.src_port == conn->OrigPort()); + conn->CheckFlowLabel(is_orig, ip_hdr->FlowLabel()); + Val* pkt_hdr_val = 0; if ( ipv6_ext_headers && ip_hdr->NumHeaders() > 1 ) @@ -1002,7 +1004,7 @@ void NetSessions::GetStats(SessionStats& s) const } Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, - const u_char* data, int proto) + const u_char* data, int proto, uint32 flow_label) { // FIXME: This should be cleaned up a bit, it's too protocol-specific. // But I'm not yet sure what the right abstraction for these things is. @@ -1058,7 +1060,7 @@ Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, id = &flip_id; } - Connection* conn = new Connection(this, k, t, id); + Connection* conn = new Connection(this, k, t, id, flow_label); conn->SetTransport(tproto); dpm->BuildInitialAnalyzerTree(tproto, conn, data); diff --git a/src/Sessions.h b/src/Sessions.h index 06c6057dbf..d29ab0eeab 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -142,7 +142,7 @@ protected: friend class TimerMgrExpireTimer; Connection* NewConn(HashKey* k, double t, const ConnID* id, - const u_char* data, int proto); + const u_char* data, int proto, uint32 flow_label); // Check whether the tag of the current packet is consistent with // the given connection. 
Returns: diff --git a/src/event.bif b/src/event.bif index ded054dd53..80bb46e561 100644 --- a/src/event.bif +++ b/src/event.bif @@ -401,6 +401,20 @@ event connection_reused%(c: connection%); ## new_connection new_connection_contents partial_connection event connection_status_update%(c: connection%); +## Generated for a connection over IPv6 when one direction has changed +## the flow label that it's using. +## +## c: The connection. +## +## is_orig: True if the event is raised for the originator side. +## +## old_label: The old flow label that the endpoint was using. +## +## new_label: The new flow label that the endpoint is using. +## +## .. bro:see:: connection_established new_connection +event connection_flow_label_changed%(c: connection, is_orig: bool, old_label: count, new_label: count%); + ## Generated at the end of reassembled TCP connections. The TCP reassembler ## raised the event once for each endpoint of a connection when it finished ## reassembling the corresponding side of the communication. diff --git a/testing/btest/Baseline/core.ipv6-flow-labels/output b/testing/btest/Baseline/core.ipv6-flow-labels/output new file mode 100644 index 0000000000..9f7292d485 --- /dev/null +++ b/testing/btest/Baseline/core.ipv6-flow-labels/output @@ -0,0 +1,74 @@ +new_connection: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49185/tcp, resp_h=2001:470:4867:99::21, resp_p=21/tcp] + orig_flow 0 + resp_flow 0 +connection_established: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49185/tcp, resp_h=2001:470:4867:99::21, resp_p=21/tcp] + orig_flow 0 + resp_flow 0 +connection_flow_label_changed(resp): [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49185/tcp, resp_h=2001:470:4867:99::21, resp_p=21/tcp] + orig_flow 0 + resp_flow 7407 + old_label 0 + new_label 7407 +new_connection: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49186/tcp, resp_h=2001:470:4867:99::21, resp_p=57086/tcp] + orig_flow 0 + resp_flow 0 +connection_established: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49186/tcp, resp_h=2001:470:4867:99::21, resp_p=57086/tcp] + orig_flow 0 + resp_flow 0 +connection_flow_label_changed(resp): [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49186/tcp, resp_h=2001:470:4867:99::21, resp_p=57086/tcp] + orig_flow 0 + resp_flow 176012 + old_label 0 + new_label 176012 +new_connection: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49187/tcp, resp_h=2001:470:4867:99::21, resp_p=57087/tcp] + orig_flow 0 + resp_flow 0 +connection_established: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49187/tcp, resp_h=2001:470:4867:99::21, resp_p=57087/tcp] + orig_flow 0 + resp_flow 0 +connection_flow_label_changed(resp): [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49187/tcp, resp_h=2001:470:4867:99::21, resp_p=57087/tcp] + orig_flow 0 + resp_flow 390927 + old_label 0 + new_label 390927 +new_connection: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49188/tcp, resp_h=2001:470:4867:99::21, resp_p=57088/tcp] + orig_flow 0 + resp_flow 0 +connection_established: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49188/tcp, resp_h=2001:470:4867:99::21, resp_p=57088/tcp] + orig_flow 0 + resp_flow 0 +connection_flow_label_changed(resp): [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49188/tcp, resp_h=2001:470:4867:99::21, resp_p=57088/tcp] + orig_flow 0 + resp_flow 364705 + old_label 0 + new_label 364705 +connection_state_remove: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49186/tcp, resp_h=2001:470:4867:99::21, resp_p=57086/tcp] + orig_flow 0 
+ resp_flow 176012 +connection_state_remove: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49187/tcp, resp_h=2001:470:4867:99::21, resp_p=57087/tcp] + orig_flow 0 + resp_flow 390927 +connection_state_remove: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49188/tcp, resp_h=2001:470:4867:99::21, resp_p=57088/tcp] + orig_flow 0 + resp_flow 364705 +new_connection: [orig_h=2001:470:4867:99::21, orig_p=55785/tcp, resp_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, resp_p=49189/tcp] + orig_flow 267377 + resp_flow 0 +connection_established: [orig_h=2001:470:4867:99::21, orig_p=55785/tcp, resp_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, resp_p=49189/tcp] + orig_flow 267377 + resp_flow 126027 +new_connection: [orig_h=2001:470:4867:99::21, orig_p=55647/tcp, resp_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, resp_p=49190/tcp] + orig_flow 355265 + resp_flow 0 +connection_established: [orig_h=2001:470:4867:99::21, orig_p=55647/tcp, resp_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, resp_p=49190/tcp] + orig_flow 355265 + resp_flow 126028 +connection_state_remove: [orig_h=2001:470:4867:99::21, orig_p=55785/tcp, resp_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, resp_p=49189/tcp] + orig_flow 267377 + resp_flow 126027 +connection_state_remove: [orig_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, orig_p=49185/tcp, resp_h=2001:470:4867:99::21, resp_p=21/tcp] + orig_flow 0 + resp_flow 7407 +connection_state_remove: [orig_h=2001:470:4867:99::21, orig_p=55647/tcp, resp_h=2001:470:1f11:81f:c999:d94:aa7c:2e3e, resp_p=49190/tcp] + orig_flow 355265 + resp_flow 126028 diff --git a/testing/btest/core/ipv6-flow-labels.test b/testing/btest/core/ipv6-flow-labels.test new file mode 100644 index 0000000000..b4e60cb0a4 --- /dev/null +++ b/testing/btest/core/ipv6-flow-labels.test @@ -0,0 +1,32 @@ +# @TEST-EXEC: bro -b -r $TRACES/ipv6-ftp.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +function print_connection(c: connection, event_name: string) + { + print fmt("%s: %s", event_name, c$id); + print fmt(" orig_flow %d", c$orig$flow_label); + print fmt(" resp_flow %d", c$resp$flow_label); + } + +event new_connection(c: connection) + { + print_connection(c, "new_connection"); + } + +event connection_established(c: connection) + { + print_connection(c, "connection_established"); + } + +event connection_state_remove(c: connection) + { + print_connection(c, "connection_state_remove"); + } + +event connection_flow_label_changed(c: connection, is_orig: bool, + old_label: count, new_label: count) + { + print_connection(c, fmt("connection_flow_label_changed(%s)", is_orig ? "orig" : "resp")); + print fmt(" old_label %d", old_label); + print fmt(" new_label %d", new_label); + } From 82a6f3832ae10c5a9b5a1646422511c9fce1b041 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 22 May 2012 13:51:50 -0700 Subject: [PATCH 304/651] fix two memory leaks which occured when one used filters. 
--- src/input/Manager.cc | 6 +++++- src/input/readers/Ascii.cc | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index bd6cd34991..6b179f66a1 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -750,7 +750,7 @@ Val* Manager::RecordValToIndexVal(RecordVal *r) { int num_fields = type->NumFields(); if ( num_fields == 1 && type->FieldDecl(0)->type->Tag() != TYPE_RECORD ) { - idxval = r->Lookup(0); + idxval = r->LookupWithDefault(0); } else { ListVal *l = new ListVal(TYPE_ANY); for ( int j = 0 ; j < num_fields; j++ ) { @@ -902,6 +902,7 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) { if ( result == false ) { Unref(predidx); + Unref(valval); if ( !updated ) { // throw away. Hence - we quit. And remove the entry from the current dictionary... // (but why should it be in there? assert this). @@ -956,6 +957,9 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) { Ref(oldval); // otherwise it is no longer accessible after the assignment filter->tab->Assign(idxval, k, valval); Unref(idxval); // asssign does not consume idxval. + if ( predidx != 0 ) { + Unref(predidx); + } filter->currDict->Insert(idxhash, ih); delete idxhash; diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index a1119ed253..a167408a0e 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -101,12 +101,16 @@ bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* c file = new ifstream(path.c_str()); if ( !file->is_open() ) { Error(Fmt("Init: cannot open %s", fname.c_str())); + delete(file); + file = 0; return false; } if ( ReadHeader(false) == false ) { Error(Fmt("Init: cannot open %s; headers are incorrect", fname.c_str())); file->close(); + delete(file); + file = 0; return false; } From 074a0a9dce5dca4219e213c83264e16ef4450733 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 23 May 2012 14:29:16 -0500 Subject: [PATCH 305/651] Documentation fixes. --- scripts/base/init-bare.bro | 2 +- scripts/base/protocols/conn/main.bro | 2 +- src/event.bif | 9 +++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 73f7d725d4..17748917b7 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -219,7 +219,7 @@ type connection: record { service: set[string]; addl: string; ##< Deprecated. hot: count; ##< Deprecated. - history: string; ##< State history of TCP connections. See *history* in :bro:see:`Conn::Info`. + history: string; ##< State history of connections. See *history* in :bro:see:`Conn::Info`. ## A globally unique connection identifier. For each connection, Bro creates an ID ## that is very likely unique across independent Bro runs. These IDs can thus be ## used to tag and locate information associated with that connection. diff --git a/scripts/base/protocols/conn/main.bro b/scripts/base/protocols/conn/main.bro index 34ec12fa56..c526681f2a 100644 --- a/scripts/base/protocols/conn/main.bro +++ b/scripts/base/protocols/conn/main.bro @@ -68,7 +68,7 @@ export { missed_bytes: count &log &default=0; ## Records the state history of connections as a string of letters. 
- ## For TCP connections the meaning of those letters is: + ## The meaning of those letters is: ## ## ====== ==================================================== ## Letter Meaning diff --git a/src/event.bif b/src/event.bif index ded054dd53..e3dcfb6aef 100644 --- a/src/event.bif +++ b/src/event.bif @@ -171,8 +171,11 @@ event new_connection_contents%(c: connection%); ## new_connection new_connection_contents partial_connection event connection_attempt%(c: connection%); -## Generated for an established TCP connection. The event is raised when the -## initial 3-way TCP handshake has successfully finished for a connection. +## Generated when a SYN-ACK packet is seen in response to SYN a packet during +## a TCP handshake. The final ACK of the handshake in response to SYN-ACK may +## or may not occur later, one way to tell is to check the *history* field of +## :bro:type:`connection` to see if the originator sent an ACK, indicated by +## 'A' in the history string. ## ## c: The connection. ## @@ -335,8 +338,6 @@ event connection_SYN_packet%(c: connection, pkt: SYN_packet%); ## ## c: The connection. ## -## pkt: Information extracted from the SYN packet. -## ## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_half_finished connection_partial_close connection_pending From 94e850397b7fe2e2ef6f8548ac5e89c206371417 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 23 May 2012 16:34:03 -0500 Subject: [PATCH 306/651] Add tests for previously-untested strings BIFs --- testing/btest/Baseline/bifs.byte_len/out | 1 + .../btest/Baseline/bifs.cat_string_array/out | 3 + testing/btest/Baseline/bifs.edit/out | 1 + testing/btest/Baseline/bifs.escape_string/out | 10 ++++ testing/btest/Baseline/bifs.find_all/out | 4 ++ testing/btest/Baseline/bifs.find_last/out | 3 + testing/btest/Baseline/bifs.hexdump/out | 1 + testing/btest/Baseline/bifs.join_string/out | 1 + testing/btest/Baseline/bifs.lowerupper/out | 2 + .../btest/Baseline/bifs.sort_string_array/out | 4 ++ testing/btest/Baseline/bifs.split/out | 32 ++++++++++ .../btest/Baseline/bifs.str_shell_escape/out | 4 ++ testing/btest/Baseline/bifs.strcmp/out | 3 + testing/btest/Baseline/bifs.string_fill/out | 3 + .../btest/Baseline/bifs.string_splitting/out | 13 ---- testing/btest/Baseline/bifs.strip/out | 6 ++ testing/btest/Baseline/bifs.strstr/out | 2 + testing/btest/Baseline/bifs.sub/out | 2 + testing/btest/Baseline/bifs.subst_string/out | 1 + testing/btest/bifs/byte_len.bro | 10 ++++ testing/btest/bifs/cat_string_array.bro | 14 +++++ testing/btest/bifs/edit.bro | 10 ++++ testing/btest/bifs/escape_string.bro | 27 +++++++++ testing/btest/bifs/find_all.bro | 18 ++++++ testing/btest/bifs/find_last.bro | 17 ++++++ testing/btest/bifs/hexdump.bro | 10 ++++ testing/btest/bifs/join_string.bro | 14 +++++ testing/btest/bifs/lowerupper.bro | 11 ++++ testing/btest/bifs/sort_string_array.bro | 17 ++++++ testing/btest/bifs/split.bro | 59 +++++++++++++++++++ testing/btest/bifs/str_shell_escape.bro | 15 +++++ testing/btest/bifs/strcmp.bro | 13 ++++ testing/btest/bifs/string_fill.bro | 16 +++++ testing/btest/bifs/string_splitting.bro | 12 ---- testing/btest/bifs/strip.bro | 17 ++++++ testing/btest/bifs/strstr.bro | 13 ++++ testing/btest/bifs/sub.bro | 12 ++++ testing/btest/bifs/subst_string.bro | 12 ++++ 38 files changed, 388 insertions(+), 25 deletions(-) create mode 100644 testing/btest/Baseline/bifs.byte_len/out create mode 100644 testing/btest/Baseline/bifs.cat_string_array/out 
create mode 100644 testing/btest/Baseline/bifs.edit/out create mode 100644 testing/btest/Baseline/bifs.escape_string/out create mode 100644 testing/btest/Baseline/bifs.find_all/out create mode 100644 testing/btest/Baseline/bifs.find_last/out create mode 100644 testing/btest/Baseline/bifs.hexdump/out create mode 100644 testing/btest/Baseline/bifs.join_string/out create mode 100644 testing/btest/Baseline/bifs.lowerupper/out create mode 100644 testing/btest/Baseline/bifs.sort_string_array/out create mode 100644 testing/btest/Baseline/bifs.split/out create mode 100644 testing/btest/Baseline/bifs.str_shell_escape/out create mode 100644 testing/btest/Baseline/bifs.strcmp/out create mode 100644 testing/btest/Baseline/bifs.string_fill/out delete mode 100644 testing/btest/Baseline/bifs.string_splitting/out create mode 100644 testing/btest/Baseline/bifs.strip/out create mode 100644 testing/btest/Baseline/bifs.strstr/out create mode 100644 testing/btest/Baseline/bifs.sub/out create mode 100644 testing/btest/Baseline/bifs.subst_string/out create mode 100644 testing/btest/bifs/byte_len.bro create mode 100644 testing/btest/bifs/cat_string_array.bro create mode 100644 testing/btest/bifs/edit.bro create mode 100644 testing/btest/bifs/escape_string.bro create mode 100644 testing/btest/bifs/find_all.bro create mode 100644 testing/btest/bifs/find_last.bro create mode 100644 testing/btest/bifs/hexdump.bro create mode 100644 testing/btest/bifs/join_string.bro create mode 100644 testing/btest/bifs/lowerupper.bro create mode 100644 testing/btest/bifs/sort_string_array.bro create mode 100644 testing/btest/bifs/split.bro create mode 100644 testing/btest/bifs/str_shell_escape.bro create mode 100644 testing/btest/bifs/strcmp.bro create mode 100644 testing/btest/bifs/string_fill.bro delete mode 100644 testing/btest/bifs/string_splitting.bro create mode 100644 testing/btest/bifs/strip.bro create mode 100644 testing/btest/bifs/strstr.bro create mode 100644 testing/btest/bifs/sub.bro create mode 100644 testing/btest/bifs/subst_string.bro diff --git a/testing/btest/Baseline/bifs.byte_len/out b/testing/btest/Baseline/bifs.byte_len/out new file mode 100644 index 0000000000..b4de394767 --- /dev/null +++ b/testing/btest/Baseline/bifs.byte_len/out @@ -0,0 +1 @@ +11 diff --git a/testing/btest/Baseline/bifs.cat_string_array/out b/testing/btest/Baseline/bifs.cat_string_array/out new file mode 100644 index 0000000000..963f826db9 --- /dev/null +++ b/testing/btest/Baseline/bifs.cat_string_array/out @@ -0,0 +1,3 @@ +isatest +thisisatest +isa diff --git a/testing/btest/Baseline/bifs.edit/out b/testing/btest/Baseline/bifs.edit/out new file mode 100644 index 0000000000..d8582f9b20 --- /dev/null +++ b/testing/btest/Baseline/bifs.edit/out @@ -0,0 +1 @@ +llo t diff --git a/testing/btest/Baseline/bifs.escape_string/out b/testing/btest/Baseline/bifs.escape_string/out new file mode 100644 index 0000000000..6d79533c61 --- /dev/null +++ b/testing/btest/Baseline/bifs.escape_string/out @@ -0,0 +1,10 @@ +12 +Test \0string +13 +Test \0string +15 +Test \x00string +13 +Test \0string +24 +546573742000737472696e67 diff --git a/testing/btest/Baseline/bifs.find_all/out b/testing/btest/Baseline/bifs.find_all/out new file mode 100644 index 0000000000..17913c44ed --- /dev/null +++ b/testing/btest/Baseline/bifs.find_all/out @@ -0,0 +1,4 @@ +es +hi +------------------- +0 diff --git a/testing/btest/Baseline/bifs.find_last/out b/testing/btest/Baseline/bifs.find_last/out new file mode 100644 index 0000000000..13eabac948 --- /dev/null +++ 
b/testing/btest/Baseline/bifs.find_last/out @@ -0,0 +1,3 @@ +es +------------------- +0 diff --git a/testing/btest/Baseline/bifs.hexdump/out b/testing/btest/Baseline/bifs.hexdump/out new file mode 100644 index 0000000000..740435f7ea --- /dev/null +++ b/testing/btest/Baseline/bifs.hexdump/out @@ -0,0 +1 @@ +0000 61 62 63 ff 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f abc.defg hijklmno^J0010 70 71 72 73 74 75 76 77 78 79 7a pqrstuvw xyz^J diff --git a/testing/btest/Baseline/bifs.join_string/out b/testing/btest/Baseline/bifs.join_string/out new file mode 100644 index 0000000000..830c2dace5 --- /dev/null +++ b/testing/btest/Baseline/bifs.join_string/out @@ -0,0 +1 @@ +this * is * a * test diff --git a/testing/btest/Baseline/bifs.lowerupper/out b/testing/btest/Baseline/bifs.lowerupper/out new file mode 100644 index 0000000000..96b69a43c8 --- /dev/null +++ b/testing/btest/Baseline/bifs.lowerupper/out @@ -0,0 +1,2 @@ +this is a test +THIS IS A TEST diff --git a/testing/btest/Baseline/bifs.sort_string_array/out b/testing/btest/Baseline/bifs.sort_string_array/out new file mode 100644 index 0000000000..533844768d --- /dev/null +++ b/testing/btest/Baseline/bifs.sort_string_array/out @@ -0,0 +1,4 @@ +a +is +test +this diff --git a/testing/btest/Baseline/bifs.split/out b/testing/btest/Baseline/bifs.split/out new file mode 100644 index 0000000000..0ec2541f3d --- /dev/null +++ b/testing/btest/Baseline/bifs.split/out @@ -0,0 +1,32 @@ +t +s is a t +t +--------------------- +t +s is a test +--------------------- +t +hi +s is a t +es +t +--------------------- +t +s is a test +--------------------- +t +hi +s is a test +--------------------- +[, thi, s i, s a tes, t] +--------------------- +X-Mailer +Testing Test (http://www.example.com) +--------------------- +A += + B += + C += + D diff --git a/testing/btest/Baseline/bifs.str_shell_escape/out b/testing/btest/Baseline/bifs.str_shell_escape/out new file mode 100644 index 0000000000..1845fefa37 --- /dev/null +++ b/testing/btest/Baseline/bifs.str_shell_escape/out @@ -0,0 +1,4 @@ +24 +echo ${TEST} > "my file" +27 +echo \${TEST} > \"my file\" diff --git a/testing/btest/Baseline/bifs.strcmp/out b/testing/btest/Baseline/bifs.strcmp/out new file mode 100644 index 0000000000..d67491ed75 --- /dev/null +++ b/testing/btest/Baseline/bifs.strcmp/out @@ -0,0 +1,3 @@ +T +T +T diff --git a/testing/btest/Baseline/bifs.string_fill/out b/testing/btest/Baseline/bifs.string_fill/out new file mode 100644 index 0000000000..b15a2d1006 --- /dev/null +++ b/testing/btest/Baseline/bifs.string_fill/out @@ -0,0 +1,3 @@ +*\0* 1 +*t\0* 2 +*test test\0* 10 diff --git a/testing/btest/Baseline/bifs.string_splitting/out b/testing/btest/Baseline/bifs.string_splitting/out deleted file mode 100644 index 8514916834..0000000000 --- a/testing/btest/Baseline/bifs.string_splitting/out +++ /dev/null @@ -1,13 +0,0 @@ -{ -[2] = Testing Test (http://www.example.com), -[1] = X-Mailer -} -{ -[2] = =, -[4] = =, -[6] = =, -[7] = D, -[1] = A , -[5] = C , -[3] = B -} diff --git a/testing/btest/Baseline/bifs.strip/out b/testing/btest/Baseline/bifs.strip/out new file mode 100644 index 0000000000..dc1ca4204c --- /dev/null +++ b/testing/btest/Baseline/bifs.strip/out @@ -0,0 +1,6 @@ +* this is a test * +*this is a test* +** +** +* * +** diff --git a/testing/btest/Baseline/bifs.strstr/out b/testing/btest/Baseline/bifs.strstr/out new file mode 100644 index 0000000000..389e262145 --- /dev/null +++ b/testing/btest/Baseline/bifs.strstr/out @@ -0,0 +1,2 @@ +2 +0 diff --git a/testing/btest/Baseline/bifs.sub/out 
b/testing/btest/Baseline/bifs.sub/out new file mode 100644 index 0000000000..d8860ac5f8 --- /dev/null +++ b/testing/btest/Baseline/bifs.sub/out @@ -0,0 +1,2 @@ +that is a test +that at a test diff --git a/testing/btest/Baseline/bifs.subst_string/out b/testing/btest/Baseline/bifs.subst_string/out new file mode 100644 index 0000000000..be3c92a20b --- /dev/null +++ b/testing/btest/Baseline/bifs.subst_string/out @@ -0,0 +1 @@ +that at another test diff --git a/testing/btest/bifs/byte_len.bro b/testing/btest/bifs/byte_len.bro new file mode 100644 index 0000000000..25191fd173 --- /dev/null +++ b/testing/btest/bifs/byte_len.bro @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "hello\0there"; + + print byte_len(a); + } diff --git a/testing/btest/bifs/cat_string_array.bro b/testing/btest/bifs/cat_string_array.bro new file mode 100644 index 0000000000..d2c2242411 --- /dev/null +++ b/testing/btest/bifs/cat_string_array.bro @@ -0,0 +1,14 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a: string_array = { + [0] = "this", [1] = "is", [2] = "a", [3] = "test" + }; + + print cat_string_array(a); + print cat_string_array_n(a, 0, |a|-1); + print cat_string_array_n(a, 1, 2); + } diff --git a/testing/btest/bifs/edit.bro b/testing/btest/bifs/edit.bro new file mode 100644 index 0000000000..c9a73d17f1 --- /dev/null +++ b/testing/btest/bifs/edit.bro @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "hello there"; + + print edit(a, "e"); + } diff --git a/testing/btest/bifs/escape_string.bro b/testing/btest/bifs/escape_string.bro new file mode 100644 index 0000000000..92b7b535d8 --- /dev/null +++ b/testing/btest/bifs/escape_string.bro @@ -0,0 +1,27 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "Test \0string"; + + print |a|; + print a; + + local b = clean(a); + print |b|; + print b; + + local c = to_string_literal(a); + print |c|; + print c; + + local d = escape_string(a); + print |d|; + print d; + + local e = string_to_ascii_hex(a); + print |e|; + print e; + } diff --git a/testing/btest/bifs/find_all.bro b/testing/btest/bifs/find_all.bro new file mode 100644 index 0000000000..edf3530c8a --- /dev/null +++ b/testing/btest/bifs/find_all.bro @@ -0,0 +1,18 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "this is a test"; + local pat = /hi|es/; + local pat2 = /aa|bb/; + + local b = find_all(a, pat); + local b2 = find_all(a, pat2); + + for (i in b) + print i; + print "-------------------"; + print |b2|; + } diff --git a/testing/btest/bifs/find_last.bro b/testing/btest/bifs/find_last.bro new file mode 100644 index 0000000000..b1a567f73a --- /dev/null +++ b/testing/btest/bifs/find_last.bro @@ -0,0 +1,17 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "this is a test"; + local pat = /hi|es/; + local pat2 = /aa|bb/; + + local b = find_last(a, pat); + local b2 = find_last(a, pat2); + + print b; + print "-------------------"; + print |b2|; + } diff --git a/testing/btest/bifs/hexdump.bro b/testing/btest/bifs/hexdump.bro new file mode 100644 index 0000000000..4c248efb77 --- /dev/null +++ b/testing/btest/bifs/hexdump.bro @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "abc\xffdefghijklmnopqrstuvwxyz"; + + print 
hexdump(a); + } diff --git a/testing/btest/bifs/join_string.bro b/testing/btest/bifs/join_string.bro new file mode 100644 index 0000000000..df5f83449b --- /dev/null +++ b/testing/btest/bifs/join_string.bro @@ -0,0 +1,14 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a: string_array = { + [1] = "this", [2] = "is", [3] = "a", [4] = "test" + }; + local b: string_vec = vector( "this", "is", "another", "test" ); + + print join_string_array(" * ", a); + print join_string_vec(b, "__"); + } diff --git a/testing/btest/bifs/lowerupper.bro b/testing/btest/bifs/lowerupper.bro new file mode 100644 index 0000000000..fcfdcde319 --- /dev/null +++ b/testing/btest/bifs/lowerupper.bro @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "this is a Test"; + + print to_lower(a); + print to_upper(a); + } diff --git a/testing/btest/bifs/sort_string_array.bro b/testing/btest/bifs/sort_string_array.bro new file mode 100644 index 0000000000..23c4f55848 --- /dev/null +++ b/testing/btest/bifs/sort_string_array.bro @@ -0,0 +1,17 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a: string_array = { + [1] = "this", [2] = "is", [3] = "a", [4] = "test" + }; + + local b = sort_string_array(a); + + print b[1]; + print b[2]; + print b[3]; + print b[4]; + } diff --git a/testing/btest/bifs/split.bro b/testing/btest/bifs/split.bro new file mode 100644 index 0000000000..fc1b5e96a0 --- /dev/null +++ b/testing/btest/bifs/split.bro @@ -0,0 +1,59 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "this is a test"; + local pat = /hi|es/; + local idx = vector( 3, 6, 13); + + local b = split(a, pat); + local c = split1(a, pat); + local d = split_all(a, pat); + local e1 = split_n(a, pat, F, 1); + local e2 = split_n(a, pat, T, 1); + + print b[1]; + print b[2]; + print b[3]; + print b[4]; + print "---------------------"; + print c[1]; + print c[2]; + print "---------------------"; + print d[1]; + print d[2]; + print d[3]; + print d[4]; + print d[5]; + print "---------------------"; + print e1[1]; + print e1[2]; + print "---------------------"; + print e2[1]; + print e2[2]; + print e2[3]; + print "---------------------"; + print str_split(a, idx); + print "---------------------"; + + a = "X-Mailer: Testing Test (http://www.example.com)"; + pat = /:[[:blank:]]*/; + local f = split1(a, pat); + + print f[1]; + print f[2]; + print "---------------------"; + + a = "A = B = C = D"; + pat = /=/; + local g = split_all(a, pat); + print g[1]; + print g[2]; + print g[3]; + print g[4]; + print g[5]; + print g[6]; + print g[7]; + } diff --git a/testing/btest/bifs/str_shell_escape.bro b/testing/btest/bifs/str_shell_escape.bro new file mode 100644 index 0000000000..a71cb4dcf6 --- /dev/null +++ b/testing/btest/bifs/str_shell_escape.bro @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "echo ${TEST} > \"my file\""; + + print |a|; + print a; + + local b = str_shell_escape(a); + print |b|; + print b; + } diff --git a/testing/btest/bifs/strcmp.bro b/testing/btest/bifs/strcmp.bro new file mode 100644 index 0000000000..af46c7fa96 --- /dev/null +++ b/testing/btest/bifs/strcmp.bro @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "this"; + local b = "testing"; + + print strcmp(a, b) > 0; + print strcmp(b, a) < 0; + 
print strcmp(a, a) == 0; + } diff --git a/testing/btest/bifs/string_fill.bro b/testing/btest/bifs/string_fill.bro new file mode 100644 index 0000000000..c47f1916cc --- /dev/null +++ b/testing/btest/bifs/string_fill.bro @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "test "; + + local b = string_fill(1, a); + local c = string_fill(2, a); + local d = string_fill(10, a); + + print fmt("*%s* %d", b, |b|); + print fmt("*%s* %d", c, |c|); + print fmt("*%s* %d", d, |d|); + } diff --git a/testing/btest/bifs/string_splitting.bro b/testing/btest/bifs/string_splitting.bro deleted file mode 100644 index 44068fe510..0000000000 --- a/testing/btest/bifs/string_splitting.bro +++ /dev/null @@ -1,12 +0,0 @@ -# -# @TEST-EXEC: bro %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "X-Mailer: Testing Test (http://www.example.com)"; - print split1(a, /:[[:blank:]]*/); - - a = "A = B = C = D"; - print split_all(a, /=/); - } diff --git a/testing/btest/bifs/strip.bro b/testing/btest/bifs/strip.bro new file mode 100644 index 0000000000..de6601b83c --- /dev/null +++ b/testing/btest/bifs/strip.bro @@ -0,0 +1,17 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = " this is a test "; + local b = ""; + local c = " "; + + print fmt("*%s*", a); + print fmt("*%s*", strip(a)); + print fmt("*%s*", b); + print fmt("*%s*", strip(b)); + print fmt("*%s*", c); + print fmt("*%s*", strip(c)); + } diff --git a/testing/btest/bifs/strstr.bro b/testing/btest/bifs/strstr.bro new file mode 100644 index 0000000000..58f79d593b --- /dev/null +++ b/testing/btest/bifs/strstr.bro @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "this is a test"; + local b = "his"; + local c = "are"; + + print strstr(a, b); + print strstr(a, c); + } diff --git a/testing/btest/bifs/sub.bro b/testing/btest/bifs/sub.bro new file mode 100644 index 0000000000..f6a956f26a --- /dev/null +++ b/testing/btest/bifs/sub.bro @@ -0,0 +1,12 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "this is a test"; + local pat = /is|ss/; + + print sub(a, pat, "at"); + print gsub(a, pat, "at"); + } diff --git a/testing/btest/bifs/subst_string.bro b/testing/btest/bifs/subst_string.bro new file mode 100644 index 0000000000..81a3f89424 --- /dev/null +++ b/testing/btest/bifs/subst_string.bro @@ -0,0 +1,12 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "this is another test"; + local b = "is"; + local c = "at"; + + print subst_string(a, b, c); + } From 03aee9197d68e9dd10440937fa209b35a20dafa2 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 24 May 2012 16:33:19 -0500 Subject: [PATCH 307/651] Add more tests for previously-untested BIFs --- testing/btest/Baseline/bifs.all_set/out | 3 +++ testing/btest/Baseline/bifs.any_set/out | 3 +++ testing/btest/Baseline/bifs.clear_table/out | 2 ++ testing/btest/Baseline/bifs.exit/out | 1 + testing/btest/Baseline/bifs.getsetenv/out | 3 +++ testing/btest/Baseline/bifs.length/out | 6 +++++ testing/btest/Baseline/bifs.md5/output | 2 ++ testing/btest/Baseline/bifs.rand/out | 6 +++++ testing/btest/Baseline/bifs.resize/out | 4 ++++ testing/btest/Baseline/bifs.sort/out | 2 ++ testing/btest/Baseline/bifs.system/out | 1 + testing/btest/Baseline/bifs.system_env/out | 1 + testing/btest/Baseline/bifs.val_size/out | 2 ++ 
testing/btest/bifs/all_set.bro | 15 ++++++++++++ testing/btest/bifs/any_set.bro | 15 ++++++++++++ testing/btest/bifs/clear_table.bro | 14 +++++++++++ testing/btest/bifs/exit.bro | 9 +++++++ testing/btest/bifs/getsetenv.bro | 20 ++++++++++++++++ testing/btest/bifs/length.bro | 22 +++++++++++++++++ testing/btest/bifs/md5.test | 3 +++ testing/btest/bifs/order.bro | 18 ++++++++++++++ testing/btest/bifs/piped_exec.bro | 6 +++-- testing/btest/bifs/rand.bro | 24 +++++++++++++++++++ testing/btest/bifs/resize.bro | 26 +++++++++++++++++++++ testing/btest/bifs/sort.bro | 18 ++++++++++++++ testing/btest/bifs/system.bro | 15 ++++++++++++ testing/btest/bifs/system_env.bro | 21 +++++++++++++++++ testing/btest/bifs/val_size.bro | 12 ++++++++++ 28 files changed, 272 insertions(+), 2 deletions(-) create mode 100644 testing/btest/Baseline/bifs.all_set/out create mode 100644 testing/btest/Baseline/bifs.any_set/out create mode 100644 testing/btest/Baseline/bifs.clear_table/out create mode 100644 testing/btest/Baseline/bifs.exit/out create mode 100644 testing/btest/Baseline/bifs.getsetenv/out create mode 100644 testing/btest/Baseline/bifs.length/out create mode 100644 testing/btest/Baseline/bifs.rand/out create mode 100644 testing/btest/Baseline/bifs.resize/out create mode 100644 testing/btest/Baseline/bifs.sort/out create mode 100644 testing/btest/Baseline/bifs.system/out create mode 100644 testing/btest/Baseline/bifs.system_env/out create mode 100644 testing/btest/Baseline/bifs.val_size/out create mode 100644 testing/btest/bifs/all_set.bro create mode 100644 testing/btest/bifs/any_set.bro create mode 100644 testing/btest/bifs/clear_table.bro create mode 100644 testing/btest/bifs/exit.bro create mode 100644 testing/btest/bifs/getsetenv.bro create mode 100644 testing/btest/bifs/length.bro create mode 100644 testing/btest/bifs/order.bro create mode 100644 testing/btest/bifs/rand.bro create mode 100644 testing/btest/bifs/resize.bro create mode 100644 testing/btest/bifs/sort.bro create mode 100644 testing/btest/bifs/system.bro create mode 100644 testing/btest/bifs/system_env.bro create mode 100644 testing/btest/bifs/val_size.bro diff --git a/testing/btest/Baseline/bifs.all_set/out b/testing/btest/Baseline/bifs.all_set/out new file mode 100644 index 0000000000..ed4964b655 --- /dev/null +++ b/testing/btest/Baseline/bifs.all_set/out @@ -0,0 +1,3 @@ +F +F +T diff --git a/testing/btest/Baseline/bifs.any_set/out b/testing/btest/Baseline/bifs.any_set/out new file mode 100644 index 0000000000..3ea3c39b0d --- /dev/null +++ b/testing/btest/Baseline/bifs.any_set/out @@ -0,0 +1,3 @@ +T +F +F diff --git a/testing/btest/Baseline/bifs.clear_table/out b/testing/btest/Baseline/bifs.clear_table/out new file mode 100644 index 0000000000..b261da18d5 --- /dev/null +++ b/testing/btest/Baseline/bifs.clear_table/out @@ -0,0 +1,2 @@ +1 +0 diff --git a/testing/btest/Baseline/bifs.exit/out b/testing/btest/Baseline/bifs.exit/out new file mode 100644 index 0000000000..ce01362503 --- /dev/null +++ b/testing/btest/Baseline/bifs.exit/out @@ -0,0 +1 @@ +hello diff --git a/testing/btest/Baseline/bifs.getsetenv/out b/testing/btest/Baseline/bifs.getsetenv/out new file mode 100644 index 0000000000..0eabe36713 --- /dev/null +++ b/testing/btest/Baseline/bifs.getsetenv/out @@ -0,0 +1,3 @@ +OK +OK +OK diff --git a/testing/btest/Baseline/bifs.length/out b/testing/btest/Baseline/bifs.length/out new file mode 100644 index 0000000000..ad43182650 --- /dev/null +++ b/testing/btest/Baseline/bifs.length/out @@ -0,0 +1,6 @@ +1 +4 +2 +0 +0 +0 diff --git 
a/testing/btest/Baseline/bifs.md5/output b/testing/btest/Baseline/bifs.md5/output index 71c0fbfcb8..a560286854 100644 --- a/testing/btest/Baseline/bifs.md5/output +++ b/testing/btest/Baseline/bifs.md5/output @@ -2,3 +2,5 @@ f97c5d29941bfb1b2fdab0874906ab82 7b0391feb2e0cd271f1cf39aafb4376f f97c5d29941bfb1b2fdab0874906ab82 7b0391feb2e0cd271f1cf39aafb4376f +571c0a35c7858ad5a0e16b8fdb41adcd +1751cbd623726f423f734e23a8c7ec06 diff --git a/testing/btest/Baseline/bifs.rand/out b/testing/btest/Baseline/bifs.rand/out new file mode 100644 index 0000000000..367833f80a --- /dev/null +++ b/testing/btest/Baseline/bifs.rand/out @@ -0,0 +1,6 @@ +185 +236 +805 +47 +996 +498 diff --git a/testing/btest/Baseline/bifs.resize/out b/testing/btest/Baseline/bifs.resize/out new file mode 100644 index 0000000000..fcefeaf4df --- /dev/null +++ b/testing/btest/Baseline/bifs.resize/out @@ -0,0 +1,4 @@ +3 +5 +0 +7 diff --git a/testing/btest/Baseline/bifs.sort/out b/testing/btest/Baseline/bifs.sort/out new file mode 100644 index 0000000000..21d24208fb --- /dev/null +++ b/testing/btest/Baseline/bifs.sort/out @@ -0,0 +1,2 @@ +[5, 3, 8] +[3, 5, 8] diff --git a/testing/btest/Baseline/bifs.system/out b/testing/btest/Baseline/bifs.system/out new file mode 100644 index 0000000000..ae782e3280 --- /dev/null +++ b/testing/btest/Baseline/bifs.system/out @@ -0,0 +1 @@ +thistest diff --git a/testing/btest/Baseline/bifs.system_env/out b/testing/btest/Baseline/bifs.system_env/out new file mode 100644 index 0000000000..31e0fce560 --- /dev/null +++ b/testing/btest/Baseline/bifs.system_env/out @@ -0,0 +1 @@ +helloworld diff --git a/testing/btest/Baseline/bifs.val_size/out b/testing/btest/Baseline/bifs.val_size/out new file mode 100644 index 0000000000..16b548f269 --- /dev/null +++ b/testing/btest/Baseline/bifs.val_size/out @@ -0,0 +1,2 @@ +72 +72 diff --git a/testing/btest/bifs/all_set.bro b/testing/btest/bifs/all_set.bro new file mode 100644 index 0000000000..31544eb31e --- /dev/null +++ b/testing/btest/bifs/all_set.bro @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = vector( T, F, T ); + print all_set(a); + + local b = vector(); + print all_set(b); + + local c = vector( T ); + print all_set(c); + } diff --git a/testing/btest/bifs/any_set.bro b/testing/btest/bifs/any_set.bro new file mode 100644 index 0000000000..5fe046cdf4 --- /dev/null +++ b/testing/btest/bifs/any_set.bro @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = vector( F, T, F ); + print any_set(a); + + local b = vector(); + print any_set(b); + + local c = vector( F ); + print any_set(c); + } diff --git a/testing/btest/bifs/clear_table.bro b/testing/btest/bifs/clear_table.bro new file mode 100644 index 0000000000..94779285af --- /dev/null +++ b/testing/btest/bifs/clear_table.bro @@ -0,0 +1,14 @@ +# +# @TEST-EXEC: bro %INPUT > out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local mytable: table[string] of string = { ["key1"] = "val1" }; + + print |mytable|; + + clear_table(mytable); + + print |mytable|; + } diff --git a/testing/btest/bifs/exit.bro b/testing/btest/bifs/exit.bro new file mode 100644 index 0000000000..e551144caa --- /dev/null +++ b/testing/btest/bifs/exit.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT >out || test $? 
-eq 7 +# @TEST-EXEC: btest-diff out + +event bro_init() + { + print "hello"; + exit(7); + } diff --git a/testing/btest/bifs/getsetenv.bro b/testing/btest/bifs/getsetenv.bro new file mode 100644 index 0000000000..b4ee9a0931 --- /dev/null +++ b/testing/btest/bifs/getsetenv.bro @@ -0,0 +1,20 @@ +# +# @TEST-EXEC: TESTBRO=testvalue bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = getenv("NOTDEFINED"); + local b = getenv("TESTBRO"); + if ( |a| == 0 ) + print "OK"; + if ( b == "testvalue" ) + print "OK"; + + if ( setenv("NOTDEFINED", "now defined" ) == T ) + { + if ( getenv("NOTDEFINED") == "now defined" ) + print "OK"; + } + + } diff --git a/testing/btest/bifs/length.bro b/testing/btest/bifs/length.bro new file mode 100644 index 0000000000..335223c124 --- /dev/null +++ b/testing/btest/bifs/length.bro @@ -0,0 +1,22 @@ +# +# @TEST-EXEC: bro %INPUT > out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local mytable: table[string] of string = { ["key1"] = "val1" }; + local myset: set[count] = set( 3, 6, 2, 7 ); + local myvec: vector of string = vector( "value1", "value2" ); + + print length(mytable); + print length(myset); + print length(myvec); + + mytable = table(); + myset = set(); + myvec = vector(); + + print length(mytable); + print length(myset); + print length(myvec); + } diff --git a/testing/btest/bifs/md5.test b/testing/btest/bifs/md5.test index 2632d76cb4..5a9715edf1 100644 --- a/testing/btest/bifs/md5.test +++ b/testing/btest/bifs/md5.test @@ -14,3 +14,6 @@ md5_hash_update("b", "three"); print md5_hash_finish("a"); print md5_hash_finish("b"); + +print md5_hmac("one"); +print md5_hmac("one", "two", "three"); diff --git a/testing/btest/bifs/order.bro b/testing/btest/bifs/order.bro new file mode 100644 index 0000000000..176e733cfe --- /dev/null +++ b/testing/btest/bifs/order.bro @@ -0,0 +1,18 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function myfunc(a: count, b: count): bool + { + return a < b; + } + +event bro_init() + { + local a = vector( 5, 3, 8 ); + + print order(a, myfunc); + + print a; + + } diff --git a/testing/btest/bifs/piped_exec.bro b/testing/btest/bifs/piped_exec.bro index 32fd5c5f80..3a76eba8f5 100644 --- a/testing/btest/bifs/piped_exec.bro +++ b/testing/btest/bifs/piped_exec.bro @@ -5,8 +5,10 @@ global cmds = "print \"hello world\";"; cmds = string_cat(cmds, "\nprint \"foobar\";"); -piped_exec("bro", cmds); +if ( piped_exec("bro", cmds) != T ) + exit(1); # Test null output. 
-piped_exec("cat > test.txt", "\x00\x00hello\x00\x00"); +if ( piped_exec("cat > test.txt", "\x00\x00hello\x00\x00") != T ) + exit(1); diff --git a/testing/btest/bifs/rand.bro b/testing/btest/bifs/rand.bro new file mode 100644 index 0000000000..229645944e --- /dev/null +++ b/testing/btest/bifs/rand.bro @@ -0,0 +1,24 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = rand(1000); + local b = rand(1000); + local c = rand(1000); + + print a; + print b; + print c; + + srand(575); + + local d = rand(1000); + local e = rand(1000); + local f = rand(1000); + + print d; + print e; + print f; + } diff --git a/testing/btest/bifs/resize.bro b/testing/btest/bifs/resize.bro new file mode 100644 index 0000000000..37e4ac38d9 --- /dev/null +++ b/testing/btest/bifs/resize.bro @@ -0,0 +1,26 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = vector( 5, 3, 8 ); + + print |a|; + + if ( resize(a, 5) != 3 ) + exit(1); + + print |a|; + + if ( resize(a, 0) != 5 ) + exit(1); + + print |a|; + + if ( resize(a, 7) != 0 ) + exit(1); + + print |a|; + + } diff --git a/testing/btest/bifs/sort.bro b/testing/btest/bifs/sort.bro new file mode 100644 index 0000000000..2d6d82129f --- /dev/null +++ b/testing/btest/bifs/sort.bro @@ -0,0 +1,18 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function myfunc(a: count, b: count): bool + { + return a < b; + } + +event bro_init() + { + local a = vector( 5, 3, 8 ); + + print sort(a, myfunc); + + print a; + + } diff --git a/testing/btest/bifs/system.bro b/testing/btest/bifs/system.bro new file mode 100644 index 0000000000..b73aed4d79 --- /dev/null +++ b/testing/btest/bifs/system.bro @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: bro %INPUT +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = system("echo thistest > out"); + if ( a != 0 ) + exit(1); + + local b = system(""); + if ( b == 0 ) + exit(1); + + } diff --git a/testing/btest/bifs/system_env.bro b/testing/btest/bifs/system_env.bro new file mode 100644 index 0000000000..d8e54a8709 --- /dev/null +++ b/testing/btest/bifs/system_env.bro @@ -0,0 +1,21 @@ +# +# @TEST-EXEC: bro %INPUT +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local vars: table[string] of string = { ["TESTBRO"] = "helloworld" }; + + # make sure the env. variable is not set + local myvar = getenv("TESTBRO"); + if ( |myvar| != 0 ) + exit(1); + + local a = system_env("echo $TESTBRO > out", vars); + if ( a != 0 ) + exit(1); + + myvar = getenv("TESTBRO"); + if ( |myvar| != 0 ) + exit(1); + } diff --git a/testing/btest/bifs/val_size.bro b/testing/btest/bifs/val_size.bro new file mode 100644 index 0000000000..b779460b9b --- /dev/null +++ b/testing/btest/bifs/val_size.bro @@ -0,0 +1,12 @@ +# +# @TEST-EXEC: bro %INPUT > out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = 1; + local b = T; + + print val_size(a); + print val_size(b); + } From aaa16133a7ddb8c3f960dd4e08d27d27eae79e08 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 24 May 2012 16:48:15 -0700 Subject: [PATCH 308/651] Make tests even quieter. 
--- testing/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/Makefile b/testing/Makefile index 1c82580ec4..c4a2aeddda 100644 --- a/testing/Makefile +++ b/testing/Makefile @@ -6,10 +6,10 @@ all: make-verbose coverage brief: make-brief coverage make-verbose: - @for repo in $(DIRS); do (cd $$repo && make ); done + @for repo in $(DIRS); do (cd $$repo && make -s ); done make-brief: - @for repo in $(DIRS); do (cd $$repo && make brief ); done + @for repo in $(DIRS); do (cd $$repo && make -s brief ); done coverage: @for repo in $(DIRS); do (cd $$repo && echo "Coverage for '$$repo' dir:" && make coverage); done From 2933961042fc6a586de23e4f1d13578e74e359e9 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 24 May 2012 16:48:28 -0700 Subject: [PATCH 309/651] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index e0da8d0e28..3ee8d4b323 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit e0da8d0e284bbebbaef711c91c1b961580f225d2 +Subproject commit 3ee8d4b3232d74ed7bd475819193ad3a4055e2f5 From d2c756cac43f0fbaa2ac05844ab68c245c47e6a7 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 24 May 2012 17:33:02 -0700 Subject: [PATCH 310/651] Make tests even quieter. --- testing/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/Makefile b/testing/Makefile index c4a2aeddda..d56ee4e0e1 100644 --- a/testing/Makefile +++ b/testing/Makefile @@ -12,7 +12,7 @@ make-brief: @for repo in $(DIRS); do (cd $$repo && make -s brief ); done coverage: - @for repo in $(DIRS); do (cd $$repo && echo "Coverage for '$$repo' dir:" && make coverage); done + @for repo in $(DIRS); do (cd $$repo && echo "Coverage for '$$repo' dir:" && make -s coverage); done @test -f btest/coverage.log && cp btest/coverage.log `mktemp brocov.tmp.XXX` || true @for f in external/*/coverage.log; do test -f $$f && cp $$f `mktemp brocov.tmp.XXX` || true; done @echo "Complete test suite code coverage:" From 3d2009cacfda99acefd16ed64c5e5f8da4f43d10 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 24 May 2012 17:43:35 -0700 Subject: [PATCH 311/651] Updating submodule(s). [nomail] --- aux/broccoli | 2 +- aux/broctl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aux/broccoli b/aux/broccoli index 95c93494d7..07866915a1 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 95c93494d7192f69d30f208c4caa3bd38adda6fd +Subproject commit 07866915a1450ddd25b888917f494b4824b0cc3f diff --git a/aux/broctl b/aux/broctl index ba9e1aa2f2..892b60edb9 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit ba9e1aa2f2159deac0cf96863f54405643764df0 +Subproject commit 892b60edb967bb456872638f22ba994e84530137 From b5417a32be4cfe4babdf26f942b3b2b84e785781 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 25 May 2012 08:31:06 -0700 Subject: [PATCH 312/651] Some tweaks to the DS doc. Also including a section with deficiencies. --- doc/logging-dataseries.rst | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst index b41b9fb0b7..554600f055 100644 --- a/doc/logging-dataseries.rst +++ b/doc/logging-dataseries.rst @@ -6,8 +6,8 @@ Binary Output with DataSeries .. rst-class:: opening Bro's default ASCII log format is not exactly the most efficient - way for storing large volumes of data. 
An an alternative, Bro comes - with experimental support for `DataSeries + way for storing and searching large volumes of data. An an + alternative, Bro comes with experimental support for `DataSeries `_ output, an efficient binary format for recording structured bulk data. DataSeries is developed and maintained at HP Labs. @@ -35,9 +35,12 @@ To build and install the two into ````, do:: Please refer to the packages' documentation for more information about the installation process. In particular, there's more information on required and optional `dependencies for Lintel -`_ +`_ and `dependencies for DataSeries -`_ +`_. +For users on RedHat-style systems, you'll need the following:: + + yum install libxml2-devel boost-devel Compiling Bro with DataSeries Support ------------------------------------- @@ -166,3 +169,18 @@ with the output files. The ``man`` pages for these tool show further options, and their ``-h`` option gives some more information (either can be a bit cryptic unfortunately though). + +Deficiencies +------------ + +Due to limitations of the DataSeries format, one cannot inspect its +files before they have been fully written. In other words, when using +DataSeries, it's currently it's not possible to inspect the live log +files inside the spool directory before they are rotated to their +final location. It seems that this could be fixed with some effort, +and we will work with DataSeries development team on that if the +format gains traction among Bro users. + +Likewise, we're considering writing custom command line tools for +interacting with DataSeries files, making that a bit more convenient +than what the standard utilities provide. From da34266a526eb08f757fe4a6a7bf61fbdb82a3d6 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 25 May 2012 08:36:36 -0700 Subject: [PATCH 313/651] Switching default DS compression to gzip. --- scripts/base/frameworks/logging/writers/dataseries.bro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro index ccee500c3a..e85d9c8c49 100644 --- a/scripts/base/frameworks/logging/writers/dataseries.bro +++ b/scripts/base/frameworks/logging/writers/dataseries.bro @@ -10,7 +10,7 @@ export { ## 'lzo' -- LZO compression. Very fast decompression times. ## 'gz' -- GZIP compression. Slower than LZF, but also produces smaller output. ## 'bz2' -- BZIP2 compression. Slower than GZIP, but also produces smaller output. - const compression = "lzo" &redef; + const compression = "gz" &redef; ## The extent buffer size. ## Larger values here lead to better compression and more efficient writes, but From 2034c10e9766d833e7bd32d011c5557ab1f90a7f Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 25 May 2012 10:33:22 -0700 Subject: [PATCH 314/651] make input framework source (hopefully) adhere to the usual indentation style. No functional changes. 
--- src/input/Manager.cc | 1224 ++++++++++++++++++-------------- src/input/ReaderBackend.cc | 148 ++-- src/input/ReaderFrontend.cc | 40 +- src/input/readers/Ascii.cc | 314 ++++---- src/input/readers/Benchmark.cc | 118 ++- src/input/readers/Raw.cc | 155 ++-- 6 files changed, 1102 insertions(+), 897 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 6b179f66a1..0fde16b87d 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -24,25 +24,30 @@ using threading::Value; using threading::Field; /** - * InputHashes are used as Dictionaries to store the value and index hashes for all lines currently stored in a table. - * Index hash is stored as HashKey*, because it is thrown into other Bro functions that need the complex structure of it. - * For everything we do (with values), we just take the hash_t value and compare it directly with == + * InputHashes are used as Dictionaries to store the value and index hashes for all + * lines currently stored in a table. Index hash is stored as HashKey*, because it is + * thrown into other Bro functions that need the complex structure of it. + * For everything we do (with values), we just take the hash_t value and compare it + * directly with == */ -struct InputHash { +struct InputHash + { hash_t valhash; HashKey* idxkey; ~InputHash(); -}; + }; -InputHash::~InputHash() { +InputHash::~InputHash() + { if ( idxkey ) delete idxkey; -} + } -static void input_hash_delete_func(void* val) { +static void input_hash_delete_func(void* val) + { InputHash* h = (InputHash*) val; delete h; -} + } declare(PDict, InputHash); @@ -68,14 +73,16 @@ public: virtual ~Stream(); }; -Manager::Stream::Stream() { +Manager::Stream::Stream() + { type = 0; reader = 0; description = 0; removed = false; -} + } -Manager::Stream::~Stream() { +Manager::Stream::~Stream() + { if ( type ) Unref(type); if ( description ) @@ -83,7 +90,7 @@ Manager::Stream::~Stream() { if ( reader ) delete(reader); -} + } class Manager::TableStream: public Manager::Stream { public: @@ -120,7 +127,8 @@ public: ~EventStream(); }; -Manager::TableStream::TableStream() : Manager::Stream::Stream() { +Manager::TableStream::TableStream() : Manager::Stream::Stream() + { filter_type = TABLE_FILTER; tab = 0; @@ -131,20 +139,22 @@ Manager::TableStream::TableStream() : Manager::Stream::Stream() { lastDict = 0; pred = 0; -} + } -Manager::EventStream::EventStream() : Manager::Stream::Stream() { +Manager::EventStream::EventStream() : Manager::Stream::Stream() + { fields = 0; filter_type = EVENT_FILTER; -} + } -Manager::EventStream::~EventStream() { - if ( fields ) { +Manager::EventStream::~EventStream() + { + if ( fields ) Unref(fields); - } -} + } -Manager::TableStream::~TableStream() { +Manager::TableStream::~TableStream() + { if ( tab ) Unref(tab); if ( itype ) @@ -152,22 +162,24 @@ Manager::TableStream::~TableStream() { if ( rtype ) // can be 0 for sets Unref(rtype); - if ( currDict != 0 ) { + if ( currDict != 0 ) + { currDict->Clear(); delete currDict; - } + } - if ( lastDict != 0 ) { + if ( lastDict != 0 ) + { lastDict->Clear();; delete lastDict; - } -} + } + } struct ReaderDefinition { bro_int_t type; // the type const char *name; // descriptive name for error messages bool (*init)(); // optional one-time inifializing function - ReaderBackend* (*factory)(ReaderFrontend* frontend); // factory function for creating instances + ReaderBackend* (*factory)(ReaderFrontend* frontend); // factory function for creating instances }; ReaderDefinition input_readers[] = { @@ -180,49 +192,55 @@ 
ReaderDefinition input_readers[] = { }; Manager::Manager() -{ -} - -Manager::~Manager() { - for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) { - delete s->second; - delete s->first; + { } -} - -ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) { - ReaderDefinition* ir = input_readers; - - while ( true ) { - if ( ir->type == BifEnum::Input::READER_DEFAULT ) +Manager::~Manager() + { + for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) { - reporter->Error("The reader that was requested was not found and could not be initialized."); - return 0; + delete s->second; + delete s->first; } - if ( ir->type != type ) { + } + +ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) + { + ReaderDefinition* ir = input_readers; + + while ( true ) + { + if ( ir->type == BifEnum::Input::READER_DEFAULT ) + { + reporter->Error("The reader that was requested was not found and could not be initialized."); + return 0; + } + + if ( ir->type != type ) + { // no, didn't find the right one... ++ir; continue; - } + } // call init function of writer if presnt if ( ir->init ) - { + { if ( (*ir->init)() ) { - //clear it to be not called again - ir->init = 0; - } else { + //clear it to be not called again + ir->init = 0; + } + else { // ohok. init failed, kill factory for all eternity ir->factory = 0; DBG_LOG(DBG_LOGGING, "Failed to init input class %s", ir->name); return 0; } - } + } if ( !ir->factory ) // no factory? @@ -230,7 +248,8 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) // all done. break. break; - } + } + assert(ir->factory); ReaderBackend* backend = (*ir->factory)(frontend); @@ -238,31 +257,34 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) frontend->ty_name = ir->name; return backend; -} + } // create a new input reader object to be used at whomevers leisure lateron. bool Manager::CreateStream(Stream* info, RecordVal* description) -{ + { ReaderDefinition* ir = input_readers; RecordType* rtype = description->Type()->AsRecordType(); - if ( ! ( same_type(rtype, BifType::Record::Input::TableDescription, 0) || same_type(rtype, BifType::Record::Input::EventDescription, 0) ) ) - { + if ( ! ( same_type(rtype, BifType::Record::Input::TableDescription, 0) + || same_type(rtype, BifType::Record::Input::EventDescription, 0) ) ) + { reporter->Error("Streamdescription argument not of right type for new input stream"); return false; - } + } Val* name_val = description->LookupWithDefault(rtype->FieldOffset("name")); string name = name_val->AsString()->CheckString(); Unref(name_val); - { + { Stream *i = FindStream(name); - if ( i != 0 ) { - reporter->Error("Trying create already existing input stream %s", name.c_str()); + if ( i != 0 ) + { + reporter->Error("Trying create already existing input stream %s", + name.c_str()); return false; + } } - } EnumVal* reader = description->LookupWithDefault(rtype->FieldOffset("reader"))->AsEnumVal(); Val *autostart = description->LookupWithDefault(rtype->FieldOffset("autostart")); @@ -293,25 +315,27 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) return true; -} + } -bool Manager::CreateEventStream(RecordVal* fval) { +bool Manager::CreateEventStream(RecordVal* fval) + { RecordType* rtype = fval->Type()->AsRecordType(); if ( ! 
same_type(rtype, BifType::Record::Input::EventDescription, 0) ) - { + { reporter->Error("filter argument not of right type"); return false; - } + } EventStream* filter = new EventStream(); - { + { bool res = CreateStream(filter, fval); - if ( res == false ) { + if ( res == false ) + { delete filter; return false; + } } - } RecordType *fields = fval->LookupWithDefault(rtype->FieldOffset("fields"))->AsType()->AsTypeType()->Type()->AsRecordType(); @@ -322,77 +346,87 @@ bool Manager::CreateEventStream(RecordVal* fval) { Func* event = event_val->AsFunc(); Unref(event_val); - { + { FuncType* etype = event->FType()->AsFuncType(); - if ( ! etype->IsEvent() ) { + if ( ! etype->IsEvent() ) + { reporter->Error("stream event is a function, not an event"); return false; - } + } const type_list* args = etype->ArgTypes()->Types(); - if ( args->length() < 2 ) { + if ( args->length() < 2 ) + { reporter->Error("event takes not enough arguments"); return false; - } + } if ( ! same_type((*args)[1], BifType::Enum::Input::Event, 0) ) - { + { reporter->Error("events second attribute must be of type Input::Event"); return false; - } + } if ( ! same_type((*args)[0], BifType::Record::Input::EventDescription, 0) ) - { + { reporter->Error("events first attribute must be of type Input::EventDescription"); return false; - } + } - if ( want_record->InternalInt() == 0 ) { - if ( args->length() != fields->NumFields() + 2 ) { + if ( want_record->InternalInt() == 0 ) + { + if ( args->length() != fields->NumFields() + 2 ) + { reporter->Error("event has wrong number of arguments"); return false; - } + } - for ( int i = 0; i < fields->NumFields(); i++ ) { - if ( !same_type((*args)[i+2], fields->FieldType(i) ) ) { + for ( int i = 0; i < fields->NumFields(); i++ ) + { + if ( !same_type((*args)[i+2], fields->FieldType(i) ) ) + { reporter->Error("Incompatible type for event"); return false; + } } - } - } else if ( want_record->InternalInt() == 1 ) { - if ( args->length() != 3 ) { + } + else if ( want_record->InternalInt() == 1 ) + { + if ( args->length() != 3 ) + { reporter->Error("event has wrong number of arguments"); return false; - } + } - if ( !same_type((*args)[2], fields ) ) { + if ( !same_type((*args)[2], fields ) ) + { reporter->Error("Incompatible type for event"); return false; - } + } - } else { + } + else assert(false); - } - } + } vector fieldsV; // vector, because UnrollRecordType needs it bool status = !UnrollRecordType(&fieldsV, fields, ""); - if ( status ) { + if ( status ) + { reporter->Error("Problem unrolling"); return false; - } + } Field** logf = new Field*[fieldsV.size()]; - for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { + for ( unsigned int i = 0; i < fieldsV.size(); i++ ) logf[i] = fieldsV[i]; - } Unref(fields); // ref'd by lookupwithdefault filter->num_fields = fieldsV.size(); @@ -412,56 +446,64 @@ bool Manager::CreateEventStream(RecordVal* fval) { return true; } -bool Manager::CreateTableStream(RecordVal* fval) { +bool Manager::CreateTableStream(RecordVal* fval) + { RecordType* rtype = fval->Type()->AsRecordType(); if ( ! 
same_type(rtype, BifType::Record::Input::TableDescription, 0) ) - { + { reporter->Error("filter argument not of right type"); return false; - } + } TableStream* filter = new TableStream(); - { + { bool res = CreateStream(filter, fval); - if ( res == false ) { + if ( res == false ) + { delete filter; return false; + } } - } Val* pred = fval->LookupWithDefault(rtype->FieldOffset("pred")); RecordType *idx = fval->LookupWithDefault(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); RecordType *val = 0; - if ( fval->LookupWithDefault(rtype->FieldOffset("val")) != 0 ) { + + if ( fval->LookupWithDefault(rtype->FieldOffset("val")) != 0 ) + { val = fval->LookupWithDefault(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); Unref(val); // The lookupwithdefault in the if-clause ref'ed val. - } + } + TableVal *dst = fval->LookupWithDefault(rtype->FieldOffset("destination"))->AsTableVal(); // check if index fields match table description - { + { int num = idx->NumFields(); const type_list* tl = dst->Type()->AsTableType()->IndexTypes(); loop_over_list(*tl, j) { - if ( j >= num ) { + if ( j >= num ) + { reporter->Error("Table type has more indexes than index definition"); return false; - } + } - if ( !same_type(idx->FieldType(j), (*tl)[j]) ) { + if ( !same_type(idx->FieldType(j), (*tl)[j]) ) + { reporter->Error("Table type does not match index type"); return false; - } + } } - if ( num != j ) { + if ( num != j ) + { reporter->Error("Table has less elements than index definition"); return false; + } } - } Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); @@ -469,51 +511,57 @@ bool Manager::CreateTableStream(RecordVal* fval) { Func* event = event_val ? event_val->AsFunc() : 0; Unref(event_val); - if ( event ) { + if ( event ) + { FuncType* etype = event->FType()->AsFuncType(); - if ( ! etype->IsEvent() ) { + if ( ! etype->IsEvent() ) + { reporter->Error("stream event is a function, not an event"); return false; - } + } const type_list* args = etype->ArgTypes()->Types(); if ( args->length() != 4 ) - { + { reporter->Error("Table event must take 4 arguments"); return false; - } + } if ( ! same_type((*args)[0], BifType::Record::Input::TableDescription, 0) ) - { + { reporter->Error("table events first attribute must be of type Input::TableDescription"); return false; - } + } if ( ! same_type((*args)[1], BifType::Enum::Input::Event, 0) ) - { + { reporter->Error("table events second attribute must be of type Input::Event"); return false; - } + } if ( ! same_type((*args)[2], idx) ) - { + { reporter->Error("table events index attributes do not match"); return false; - } + } if ( want_record->InternalInt() == 1 && ! 
same_type((*args)[3], val) ) - { + { reporter->Error("table events value attributes do not match"); return false; - } else if ( want_record->InternalInt() == 0 && !same_type((*args)[3], val->FieldType(0) ) ) { + } + else if ( want_record->InternalInt() == 0 + && !same_type((*args)[3], val->FieldType(0) ) ) + { reporter->Error("table events value attribute does not match"); return false; - } + } + assert(want_record->InternalInt() == 1 || want_record->InternalInt() == 0); - } + } vector fieldsV; // vector, because we don't know the length beforehands @@ -529,16 +577,16 @@ bool Manager::CreateTableStream(RecordVal* fval) { if ( !val ) assert(valfields == 0); - if ( status ) { + if ( status ) + { reporter->Error("Problem unrolling"); return false; - } + } Field** fields = new Field*[fieldsV.size()]; - for ( unsigned int i = 0; i < fieldsV.size(); i++ ) { + for ( unsigned int i = 0; i < fieldsV.size(); i++ ) fields[i] = fieldsV[i]; - } filter->pred = pred ? pred->AsFunc() : 0; filter->num_idx_fields = idxfields; @@ -556,13 +604,15 @@ bool Manager::CreateTableStream(RecordVal* fval) { Unref(want_record); // ref'd by lookupwithdefault Unref(pred); - if ( valfields > 1 ) { - if ( ! filter->want_record ) { + if ( valfields > 1 ) + { + if ( ! filter->want_record ) + { reporter->Error("Stream %s does not want a record (want_record=F), but has more then one value field. Aborting", filter->name.c_str()); delete filter; return false; + } } - } assert(filter->reader); @@ -574,7 +624,7 @@ bool Manager::CreateTableStream(RecordVal* fval) { filter->name.c_str()); return true; -} + } bool Manager::IsCompatibleType(BroType* t, bool atomic_only) @@ -582,7 +632,7 @@ bool Manager::IsCompatibleType(BroType* t, bool atomic_only) if ( ! t ) return false; - switch ( t->Tag() ) { + switch ( t->Tag() ) { case TYPE_BOOL: case TYPE_INT: case TYPE_COUNT: @@ -624,20 +674,21 @@ bool Manager::IsCompatibleType(BroType* t, bool atomic_only) } return false; -} + } -bool Manager::RemoveStream(const string &name) { +bool Manager::RemoveStream(const string &name) + { Stream *i = FindStream(name); - if ( i == 0 ) { + if ( i == 0 ) return false; // not found - } - if ( i->removed ) { + if ( i->removed ) + { reporter->Error("Stream %s is already queued for removal. 
Ignoring remove.", name.c_str()); return false; - } + } i->removed = true; @@ -649,54 +700,66 @@ bool Manager::RemoveStream(const string &name) { #endif return true; -} - -bool Manager::RemoveStreamContinuation(ReaderFrontend* reader) { - Stream *i = FindStream(reader); - - if ( i == 0 ) { - reporter->Error("Stream not found in RemoveStreamContinuation"); - return false; } +bool Manager::RemoveStreamContinuation(ReaderFrontend* reader) + { + Stream *i = FindStream(reader); + + if ( i == 0 ) + { + reporter->Error("Stream not found in RemoveStreamContinuation"); + return false; + } #ifdef DEBUG DBG_LOG(DBG_INPUT, "Successfully executed removal of stream %s", i->name.c_str()); #endif + readers.erase(reader); delete(i); return true; -} + } -bool Manager::UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend) { - for ( int i = 0; i < rec->NumFields(); i++ ) +bool Manager::UnrollRecordType(vector *fields, + const RecordType *rec, const string& nameprepend) { - if ( !IsCompatibleType(rec->FieldType(i)) ) { + for ( int i = 0; i < rec->NumFields(); i++ ) + { + + if ( !IsCompatibleType(rec->FieldType(i)) ) + { reporter->Error("Incompatible type \"%s\" in table definition for ReaderFrontend", type_name(rec->FieldType(i)->Tag())); return false; - } + } if ( rec->FieldType(i)->Tag() == TYPE_RECORD ) - { + { string prep = nameprepend + rec->FieldName(i) + "."; if ( !UnrollRecordType(fields, rec->FieldType(i)->AsRecordType(), prep) ) - { + { return false; - } + } - } else { + } + else + { Field* field = new Field(); field->name = nameprepend + rec->FieldName(i); field->type = rec->FieldType(i)->Tag(); - if ( field->type == TYPE_TABLE ) { + if ( field->type == TYPE_TABLE ) + { field->subtype = rec->FieldType(i)->AsSetType()->Indices()->PureType()->Tag(); - } else if ( field->type == TYPE_VECTOR ) { + } + else if ( field->type == TYPE_VECTOR ) + { field->subtype = rec->FieldType(i)->AsVectorType()->YieldType()->Tag(); - } else if ( field->type == TYPE_PORT && - rec->FieldDecl(i)->FindAttr(ATTR_TYPE_COLUMN) ) { + } else if ( field->type == TYPE_PORT && + rec->FieldDecl(i)->FindAttr(ATTR_TYPE_COLUMN) ) + { // we have an annotation for the second column Val* c = rec->FieldDecl(i)->FindAttr(ATTR_TYPE_COLUMN)->AttrExpr()->Eval(0); @@ -705,31 +768,32 @@ bool Manager::UnrollRecordType(vector *fields, const RecordType *rec, co assert(c->Type()->Tag() == TYPE_STRING); field->secondary_name = c->AsStringVal()->AsString()->CheckString(); - } + } - if ( rec->FieldDecl(i)->FindAttr(ATTR_OPTIONAL ) ) { + if ( rec->FieldDecl(i)->FindAttr(ATTR_OPTIONAL ) ) field->optional = true; - } fields->push_back(field); + } } - } return true; -} + } bool Manager::ForceUpdate(const string &name) -{ + { Stream *i = FindStream(name); - if ( i == 0 ) { + if ( i == 0 ) + { reporter->Error("Stream %s not found", name.c_str()); return false; - } + } - if ( i->removed ) { + if ( i->removed ) + { reporter->Error("Stream %s is already queued for removal. 
Ignoring force update.", name.c_str()); return false; - } + } i->reader->Update(); @@ -742,31 +806,34 @@ bool Manager::ForceUpdate(const string &name) } -Val* Manager::RecordValToIndexVal(RecordVal *r) { +Val* Manager::RecordValToIndexVal(RecordVal *r) + { Val* idxval; RecordType *type = r->Type()->AsRecordType(); int num_fields = type->NumFields(); - if ( num_fields == 1 && type->FieldDecl(0)->type->Tag() != TYPE_RECORD ) { + if ( num_fields == 1 && type->FieldDecl(0)->type->Tag() != TYPE_RECORD ) + { idxval = r->LookupWithDefault(0); - } else { + } + else + { ListVal *l = new ListVal(TYPE_ANY); - for ( int j = 0 ; j < num_fields; j++ ) { - //Val* rval = r->Lookup(j); - //assert(rval != 0); + for ( int j = 0 ; j < num_fields; j++ ) l->Append(r->LookupWithDefault(j)); - } + idxval = l; - } + } return idxval; -} + } -Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Value* const *vals) { +Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Value* const *vals) + { Val* idxval; int position = 0; @@ -776,47 +843,54 @@ Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Valu position = 1; } else { ListVal *l = new ListVal(TYPE_ANY); - for ( int j = 0 ; j < type->NumFields(); j++ ) { - if ( type->FieldType(j)->Tag() == TYPE_RECORD ) { - l->Append(ValueToRecordVal(vals, type->FieldType(j)->AsRecordType(), &position)); - } else { + for ( int j = 0 ; j < type->NumFields(); j++ ) + { + if ( type->FieldType(j)->Tag() == TYPE_RECORD ) + l->Append(ValueToRecordVal(vals, + type->FieldType(j)->AsRecordType(), &position)); + else + { l->Append(ValueToVal(vals[position], type->FieldType(j))); position++; + } } - } idxval = l; - } + } assert ( position == num_fields ); return idxval; -} + } -void Manager::SendEntry(ReaderFrontend* reader, Value* *vals) { +void Manager::SendEntry(ReaderFrontend* reader, Value* *vals) + { Stream *i = FindStream(reader); - if ( i == 0 ) { + if ( i == 0 ) + { reporter->InternalError("Unknown reader in SendEntry"); return; - } + } int readFields; - if ( i->filter_type == TABLE_FILTER ) { + if ( i->filter_type == TABLE_FILTER ) readFields = SendEntryTable(i, vals); - } else if ( i->filter_type == EVENT_FILTER ) { + else if ( i->filter_type == EVENT_FILTER ) + { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); readFields = SendEventStreamEvent(i, type, vals); - } else { + } + else assert(false); - } - for ( int i = 0; i < readFields; i++ ) { + for ( int i = 0; i < readFields; i++ ) delete vals[i]; - } - delete [] vals; -} -int Manager::SendEntryTable(Stream* i, const Value* const *vals) { + delete [] vals; + } + +int Manager::SendEntryTable(Stream* i, const Value* const *vals) + { bool updated = false; assert(i); @@ -826,59 +900,66 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) { HashKey* idxhash = HashValues(filter->num_idx_fields, vals); - if ( idxhash == 0 ) { + if ( idxhash == 0 ) + { reporter->Error("Could not hash line. Ignoring"); return filter->num_val_fields + filter->num_idx_fields; - } + } hash_t valhash = 0; - if ( filter->num_val_fields > 0 ) { + if ( filter->num_val_fields > 0 ) + { HashKey* valhashkey = HashValues(filter->num_val_fields, vals+filter->num_idx_fields); if ( valhashkey == 0 ) { // empty line. index, but no values. // hence we also have no hash value... 
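	// A simplified, self-contained sketch of the bookkeeping that lastDict and
	// currDict implement in this function, with std::map standing in for the
	// PDict/HashKey machinery (EntryKind and ClassifyEntry are hypothetical
	// names, not part of this patch): an index hash seen in the previous read
	// with an unchanged value hash is an exact duplicate, one seen with a
	// different value hash is an update, and an unknown index hash is new.

	#include <map>
	#include <string>
	#include <stdint.h>

	enum EntryKind { ENTRY_NEW, ENTRY_CHANGED, ENTRY_DUPLICATE };

	static EntryKind ClassifyEntry(const std::map<std::string, uint64_t>& last,
	                               const std::string& idxhash, uint64_t valhash)
		{
		std::map<std::string, uint64_t>::const_iterator it = last.find(idxhash);

		if ( it == last.end() )
			return ENTRY_NEW;

		if ( it->second == valhash )
			return ENTRY_DUPLICATE;

		return ENTRY_CHANGED;
		}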
- } else { + } + else + { valhash = valhashkey->Hash(); delete(valhashkey); + } } - } InputHash *h = filter->lastDict->Lookup(idxhash); - if ( h != 0 ) { + if ( h != 0 ) + { // seen before - if ( filter->num_val_fields == 0 || h->valhash == valhash ) { + if ( filter->num_val_fields == 0 || h->valhash == valhash ) + { // ok, exact duplicate, move entry to new dicrionary and do nothing else. filter->lastDict->Remove(idxhash); filter->currDict->Insert(idxhash, h); delete idxhash; return filter->num_val_fields + filter->num_idx_fields; - } else { + } + else + { assert( filter->num_val_fields > 0 ); // entry was updated in some way filter->lastDict->Remove(idxhash); // keep h for predicates updated = true; + } } - } Val* valval; RecordVal* predidx = 0; int position = filter->num_idx_fields; - if ( filter->num_val_fields == 0 ) { + if ( filter->num_val_fields == 0 ) valval = 0; - } else if ( filter->num_val_fields == 1 && !filter->want_record ) { + else if ( filter->num_val_fields == 1 && !filter->want_record ) valval = ValueToVal(vals[position], filter->rtype->FieldType(0)); - } else { + else valval = ValueToRecordVal(vals, filter->rtype, &position); - } - // call filter first to determine if we really add / change the entry - if ( filter->pred ) { + if ( filter->pred ) + { EnumVal* ev; //Ref(idxval); int startpos = 0; @@ -886,68 +967,74 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) { predidx = ValueToRecordVal(vals, filter->itype, &startpos); //ValueToRecordVal(vals, filter->itype, &startpos); - if ( updated ) { + if ( updated ) ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); - } else { + else ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - } bool result; - if ( filter->num_val_fields > 0 ) { // we have values + if ( filter->num_val_fields > 0 ) // we have values result = CallPred(filter->pred, 3, ev, predidx->Ref(), valval->Ref()); - } else { - // no values + else // no values result = CallPred(filter->pred, 2, ev, predidx->Ref()); - } - if ( result == false ) { + if ( result == false ) + { Unref(predidx); Unref(valval); - if ( !updated ) { + if ( !updated ) + { // throw away. Hence - we quit. And remove the entry from the current dictionary... // (but why should it be in there? assert this). assert ( filter->currDict->RemoveEntry(idxhash) == 0 ); delete idxhash; delete h; return filter->num_val_fields + filter->num_idx_fields; - } else { + } + else + { // keep old one filter->currDict->Insert(idxhash, h); delete idxhash; return filter->num_val_fields + filter->num_idx_fields; + } } - } - } + } // now we don't need h anymore - if we are here, the entry is updated and a new h is created. - if ( h ) { + if ( h ) + { delete h; h = 0; - } + } Val* idxval; - if ( predidx != 0 ) { + if ( predidx != 0 ) + { idxval = RecordValToIndexVal(predidx); // I think there is an unref missing here. But if I insert is, it crashes :) - } else { + } + else idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); - } + Val* oldval = 0; - if ( updated == true ) { + if ( updated == true ) + { assert(filter->num_val_fields > 0); // in that case, we need the old value to send the event (if we send an event). 
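	// The predicate contract implemented above, for reference: with value
	// fields the predicate is called as (event, index, value), without value
	// fields as (event, index). The event is EVENT_NEW for an index hash that
	// was not present in the previous read and EVENT_CHANGED for one that was
	// present with a different value hash. Returning F discards a new entry
	// outright; for a changed entry it keeps the value already stored in the
	// table.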
oldval = filter->tab->Lookup(idxval, false); - } + } //i->tab->Assign(idxval, valval); assert(idxval); HashKey* k = filter->tab->ComputeHash(idxval); - if ( !k ) { + if ( !k ) + { reporter->InternalError("could not hash"); assert(false); - } + } InputHash* ih = new InputHash(); ih->idxkey = new HashKey(k->Key(), k->Size(), k->Hash()); @@ -955,57 +1042,64 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) { if ( filter->event && updated ) Ref(oldval); // otherwise it is no longer accessible after the assignment + filter->tab->Assign(idxval, k, valval); Unref(idxval); // asssign does not consume idxval. - if ( predidx != 0 ) { + if ( predidx != 0 ) Unref(predidx); - } filter->currDict->Insert(idxhash, ih); delete idxhash; - if ( filter->event ) { + if ( filter->event ) + { EnumVal* ev; int startpos = 0; Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); - if ( updated ) { // in case of update send back the old value. + if ( updated ) + { // in case of update send back the old value. assert ( filter->num_val_fields > 0 ); ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, oldval); - } else { + } + else + { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - if ( filter->num_val_fields == 0 ) { + if ( filter->num_val_fields == 0 ) + { Ref(filter->description); SendEvent(filter->event, 3, filter->description->Ref(), ev, predidx); - } else { + } + else SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, valval->Ref()); + } - } - } + } return filter->num_val_fields + filter->num_idx_fields; -} + } -void Manager::EndCurrentSend(ReaderFrontend* reader) { +void Manager::EndCurrentSend(ReaderFrontend* reader) + { Stream *i = FindStream(reader); - if ( i == 0 ) { + + if ( i == 0 ) + { reporter->InternalError("Unknown reader in EndCurrentSend"); return; - } + } #ifdef DEBUG DBG_LOG(DBG_INPUT, "Got EndCurrentSend stream %s", i->name.c_str()); #endif - if ( i->filter_type == EVENT_FILTER ) { - // nothing to do.. + if ( i->filter_type == EVENT_FILTER ) // nothing to do.. return; - } assert(i->filter_type == TABLE_FILTER); TableStream* filter = (TableStream*) i; @@ -1016,8 +1110,8 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { InputHash* ih; HashKey *lastDictIdxKey; //while ( ( ih = i->lastDict->NextEntry(c) ) ) { - while ( ( ih = filter->lastDict->NextEntry(lastDictIdxKey, c) ) ) { - + while ( ( ih = filter->lastDict->NextEntry(lastDictIdxKey, c) ) ) + { ListVal * idx = 0; Val *val = 0; @@ -1025,16 +1119,18 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { EnumVal* ev = 0; int startpos = 0; - if ( filter->pred || filter->event ) { + if ( filter->pred || filter->event ) + { idx = filter->tab->RecoverIndex(ih->idxkey); assert(idx != 0); val = filter->tab->Lookup(idx); assert(val != 0); predidx = ListValToRecordVal(idx, filter->itype, &startpos); ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - } + } - if ( filter->pred ) { + if ( filter->pred ) + { // ask predicate, if we want to expire this element... Ref(ev); @@ -1043,7 +1139,8 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { bool result = CallPred(filter->pred, 3, ev, predidx, val); - if ( result == false ) { + if ( result == false ) + { // Keep it. Hence - we quit and simply go to the next entry of lastDict // ah well - and we have to add the entry to currDict... 
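	// Everything still sitting in lastDict at this point of the sweep was
	// delivered in the previous read but not in the one that just finished, so
	// it is treated as removed: the predicate, if any, is asked with
	// EVENT_REMOVED and may veto the removal, in which case the entry is
	// carried over into currDict and survives; otherwise the remove event (if
	// subscribed) is raised and the entry is discarded.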
Unref(predidx); @@ -1051,15 +1148,16 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { filter->currDict->Insert(lastDictIdxKey, filter->lastDict->RemoveEntry(lastDictIdxKey)); delete lastDictIdxKey; continue; + } } - } - if ( filter->event ) { + if ( filter->event ) + { Ref(predidx); Ref(val); Ref(ev); SendEvent(filter->event, 3, ev, predidx, val); - } + } if ( predidx ) // if we have a filter or an event... Unref(predidx); @@ -1070,9 +1168,9 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { filter->lastDict->Remove(lastDictIdxKey); // delete in next line delete lastDictIdxKey; delete(ih); - } + } - filter->lastDict->Clear(); // should be empty->->-> but->->-> well->->-> who knows->->-> + filter->lastDict->Clear(); // should be empt. buti- well... who knows... delete(filter->lastDict); filter->lastDict = filter->currDict; @@ -1086,39 +1184,40 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) { // Send event that the current update is indeed finished. EventHandler* handler = event_registry->Lookup("Input::update_finished"); - if ( handler == 0 ) { + if ( handler == 0 ) reporter->InternalError("Input::update_finished not found!"); - } - SendEvent(handler, 2, new StringVal(i->name.c_str()), new StringVal(i->source.c_str())); -} + } -void Manager::Put(ReaderFrontend* reader, Value* *vals) { +void Manager::Put(ReaderFrontend* reader, Value* *vals) + { Stream *i = FindStream(reader); - if ( i == 0 ) { + if ( i == 0 ) + { reporter->InternalError("Unknown reader in Put"); return; - } + } int readFields; - if ( i->filter_type == TABLE_FILTER ) { + if ( i->filter_type == TABLE_FILTER ) readFields = PutTable(i, vals); - } else if ( i->filter_type == EVENT_FILTER ) { + else if ( i->filter_type == EVENT_FILTER ) + { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); readFields = SendEventStreamEvent(i, type, vals); - } else { + } + else assert(false); - } - for ( int i = 0; i < readFields; i++ ) { + for ( int i = 0; i < readFields; i++ ) delete vals[i]; - } + delete [] vals; + } -} - -int Manager::SendEventStreamEvent(Stream* i, EnumVal* type, const Value* const *vals) { +int Manager::SendEventStreamEvent(Stream* i, EnumVal* type, const Value* const *vals) + { assert(i); assert(i->filter_type == EVENT_FILTER); @@ -1133,29 +1232,37 @@ int Manager::SendEventStreamEvent(Stream* i, EnumVal* type, const Value* const * out_vals.push_back(type); int position = 0; - if ( filter->want_record ) { + if ( filter->want_record ) + { RecordVal * r = ValueToRecordVal(vals, filter->fields, &position); out_vals.push_back(r); - } else { - for ( int j = 0; j < filter->fields->NumFields(); j++) { + } + else + { + for ( int j = 0; j < filter->fields->NumFields(); j++) + { Val* val = 0; - if ( filter->fields->FieldType(j)->Tag() == TYPE_RECORD ) { - val = ValueToRecordVal(vals, filter->fields->FieldType(j)->AsRecordType(), &position); - } else { + if ( filter->fields->FieldType(j)->Tag() == TYPE_RECORD ) + val = ValueToRecordVal(vals, + filter->fields->FieldType(j)->AsRecordType(), + &position); + else + { val = ValueToVal(vals[position], filter->fields->FieldType(j)); position++; - } + } out_vals.push_back(val); + } } - } SendEvent(filter->event, out_vals); return filter->fields->NumFields(); -} + } -int Manager::PutTable(Stream* i, const Value* const *vals) { +int Manager::PutTable(Stream* i, const Value* const *vals) + { assert(i); assert(i->filter_type == TABLE_FILTER); @@ -1165,102 +1272,115 @@ int Manager::PutTable(Stream* i, const Value* const *vals) { Val* 
valval; int position = filter->num_idx_fields; - if ( filter->num_val_fields == 0 ) { + if ( filter->num_val_fields == 0 ) valval = 0; - } else if ( filter->num_val_fields == 1 && filter->want_record == 0 ) { + else if ( filter->num_val_fields == 1 && filter->want_record == 0 ) valval = ValueToVal(vals[position], filter->rtype->FieldType(0)); - } else { + else valval = ValueToRecordVal(vals, filter->rtype, &position); - } // if we have a subscribed event, we need to figure out, if this is an update or not // same for predicates - if ( filter->pred || filter->event ) { + if ( filter->pred || filter->event ) + { bool updated = false; Val* oldval = 0; - if ( filter->num_val_fields > 0 ) { + if ( filter->num_val_fields > 0 ) + { // in that case, we need the old value to send the event (if we send an event). oldval = filter->tab->Lookup(idxval, false); - } + } - if ( oldval != 0 ) { + if ( oldval != 0 ) + { // it is an update updated = true; Ref(oldval); // have to do that, otherwise it may disappear in assign - } + } // predicate if we want the update or not - if ( filter->pred ) { + if ( filter->pred ) + { EnumVal* ev; int startpos = 0; Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); Ref(valval); - if ( updated ) { - ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); - } else { - ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - } + if ( updated ) + ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, + BifType::Enum::Input::Event); + else + ev = new EnumVal(BifEnum::Input::EVENT_NEW, + BifType::Enum::Input::Event); bool result; - if ( filter->num_val_fields > 0 ) { // we have values + if ( filter->num_val_fields > 0 ) // we have values result = CallPred(filter->pred, 3, ev, predidx, valval); - } else { - // no values + else // no values result = CallPred(filter->pred, 2, ev, predidx); - } - if ( result == false ) { + if ( result == false ) + { // do nothing Unref(idxval); Unref(valval); Unref(oldval); return filter->num_val_fields + filter->num_idx_fields; - } + } - } + } filter->tab->Assign(idxval, valval); - if ( filter->event ) { + if ( filter->event ) + { EnumVal* ev; int startpos = 0; Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); - if ( updated ) { // in case of update send back the old value. + if ( updated ) + { + // in case of update send back the old value. 
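	// Unlike SendEntryTable(), this Put() path does not consult the
	// lastDict/currDict bookkeeping; an update is recognized simply by finding
	// an existing entry under the same index. This is the path the readers use
	// in streaming mode, where lines are pushed individually rather than
	// tracked as a complete snapshot.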
assert ( filter->num_val_fields > 0 ); - ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); + ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, + BifType::Enum::Input::Event); assert ( oldval != 0 ); - SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, oldval); - } else { - ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - if ( filter->num_val_fields == 0 ) { - SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx); - } else { - SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, valval->Ref()); + SendEvent(filter->event, 4, filter->description->Ref(), + ev, predidx, oldval); + } + else + { + ev = new EnumVal(BifEnum::Input::EVENT_NEW, + BifType::Enum::Input::Event); + if ( filter->num_val_fields == 0 ) + SendEvent(filter->event, 4, filter->description->Ref(), + ev, predidx); + else + SendEvent(filter->event, 4, filter->description->Ref(), + ev, predidx, valval->Ref()); } + } - } - - - } else { - // no predicates or other stuff + } + else // no predicates or other stuff filter->tab->Assign(idxval, valval); - } + return filter->num_idx_fields + filter->num_val_fields; -} + } // Todo:: perhaps throw some kind of clear-event? -void Manager::Clear(ReaderFrontend* reader) { +void Manager::Clear(ReaderFrontend* reader) + { Stream *i = FindStream(reader); - if ( i == 0 ) { + if ( i == 0 ) + { reporter->InternalError("Unknown reader in Clear"); return; - } + } #ifdef DEBUG DBG_LOG(DBG_INPUT, "Got Clear for stream %s", @@ -1271,30 +1391,35 @@ void Manager::Clear(ReaderFrontend* reader) { TableStream* filter = (TableStream*) i; filter->tab->RemoveAll(); -} + } // put interface: delete old entry from table. -bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { +bool Manager::Delete(ReaderFrontend* reader, Value* *vals) + { Stream *i = FindStream(reader); - if ( i == 0 ) { + if ( i == 0 ) + { reporter->InternalError("Unknown reader in Delete"); return false; - } + } bool success = false; int readVals = 0; - if ( i->filter_type == TABLE_FILTER ) { + if ( i->filter_type == TABLE_FILTER ) + { TableStream* filter = (TableStream*) i; Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); assert(idxval != 0); readVals = filter->num_idx_fields + filter->num_val_fields; bool filterresult = true; - if ( filter->pred || filter->event ) { + if ( filter->pred || filter->event ) + { Val *val = filter->tab->Lookup(idxval); - if ( filter->pred ) { + if ( filter->pred ) + { Ref(val); EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); int startpos = 0; @@ -1302,62 +1427,68 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { filterresult = CallPred(filter->pred, 3, ev, predidx, val); - if ( filterresult == false ) { + if ( filterresult == false ) + { // keep it. 
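	// As with updates, the predicate gets the final say for explicit deletes:
	// it is called with EVENT_REMOVED, the index and the currently stored
	// value, and returning F leaves the entry in the table. Only when the
	// predicate agrees (or none is defined) is the remove event raised and the
	// value actually deleted below.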
Unref(idxval); success = true; + } + } - } - // only if filter = true -> no filtering - if ( filterresult && filter->event ) { + if ( filterresult && filter->event ) + { Ref(idxval); assert(val != 0); Ref(val); EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); SendEvent(filter->event, 4, filter->description->Ref(), ev, idxval, val); + } } - } // only if filter = true -> no filtering - if ( filterresult ) { + if ( filterresult ) + { Val* retptr = filter->tab->Delete(idxval); success = ( retptr != 0 ); - if ( !success ) { + if ( !success ) reporter->Error("Internal error while deleting values from input table"); - } else { + else Unref(retptr); } - } - } else if ( i->filter_type == EVENT_FILTER ) { + + } + else if ( i->filter_type == EVENT_FILTER ) + { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); readVals = SendEventStreamEvent(i, type, vals); success = true; - } else { + } + else + { assert(false); return false; - } + } - for ( int i = 0; i < readVals; i++ ) { + for ( int i = 0; i < readVals; i++ ) delete vals[i]; - } + delete [] vals; return success; -} + } bool Manager::CallPred(Func* pred_func, const int numvals, ...) -{ + { bool result; val_list vl(numvals); va_list lP; va_start(lP, numvals); for ( int i = 0; i < numvals; i++ ) - { vl.append( va_arg(lP, Val*) ); - } + va_end(lP); Val* v = pred_func->Call(&vl); @@ -1365,120 +1496,131 @@ bool Manager::CallPred(Func* pred_func, const int numvals, ...) Unref(v); return(result); -} + } bool Manager::SendEvent(const string& name, const int num_vals, Value* *vals) -{ + { EventHandler* handler = event_registry->Lookup(name.c_str()); - if ( handler == 0 ) { + if ( handler == 0 ) + { reporter->Error("Event %s not found", name.c_str()); return false; - } + } RecordType *type = handler->FType()->Args(); int num_event_vals = type->NumFields(); - if ( num_vals != num_event_vals ) { + if ( num_vals != num_event_vals ) + { reporter->Error("Wrong number of values for event %s", name.c_str()); return false; - } + } val_list* vl = new val_list; - for ( int i = 0; i < num_vals; i++) { + for ( int i = 0; i < num_vals; i++) vl->append(ValueToVal(vals[i], type->FieldType(i))); - } mgr.Dispatch(new Event(handler, vl)); - for ( int i = 0; i < num_vals; i++ ) { + for ( int i = 0; i < num_vals; i++ ) delete vals[i]; - } + delete [] vals; return true; } void Manager::SendEvent(EventHandlerPtr ev, const int numvals, ...) -{ + { val_list* vl = new val_list; va_list lP; va_start(lP, numvals); for ( int i = 0; i < numvals; i++ ) - { vl->append( va_arg(lP, Val*) ); - } + va_end(lP); mgr.QueueEvent(ev, vl, SOURCE_LOCAL); -} - -void Manager::SendEvent(EventHandlerPtr ev, list events) -{ - val_list* vl = new val_list; - - for ( list::iterator i = events.begin(); i != events.end(); i++ ) { - vl->append( *i ); } - mgr.QueueEvent(ev, vl, SOURCE_LOCAL); -} +void Manager::SendEvent(EventHandlerPtr ev, list events) + { + val_list* vl = new val_list; + + for ( list::iterator i = events.begin(); i != events.end(); i++ ) + { + vl->append( *i ); + } -// Convert a bro list value to a bro record value. I / we could think about moving this functionality to val.cc -RecordVal* Manager::ListValToRecordVal(ListVal* list, RecordType *request_type, int* position) { + mgr.QueueEvent(ev, vl, SOURCE_LOCAL); + } + +// Convert a bro list value to a bro record value. 
+// I / we could think about moving this functionality to val.cc +RecordVal* Manager::ListValToRecordVal(ListVal* list, RecordType *request_type, int* position) + { assert(position != 0 ); // we need the pointer to point to data; - if ( request_type->Tag() != TYPE_RECORD ) { + if ( request_type->Tag() != TYPE_RECORD ) + { reporter->InternalError("ListValToRecordVal called on non-record-value."); return 0; - } + } RecordVal* rec = new RecordVal(request_type->AsRecordType()); assert(list != 0); int maxpos = list->Length(); - for ( int i = 0; i < request_type->NumFields(); i++ ) { + for ( int i = 0; i < request_type->NumFields(); i++ ) + { assert ( (*position) <= maxpos ); Val* fieldVal = 0; - if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) { + if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) fieldVal = ListValToRecordVal(list, request_type->FieldType(i)->AsRecordType(), position); - } else { + else + { fieldVal = list->Index(*position); (*position)++; - } + } rec->Assign(i, fieldVal); - } + } return rec; -} + } // Convert a threading value to a record value -RecordVal* Manager::ValueToRecordVal(const Value* const *vals, RecordType *request_type, int* position) { +RecordVal* Manager::ValueToRecordVal(const Value* const *vals, + RecordType *request_type, int* position) + { assert(position != 0); // we need the pointer to point to data. - if ( request_type->Tag() != TYPE_RECORD ) { + if ( request_type->Tag() != TYPE_RECORD ) + { reporter->InternalError("ValueToRecordVal called on non-record-value."); return 0; - } + } RecordVal* rec = new RecordVal(request_type->AsRecordType()); - for ( int i = 0; i < request_type->NumFields(); i++ ) { + for ( int i = 0; i < request_type->NumFields(); i++ ) + { Val* fieldVal = 0; - if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) { + if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) fieldVal = ValueToRecordVal(vals, request_type->FieldType(i)->AsRecordType(), position); - } else { + else + { fieldVal = ValueToVal(vals[*position], request_type->FieldType(i)); (*position)++; - } + } rec->Assign(i, fieldVal); - } + } return rec; -} + } // Count the length of the values // used to create a correct length buffer for hashing later @@ -1495,7 +1637,7 @@ int Manager::GetValueLength(const Value* val) { case TYPE_COUNT: case TYPE_COUNTER: length += sizeof(val->val.uint_val); - break; + break; case TYPE_PORT: length += sizeof(val->val.port_val.port); @@ -1517,48 +1659,48 @@ int Manager::GetValueLength(const Value* val) { case TYPE_ADDR: { - switch ( val->val.addr_val.family ) { - case IPv4: - length += sizeof(val->val.addr_val.in.in4); - break; - case IPv6: - length += sizeof(val->val.addr_val.in.in6); - break; - default: - assert(false); - } - + switch ( val->val.addr_val.family ) { + case IPv4: + length += sizeof(val->val.addr_val.in.in4); + break; + case IPv6: + length += sizeof(val->val.addr_val.in.in6); + break; + default: + assert(false); + } } break; case TYPE_SUBNET: { - switch ( val->val.subnet_val.prefix.family ) { - case IPv4: - length += sizeof(val->val.subnet_val.prefix.in.in4)+sizeof(val->val.subnet_val.length); - break; - case IPv6: - length += sizeof(val->val.subnet_val.prefix.in.in6)+sizeof(val->val.subnet_val.length); - break; - default: - assert(false); - } - + switch ( val->val.subnet_val.prefix.family ) { + case IPv4: + length += sizeof(val->val.subnet_val.prefix.in.in4)+ + sizeof(val->val.subnet_val.length); + break; + case IPv6: + length += sizeof(val->val.subnet_val.prefix.in.in6)+ + sizeof(val->val.subnet_val.length); + 
break; + default: + assert(false); + } } break; - case TYPE_TABLE: { - for ( int i = 0; i < val->val.set_val.size; i++ ) { + case TYPE_TABLE: + { + for ( int i = 0; i < val->val.set_val.size; i++ ) length += GetValueLength(val->val.set_val.vals[i]); - } break; } - case TYPE_VECTOR: { + case TYPE_VECTOR: + { int j = val->val.vector_val.size; - for ( int i = 0; i < j; i++ ) { + for ( int i = 0; i < j; i++ ) length += GetValueLength(val->val.vector_val.vals[i]); - } break; } @@ -1572,7 +1714,8 @@ int Manager::GetValueLength(const Value* val) { // Given a threading::value, copy the raw data bytes into *data and return how many bytes were copied. // Used for hashing the values for lookup in the bro table -int Manager::CopyValue(char *data, const int startpos, const Value* val) { +int Manager::CopyValue(char *data, const int startpos, const Value* val) + { assert( val->present ); // presence has to be checked elsewhere switch ( val->type ) { @@ -1588,11 +1731,14 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { return sizeof(val->val.uint_val); break; - case TYPE_PORT: { + case TYPE_PORT: + { int length = 0; - memcpy(data+startpos, (const void*) &(val->val.port_val.port), sizeof(val->val.port_val.port)); + memcpy(data+startpos, (const void*) &(val->val.port_val.port), + sizeof(val->val.port_val.port)); length += sizeof(val->val.port_val.port); - memcpy(data+startpos+length, (const void*) &(val->val.port_val.proto), sizeof(val->val.port_val.proto)); + memcpy(data+startpos+length, (const void*) &(val->val.port_val.proto), + sizeof(val->val.port_val.proto)); length += sizeof(val->val.port_val.proto); return length; break; @@ -1602,7 +1748,8 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { case TYPE_DOUBLE: case TYPE_TIME: case TYPE_INTERVAL: - memcpy(data+startpos, (const void*) &(val->val.double_val), sizeof(val->val.double_val)); + memcpy(data+startpos, (const void*) &(val->val.double_val), + sizeof(val->val.double_val)); return sizeof(val->val.double_val); break; @@ -1616,64 +1763,66 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { case TYPE_ADDR: { - int length; - switch ( val->val.addr_val.family ) { - case IPv4: - length = sizeof(val->val.addr_val.in.in4); - memcpy(data + startpos, (const char*) &(val->val.addr_val.in.in4), length); - break; - case IPv6: - length = sizeof(val->val.addr_val.in.in6); - memcpy(data + startpos, (const char*) &(val->val.addr_val.in.in6), length); - break; - default: - assert(false); - } - - return length; - + int length; + switch ( val->val.addr_val.family ) { + case IPv4: + length = sizeof(val->val.addr_val.in.in4); + memcpy(data + startpos, (const char*) &(val->val.addr_val.in.in4), length); + break; + case IPv6: + length = sizeof(val->val.addr_val.in.in6); + memcpy(data + startpos, (const char*) &(val->val.addr_val.in.in6), length); + break; + default: + assert(false); + } + return length; } break; case TYPE_SUBNET: { - int length; - switch ( val->val.subnet_val.prefix.family ) { - case IPv4: - length = sizeof(val->val.addr_val.in.in4); - memcpy(data + startpos, (const char*) &(val->val.subnet_val.prefix.in.in4), length); - break; - case IPv6: - length = sizeof(val->val.addr_val.in.in6); - memcpy(data + startpos, (const char*) &(val->val.subnet_val.prefix.in.in4), length); - break; - default: - assert(false); - } - int lengthlength = sizeof(val->val.subnet_val.length); - memcpy(data + startpos + length , (const char*) &(val->val.subnet_val.length), lengthlength); - length += 
lengthlength; - return length; - + int length; + switch ( val->val.subnet_val.prefix.family ) { + case IPv4: + length = sizeof(val->val.addr_val.in.in4); + memcpy(data + startpos, + (const char*) &(val->val.subnet_val.prefix.in.in4), length); + break; + case IPv6: + length = sizeof(val->val.addr_val.in.in6); + memcpy(data + startpos, + (const char*) &(val->val.subnet_val.prefix.in.in4), length); + break; + default: + assert(false); + } + int lengthlength = sizeof(val->val.subnet_val.length); + memcpy(data + startpos + length , + (const char*) &(val->val.subnet_val.length), lengthlength); + length += lengthlength; + return length; } break; - case TYPE_TABLE: { + case TYPE_TABLE: + { int length = 0; int j = val->val.set_val.size; - for ( int i = 0; i < j; i++ ) { + for ( int i = 0; i < j; i++ ) length += CopyValue(data, startpos+length, val->val.set_val.vals[i]); - } + return length; break; } - case TYPE_VECTOR: { + case TYPE_VECTOR: + { int length = 0; int j = val->val.vector_val.size; - for ( int i = 0; i < j; i++ ) { + for ( int i = 0; i < j; i++ ) length += CopyValue(data, startpos+length, val->val.vector_val.vals[i]); - } + return length; break; } @@ -1685,52 +1834,57 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) { assert(false); return 0; -} + } // Hash num_elements threading values and return the HashKey for them. At least one of the vals has to be ->present. -HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { +HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) + { int length = 0; - for ( int i = 0; i < num_elements; i++ ) { + for ( int i = 0; i < num_elements; i++ ) + { const Value* val = vals[i]; if ( val->present ) length += GetValueLength(val); - } + } - if ( length == 0 ) { + if ( length == 0 ) + { reporter->Error("Input reader sent line where all elements are null values. 
Ignoring line"); return NULL; - } + } int position = 0; char *data = (char*) malloc(length); - if ( data == 0 ) { + if ( data == 0 ) reporter->InternalError("Could not malloc?"); - } - for ( int i = 0; i < num_elements; i++ ) { + + for ( int i = 0; i < num_elements; i++ ) + { const Value* val = vals[i]; if ( val->present ) position += CopyValue(data, position, val); - } + } HashKey *key = new HashKey(data, length); delete data; assert(position == length); return key; -} + } // convert threading value to Bro value -Val* Manager::ValueToVal(const Value* val, BroType* request_type) { +Val* Manager::ValueToVal(const Value* val, BroType* request_type) + { - if ( request_type->Tag() != TYPE_ANY && request_type->Tag() != val->type ) { + if ( request_type->Tag() != TYPE_ANY && request_type->Tag() != val->type ) + { reporter->InternalError("Typetags don't match: %d vs %d", request_type->Tag(), val->type); return 0; - } + } - if ( !val->present ) { + if ( !val->present ) return 0; // unset field - } switch ( val->type ) { case TYPE_BOOL: @@ -1762,72 +1916,73 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) { case TYPE_ADDR: { - IPAddr* addr; - switch ( val->val.addr_val.family ) { - case IPv4: - addr = new IPAddr(val->val.addr_val.in.in4); - break; - case IPv6: - addr = new IPAddr(val->val.addr_val.in.in6); - break; - default: - assert(false); - } - AddrVal* addrval = new AddrVal(*addr); - delete addr; - return addrval; - + IPAddr* addr; + switch ( val->val.addr_val.family ) { + case IPv4: + addr = new IPAddr(val->val.addr_val.in.in4); + break; + case IPv6: + addr = new IPAddr(val->val.addr_val.in.in6); + break; + default: + assert(false); + } + AddrVal* addrval = new AddrVal(*addr); + delete addr; + return addrval; } case TYPE_SUBNET: { - IPAddr* addr; - switch ( val->val.subnet_val.prefix.family ) { - case IPv4: - addr = new IPAddr(val->val.subnet_val.prefix.in.in4); - break; - case IPv6: - addr = new IPAddr(val->val.subnet_val.prefix.in.in6); - break; - default: - assert(false); - } - SubNetVal* subnetval = new SubNetVal(*addr, val->val.subnet_val.length); - delete addr; - return subnetval; - + IPAddr* addr; + switch ( val->val.subnet_val.prefix.family ) { + case IPv4: + addr = new IPAddr(val->val.subnet_val.prefix.in.in4); + break; + case IPv6: + addr = new IPAddr(val->val.subnet_val.prefix.in.in6); + break; + default: + assert(false); } + SubNetVal* subnetval = new SubNetVal(*addr, val->val.subnet_val.length); + delete addr; + return subnetval; break; + } - case TYPE_TABLE: { + case TYPE_TABLE: + { // all entries have to have the same type... BroType* type = request_type->AsTableType()->Indices()->PureType(); TypeList* set_index = new TypeList(type->Ref()); set_index->Append(type->Ref()); SetType* s = new SetType(set_index, 0); TableVal* t = new TableVal(s); - for ( int i = 0; i < val->val.set_val.size; i++ ) { + for ( int i = 0; i < val->val.set_val.size; i++ ) + { Val* assignval = ValueToVal( val->val.set_val.vals[i], type ); t->Assign(assignval, 0); Unref(assignval); // idex is not consumed by assign. - } + } Unref(s); return t; break; } - case TYPE_VECTOR: { + case TYPE_VECTOR: + { // all entries have to have the same type... 
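	// For reference, a reader-side sketch of the threading::Value layout this
	// branch consumes, using only the two-argument Value constructor and the
	// vector_val fields that appear elsewhere in this patch
	// (BuildExampleCountVector is a hypothetical helper): a vector holding
	// three counts.

	static Value* BuildExampleCountVector()
		{
		Value* vec = new Value(TYPE_VECTOR, true);
		vec->val.vector_val.size = 3;
		vec->val.vector_val.vals = new Value*[3];

		for ( int i = 0; i < 3; i++ )
			{
			// Each element carries its own type tag and presence flag.
			Value* element = new Value(TYPE_COUNT, true);
			element->val.uint_val = i;
			vec->val.vector_val.vals[i] = element;
			}

		return vec;
		}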
BroType* type = request_type->AsVectorType()->YieldType(); VectorType* vt = new VectorType(type->Ref()); VectorVal* v = new VectorVal(vt); - for ( int i = 0; i < val->val.vector_val.size; i++ ) { + for ( int i = 0; i < val->val.vector_val.size; i++ ) v->Assign(i, ValueToVal( val->val.set_val.vals[i], type ), 0); - } + Unref(vt); return v; - + break; } case TYPE_ENUM: { @@ -1836,9 +1991,10 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) { string module = extract_module_name(val->val.string_val->c_str()); string var = extract_var_name(val->val.string_val->c_str()); bro_int_t index = request_type->AsEnumType()->Lookup(module, var.c_str()); - if ( index == -1 ) { - reporter->InternalError("Value not found in enum mappimg. Module: %s, var: %s", module.c_str(), var.c_str()); - } + if ( index == -1 ) + reporter->InternalError("Value not found in enum mappimg. Module: %s, var: %s", + module.c_str(), var.c_str()); + return new EnumVal(index, request_type->Ref()->AsEnumType() ); break; } @@ -1850,26 +2006,24 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) { assert(false); return NULL; -} + } Manager::Stream* Manager::FindStream(const string &name) { for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) { if ( (*s).second->name == name ) - { return (*s).second; } - } return 0; } Manager::Stream* Manager::FindStream(ReaderFrontend* reader) -{ + { map::iterator s = readers.find(reader); - if ( s != readers.end() ) { + if ( s != readers.end() ) return s->second; - } + return 0; -} + } diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 27401ffcb8..c625301383 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -15,10 +15,11 @@ public: : threading::OutputMessage("Put", reader), val(val) {} - virtual bool Process() { + virtual bool Process() + { input_mgr->Put(Object(), val); return true; - } + } private: Value* *val; @@ -30,9 +31,10 @@ public: : threading::OutputMessage("Delete", reader), val(val) {} - virtual bool Process() { + virtual bool Process() + { return input_mgr->Delete(Object(), val); - } + } private: Value* *val; @@ -43,10 +45,11 @@ public: ClearMessage(ReaderFrontend* reader) : threading::OutputMessage("Clear", reader) {} - virtual bool Process() { + virtual bool Process() + { input_mgr->Clear(Object()); return true; - } + } private: }; @@ -57,14 +60,15 @@ public: : threading::OutputMessage("SendEvent", reader), name(name), num_vals(num_vals), val(val) {} - virtual bool Process() { + virtual bool Process() + { bool success = input_mgr->SendEvent(name, num_vals, val); if ( !success ) reporter->Error("SendEvent for event %s failed", name.c_str()); return true; // we do not want to die if sendEvent fails because the event did not return. 
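// All of these message classes follow the same pattern: the reader thread
// packages its arguments into a message, and Process() later runs on the
// main thread and hands them to input_mgr. A minimal sketch of that shape,
// assuming the templated OutputMessage base the classes above build on
// (ExampleMessage and the forwarded call are hypothetical):

class ExampleMessage : public threading::OutputMessage<ReaderFrontend> {
public:
	ExampleMessage(ReaderFrontend* reader)
		: threading::OutputMessage<ReaderFrontend>("Example", reader) {}

	virtual bool Process()
		{
		// A real message would call into the input manager here, e.g.
		// a hypothetical input_mgr->Example(Object()).
		return true;
		}
};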
- } + } private: const string name; @@ -78,10 +82,11 @@ public: : threading::OutputMessage("SendEntry", reader), val(val) { } - virtual bool Process() { + virtual bool Process() + { input_mgr->SendEntry(Object(), val); return true; - } + } private: Value* *val; @@ -92,10 +97,11 @@ public: EndCurrentSendMessage(ReaderFrontend* reader) : threading::OutputMessage("EndCurrentSend", reader) {} - virtual bool Process() { + virtual bool Process() + { input_mgr->EndCurrentSend(Object()); return true; - } + } private: }; @@ -105,9 +111,10 @@ public: ReaderClosedMessage(ReaderFrontend* reader) : threading::OutputMessage("ReaderClosed", reader) {} - virtual bool Process() { + virtual bool Process() + { return input_mgr->RemoveStreamContinuation(Object()); - } + } private: }; @@ -119,12 +126,16 @@ public: DisableMessage(ReaderFrontend* writer) : threading::OutputMessage("Disable", writer) {} - virtual bool Process() { Object()->SetDisable(); return true; } + virtual bool Process() + { + Object()->SetDisable(); + return true; + } }; ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() -{ + { buf = 0; buf_len = 1024; disabled = true; // disabled will be set correcty in init. @@ -132,45 +143,45 @@ ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() frontend = arg_frontend; SetName(frontend->Name()); -} + } ReaderBackend::~ReaderBackend() -{ - -} + { + } void ReaderBackend::Put(Value* *val) -{ + { SendOut(new PutMessage(frontend, val)); -} + } void ReaderBackend::Delete(Value* *val) -{ + { SendOut(new DeleteMessage(frontend, val)); -} + } void ReaderBackend::Clear() -{ + { SendOut(new ClearMessage(frontend)); -} + } void ReaderBackend::SendEvent(const string& name, const int num_vals, Value* *vals) -{ + { SendOut(new SendEventMessage(frontend, name, num_vals, vals)); -} + } void ReaderBackend::EndCurrentSend() -{ + { SendOut(new EndCurrentSendMessage(frontend)); -} + } void ReaderBackend::SendEntry(Value* *vals) -{ + { SendOut(new SendEntryMessage(frontend, vals)); -} + } -bool ReaderBackend::Init(string arg_source, int mode, const int arg_num_fields, const threading::Field* const* arg_fields) -{ +bool ReaderBackend::Init(string arg_source, int mode, const int arg_num_fields, + const threading::Field* const* arg_fields) + { source = arg_source; SetName("InputReader/"+source); @@ -180,89 +191,90 @@ bool ReaderBackend::Init(string arg_source, int mode, const int arg_num_fields, // disable if DoInit returns error. 
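// Init() is the hand-off into the concrete readers: a backend only has to
// implement the Do*() hooks and use SendEntry()/Put()/EndCurrentSend() to
// ship results back to the main thread. A minimal sketch of such a backend,
// using the same hook signatures as the Ascii and Benchmark readers below
// (OneShot is a hypothetical class that assumes every requested field is of
// type count):

class OneShot : public ReaderBackend {
public:
	OneShot(ReaderFrontend* frontend) : ReaderBackend(frontend) {}

protected:
	virtual bool DoInit(string path, int mode, int arg_num_fields,
			const threading::Field* const* arg_fields)
		{
		num = arg_num_fields;
		DoUpdate();
		return true;
		}

	virtual bool DoUpdate()
		{
		Value** vals = new Value*[num];

		for ( int i = 0; i < num; i++ )
			{
			vals[i] = new Value(TYPE_COUNT, true);
			vals[i]->val.uint_val = i;
			}

		SendEntry(vals); // ownership passes to the main thread
		EndCurrentSend();
		return true;
		}

	virtual void DoClose() {}

	virtual bool DoHeartbeat(double network_time, double current_time)
		{
		ReaderBackend::DoHeartbeat(network_time, current_time);
		return true;
		}

private:
	int num;
};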
int success = DoInit(arg_source, mode, arg_num_fields, arg_fields); - if ( !success ) { + if ( !success ) + { Error("Init failed"); DisableFrontend(); - } + } disabled = !success; return success; -} + } void ReaderBackend::Close() -{ + { DoClose(); disabled = true; DisableFrontend(); SendOut(new ReaderClosedMessage(frontend)); - if ( fields != 0 ) { - - for ( unsigned int i = 0; i < num_fields; i++ ) { + if ( fields != 0 ) + { + for ( unsigned int i = 0; i < num_fields; i++ ) delete(fields[i]); - } delete[] (fields); fields = 0; + } } -} bool ReaderBackend::Update() -{ + { if ( disabled ) return false; bool success = DoUpdate(); - if ( !success ) { + if ( !success ) DisableFrontend(); - } return success; -} + } void ReaderBackend::DisableFrontend() -{ - disabled = true; // we also set disabled here, because there still may be other messages queued and we will dutifully ignore these from now + { + disabled = true; + // we also set disabled here, because there still may be other messages queued and we will dutifully ignore these from now SendOut(new DisableMessage(frontend)); -} + } bool ReaderBackend::DoHeartbeat(double network_time, double current_time) -{ + { MsgThread::DoHeartbeat(network_time, current_time); - return true; -} - -TransportProto ReaderBackend::StringToProto(const string &proto) { - if ( proto == "unknown" ) { - return TRANSPORT_UNKNOWN; - } else if ( proto == "tcp" ) { - return TRANSPORT_TCP; - } else if ( proto == "udp" ) { - return TRANSPORT_UDP; - } else if ( proto == "icmp" ) { - return TRANSPORT_ICMP; } +TransportProto ReaderBackend::StringToProto(const string &proto) + { + if ( proto == "unknown" ) + return TRANSPORT_UNKNOWN; + else if ( proto == "tcp" ) + return TRANSPORT_TCP; + else if ( proto == "udp" ) + return TRANSPORT_UDP; + else if ( proto == "icmp" ) + return TRANSPORT_ICMP; + Error(Fmt("Tried to parse invalid/unknown protocol: %s", proto.c_str())); return TRANSPORT_UNKNOWN; -} + } // more or less verbose copy from IPAddr.cc -- which uses reporter -Value::addr_t ReaderBackend::StringToAddr(const string &s) { +Value::addr_t ReaderBackend::StringToAddr(const string &s) + { Value::addr_t val; if ( s.find(':') == std::string::npos ) // IPv4. 
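	// The address family is chosen purely by the presence of a colon: anything
	// without one is handed to inet_aton() as IPv4 (with a malformed string
	// being reported and zeroed out below), while anything containing a colon
	// is parsed as IPv6 further down. For example (hypothetical inputs),
	// "192.168.17.1" yields family IPv4 and "2001:db8::1" yields family IPv6.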
{ val.family = IPv4; - if ( inet_aton(s.c_str(), &(val.in.in4)) <= 0 ) { + if ( inet_aton(s.c_str(), &(val.in.in4)) <= 0 ) + { Error(Fmt("Bad addres: %s", s.c_str())); memset(&val.in.in4.s_addr, 0, sizeof(val.in.in4.s_addr)); - } + } } @@ -277,6 +289,6 @@ Value::addr_t ReaderBackend::StringToAddr(const string &s) { } return val; -} + } } diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index 6b3c2e6a67..f61fd357b9 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -46,19 +46,23 @@ public: }; -ReaderFrontend::ReaderFrontend(bro_int_t type) { +ReaderFrontend::ReaderFrontend(bro_int_t type) + { disabled = initialized = false; ty_name = ""; backend = input_mgr->CreateBackend(this, type); assert(backend); backend->Start(); -} + } -ReaderFrontend::~ReaderFrontend() { -} +ReaderFrontend::~ReaderFrontend() + { + } -void ReaderFrontend::Init(string arg_source, int mode, const int num_fields, const threading::Field* const* fields) { +void ReaderFrontend::Init(string arg_source, int mode, const int num_fields, + const threading::Field* const* fields) + { if ( disabled ) return; @@ -69,39 +73,43 @@ void ReaderFrontend::Init(string arg_source, int mode, const int num_fields, con initialized = true; backend->SendIn(new InitMessage(backend, arg_source, mode, num_fields, fields)); -} + } -void ReaderFrontend::Update() { +void ReaderFrontend::Update() + { if ( disabled ) return; - if ( !initialized ) { + if ( !initialized ) + { reporter->Error("Tried to call update on uninitialized reader"); return; - } + } backend->SendIn(new UpdateMessage(backend)); -} + } -void ReaderFrontend::Close() { +void ReaderFrontend::Close() + { if ( disabled ) return; - if ( !initialized ) { + if ( !initialized ) + { reporter->Error("Tried to call finish on uninitialized reader"); return; - } + } backend->SendIn(new CloseMessage(backend)); -} + } string ReaderFrontend::Name() const -{ + { if ( source.size() ) return ty_name; return ty_name + "/" + source; -} + } } diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index a167408a0e..c798c21a5e 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -23,69 +23,73 @@ using threading::Field; FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position) : name(arg_name), type(arg_type) -{ + { position = arg_position; secondary_position = -1; present = true; -} + } -FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_subtype, int arg_position) +FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, + const TypeTag& arg_subtype, int arg_position) : name(arg_name), type(arg_type), subtype(arg_subtype) -{ + { position = arg_position; secondary_position = -1; present = true; -} + } FieldMapping::FieldMapping(const FieldMapping& arg) : name(arg.name), type(arg.type), subtype(arg.subtype), present(arg.present) -{ + { position = arg.position; secondary_position = arg.secondary_position; -} + } -FieldMapping FieldMapping::subType() { +FieldMapping FieldMapping::subType() + { return FieldMapping(name, subtype, position); -} + } Ascii::Ascii(ReaderFrontend *frontend) : ReaderBackend(frontend) -{ + { file = 0; - //keyMap = new map(); - separator.assign( (const char*) BifConst::InputAscii::separator->Bytes(), BifConst::InputAscii::separator->Len()); - if ( separator.size() != 1 ) { + separator.assign( (const char*) BifConst::InputAscii::separator->Bytes(), + BifConst::InputAscii::separator->Len()); + if ( 
separator.size() != 1 ) Error("separator length has to be 1. Separator will be truncated."); - } - set_separator.assign( (const char*) BifConst::InputAscii::set_separator->Bytes(), BifConst::InputAscii::set_separator->Len()); - if ( set_separator.size() != 1 ) { + set_separator.assign( (const char*) BifConst::InputAscii::set_separator->Bytes(), + BifConst::InputAscii::set_separator->Len()); + if ( set_separator.size() != 1 ) Error("set_separator length has to be 1. Separator will be truncated."); - } - empty_field.assign( (const char*) BifConst::InputAscii::empty_field->Bytes(), BifConst::InputAscii::empty_field->Len()); - - unset_field.assign( (const char*) BifConst::InputAscii::unset_field->Bytes(), BifConst::InputAscii::unset_field->Len()); + empty_field.assign( (const char*) BifConst::InputAscii::empty_field->Bytes(), + BifConst::InputAscii::empty_field->Len()); + unset_field.assign( (const char*) BifConst::InputAscii::unset_field->Bytes(), + BifConst::InputAscii::unset_field->Len()); + } Ascii::~Ascii() -{ + { DoClose(); -} + } void Ascii::DoClose() -{ - if ( file != 0 ) { + { + if ( file != 0 ) + { file->close(); delete(file); file = 0; + } } -} bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) -{ + { fname = path; mode = arg_mode; mtime = 0; @@ -93,124 +97,135 @@ bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* c num_fields = arg_num_fields; fields = arg_fields; - if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { + if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) + { Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); return false; - } + } file = new ifstream(path.c_str()); - if ( !file->is_open() ) { + if ( !file->is_open() ) + { Error(Fmt("Init: cannot open %s", fname.c_str())); delete(file); file = 0; return false; - } + } - if ( ReadHeader(false) == false ) { + if ( ReadHeader(false) == false ) + { Error(Fmt("Init: cannot open %s; headers are incorrect", fname.c_str())); file->close(); delete(file); file = 0; return false; - } + } DoUpdate(); return true; -} + } -bool Ascii::ReadHeader(bool useCached) { +bool Ascii::ReadHeader(bool useCached) + { // try to read the header line... string line; map ifields; - if ( !useCached ) { - if ( !GetLine(line) ) { + if ( !useCached ) + { + if ( !GetLine(line) ) + { Error("could not read first line"); return false; - } - - + } headerline = line; - - } else { + } + else line = headerline; - } // construct list of field names. istringstream splitstream(line); int pos=0; - while ( splitstream ) { + while ( splitstream ) + { string s; if ( !getline(splitstream, s, separator[0])) break; ifields[s] = pos; pos++; - } + } //printf("Updating fields from description %s\n", line.c_str()); columnMap.clear(); - for ( unsigned int i = 0; i < num_fields; i++ ) { + for ( unsigned int i = 0; i < num_fields; i++ ) + { const Field* field = fields[i]; map::iterator fit = ifields.find(field->name); - if ( fit == ifields.end() ) { - if ( field->optional ) { + if ( fit == ifields.end() ) + { + if ( field->optional ) + { // we do not really need this field. mark it as not present and always send an undef back. 
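	// In other words, the header line drives the mapping: each requested field
	// is located by name among the input columns, a missing optional field is
	// recorded as a not-present placeholder so that every line still yields a
	// value for it, and a missing non-optional field aborts initialization
	// with an error. Fields carrying the ATTR_TYPE_COLUMN annotation handled
	// in Manager::UnrollRecordType() additionally record the position of the
	// column holding the transport protocol for a port value.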
FieldMapping f(field->name, field->type, field->subtype, -1); f.present = false; columnMap.push_back(f); continue; - } + } Error(Fmt("Did not find requested field %s in input data file %s.", field->name.c_str(), fname.c_str())); return false; - } + } FieldMapping f(field->name, field->type, field->subtype, ifields[field->name]); - if ( field->secondary_name != "" ) { + if ( field->secondary_name != "" ) + { map::iterator fit2 = ifields.find(field->secondary_name); - if ( fit2 == ifields.end() ) { + if ( fit2 == ifields.end() ) + { Error(Fmt("Could not find requested port type field %s in input data file.", field->secondary_name.c_str())); return false; - } + } f.secondary_position = ifields[field->secondary_name]; - } + } columnMap.push_back(f); - } + } // well, that seems to have worked... return true; -} + } -bool Ascii::GetLine(string& str) { - while ( getline(*file, str) ) { - if ( str[0] != '#' ) { +bool Ascii::GetLine(string& str) + { + while ( getline(*file, str) ) + { + if ( str[0] != '#' ) return true; - } - if ( str.compare(0,8, "#fields\t") == 0 ) { + if ( str.compare(0,8, "#fields\t") == 0 ) + { str = str.substr(8); return true; + } } - } return false; -} - - -Value* Ascii::EntryToVal(string s, FieldMapping field) { - - if ( s.compare(unset_field) == 0 ) { // field is not set... - return new Value(field.type, false); } + +Value* Ascii::EntryToVal(string s, FieldMapping field) + { + + if ( s.compare(unset_field) == 0 ) // field is not set... + return new Value(field.type, false); + Value* val = new Value(field.type, true); switch ( field.type ) { @@ -220,14 +235,15 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { break; case TYPE_BOOL: - if ( s == "T" ) { + if ( s == "T" ) val->val.int_val = 1; - } else if ( s == "F" ) { + else if ( s == "F" ) val->val.int_val = 0; - } else { + else + { Error(Fmt("Field: %s Invalid value for boolean: %s", field.name.c_str(), s.c_str())); return false; - } + } break; case TYPE_INT: @@ -250,7 +266,8 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { val->val.port_val.proto = TRANSPORT_UNKNOWN; break; - case TYPE_SUBNET: { + case TYPE_SUBNET: + { size_t pos = s.find("/"); if ( pos == s.npos ) { Error(Fmt("Invalid value for subnet: %s", s.c_str())); @@ -261,8 +278,8 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { val->val.subnet_val.prefix = StringToAddr(addr); val->val.subnet_val.length = width; - } break; + } case TYPE_ADDR: val->val.addr_val = StringToAddr(s); @@ -287,47 +304,56 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { Value** lvals = new Value* [length]; - if ( field.type == TYPE_TABLE ) { + if ( field.type == TYPE_TABLE ) + { val->val.set_val.vals = lvals; val->val.set_val.size = length; - } else if ( field.type == TYPE_VECTOR ) { + } + else if ( field.type == TYPE_VECTOR ) + { val->val.vector_val.vals = lvals; val->val.vector_val.size = length; - } else { + } + else + { assert(false); - } + } if ( length == 0 ) break; //empty istringstream splitstream(s); - while ( splitstream ) { + while ( splitstream ) + { string element; if ( !getline(splitstream, element, set_separator[0]) ) break; - if ( pos >= length ) { - Error(Fmt("Internal error while parsing set. pos %d >= length %d. Element: %s", pos, length, element.c_str())); + if ( pos >= length ) + { + Error(Fmt("Internal error while parsing set. pos %d >= length %d." 
+ " Element: %s", pos, length, element.c_str())); break; - } + } Value* newval = EntryToVal(element, field.subType()); - if ( newval == 0 ) { + if ( newval == 0 ) + { Error("Error while reading set"); return 0; - } + } lvals[pos] = newval; pos++; - - } + } - if ( pos != length ) { + if ( pos != length ) + { Error("Internal error while parsing set: did not find all elements"); return 0; - } + } break; } @@ -340,24 +366,23 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { } return val; - -} + } // read the entire file and send appropriate thingies back to InputMgr -bool Ascii::DoUpdate() { +bool Ascii::DoUpdate() + { switch ( mode ) { case REREAD: // check if the file has changed struct stat sb; - if ( stat(fname.c_str(), &sb) == -1 ) { + if ( stat(fname.c_str(), &sb) == -1 ) + { Error(Fmt("Could not get stat for %s", fname.c_str())); return false; - } + } - if ( sb.st_mtime <= mtime ) { - // no change + if ( sb.st_mtime <= mtime ) // no change return true; - } mtime = sb.st_mtime; // file changed. reread. @@ -366,57 +391,56 @@ bool Ascii::DoUpdate() { case MANUAL: case STREAM: - // dirty, fix me. (well, apparently after trying seeking, etc - this is not that bad) - if ( file && file->is_open() ) { - if ( mode == STREAM ) { + // dirty, fix me. (well, apparently after trying seeking, etc + // - this is not that bad) + if ( file && file->is_open() ) + { + if ( mode == STREAM ) + { file->clear(); // remove end of file evil bits if ( !ReadHeader(true) ) // in case filters changed - { return false; // header reading failed - } + break; - } + } file->close(); - } + } file = new ifstream(fname.c_str()); - if ( !file->is_open() ) { + if ( !file->is_open() ) + { Error(Fmt("cannot open %s", fname.c_str())); return false; - } + } - if ( ReadHeader(false) == false ) { + if ( ReadHeader(false) == false ) + { return false; - } + } break; default: assert(false); - } - - - // - - // file->seekg(0, ios::beg); // do not forget clear. - - + } string line; - while ( GetLine(line ) ) { + while ( GetLine(line ) ) + { // split on tabs istringstream splitstream(line); map stringfields; int pos = 0; - while ( splitstream ) { + while ( splitstream ) + { string s; if ( !getline(splitstream, s, separator[0]) ) break; stringfields[pos] = s; pos++; - } + } pos--; // for easy comparisons of max element. @@ -426,69 +450,60 @@ bool Ascii::DoUpdate() { int fpos = 0; for ( vector::iterator fit = columnMap.begin(); fit != columnMap.end(); - fit++ ){ + fit++ ) + { - if ( ! fit->present ) { + if ( ! fit->present ) + { // add non-present field fields[fpos] = new Value((*fit).type, false); fpos++; continue; - } + } assert(fit->position >= 0 ); - if ( (*fit).position > pos || (*fit).secondary_position > pos ) { + if ( (*fit).position > pos || (*fit).secondary_position > pos ) + { Error(Fmt("Not enough fields in line %s. 
Found %d fields, want positions %d and %d", line.c_str(), pos, (*fit).position, (*fit).secondary_position)); return false; - } + } Value* val = EntryToVal(stringfields[(*fit).position], *fit); - if ( val == 0 ) { + if ( val == 0 ) + { Error("Could not convert String value to Val"); return false; - } + } - if ( (*fit).secondary_position != -1 ) { + if ( (*fit).secondary_position != -1 ) + { // we have a port definition :) assert(val->type == TYPE_PORT ); // Error(Fmt("Got type %d != PORT with secondary position!", val->type)); val->val.port_val.proto = StringToProto(stringfields[(*fit).secondary_position]); - } + } fields[fpos] = val; fpos++; - } + } //printf("fpos: %d, second.num_fields: %d\n", fpos, (*it).second.num_fields); assert ( (unsigned int) fpos == num_fields ); - if ( mode == STREAM ) { + if ( mode == STREAM ) Put(fields); - } else { + else SendEntry(fields); } - /* Do not do this, ownership changes to other thread - * for ( unsigned int i = 0; i < (*it).second.num_fields; i++ ) { - delete fields[i]; - } - delete [] fields; - */ - - } - - - //file->clear(); // remove end of file evil bits - //file->seekg(0, ios::beg); // and seek to start. - - if ( mode != STREAM ) { + if ( mode != STREAM ) EndCurrentSend(); - } - + return true; -} + } bool Ascii::DoHeartbeat(double network_time, double current_time) { @@ -500,12 +515,13 @@ bool Ascii::DoHeartbeat(double network_time, double current_time) break; case REREAD: case STREAM: - Update(); // call update and not DoUpdate, because update actually checks disabled. + Update(); // call update and not DoUpdate, because update + // checks disabled. break; default: assert(false); } return true; -} + } diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index deff2b038d..29f0070fec 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -22,7 +22,7 @@ using threading::Field; Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) -{ + { multiplication_factor = double(BifConst::InputBenchmark::factor); autospread = double(BifConst::InputBenchmark::autospread); spread = int(BifConst::InputBenchmark::spread); @@ -32,19 +32,19 @@ Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) timedspread = double(BifConst::InputBenchmark::timedspread); heart_beat_interval = double(BifConst::Threading::heart_beat_interval); -} + } Benchmark::~Benchmark() -{ + { DoClose(); -} + } void Benchmark::DoClose() -{ -} + { + } bool Benchmark::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) -{ + { mode = arg_mode; num_fields = arg_num_fields; @@ -54,18 +54,20 @@ bool Benchmark::DoInit(string path, int arg_mode, int arg_num_fields, const Fiel if ( autospread != 0.0 ) autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); - if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { + if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) + { Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); return false; - } + } heartbeatstarttime = CurrTime(); DoUpdate(); return true; -} + } -string Benchmark::RandomString(const int len) { +string Benchmark::RandomString(const int len) + { string s(len, ' '); static const char values[] = @@ -73,65 +75,65 @@ string Benchmark::RandomString(const int len) { "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"; - for (int i = 0; i < len; ++i) { + for (int i = 0; i < len; ++i) s[i] = values[rand() / (RAND_MAX / sizeof(values))]; - } 
return s; -} + } -double Benchmark::CurrTime() { +double Benchmark::CurrTime() + { struct timeval tv; assert ( gettimeofday(&tv, 0) >= 0 ); return double(tv.tv_sec) + double(tv.tv_usec) / 1e6; -} + } // read the entire file and send appropriate thingies back to InputMgr -bool Benchmark::DoUpdate() { +bool Benchmark::DoUpdate() + { int linestosend = num_lines * heart_beat_interval; - for ( int i = 0; i < linestosend; i++ ) { + for ( int i = 0; i < linestosend; i++ ) + { Value** field = new Value*[num_fields]; - for (unsigned int j = 0; j < num_fields; j++ ) { + for (unsigned int j = 0; j < num_fields; j++ ) field[j] = EntryToVal(fields[j]->type, fields[j]->subtype); - } - if ( mode == STREAM ) { + if ( mode == STREAM ) // do not do tracking, spread out elements over the second that we have... Put(field); - } else { + else SendEntry(field); - } - if ( stopspreadat == 0 || num_lines < stopspreadat ) { + if ( stopspreadat == 0 || num_lines < stopspreadat ) + { if ( spread != 0 ) usleep(spread); if ( autospread_time != 0 ) usleep( autospread_time ); - } + } - if ( timedspread != 0.0 ) { + if ( timedspread != 0.0 ) + { double diff; - do { + do diff = CurrTime() - heartbeatstarttime; - //printf("%d %f\n", i, diff); - //} while ( diff < i/threading::Manager::HEART_BEAT_INTERVAL*(num_lines + (num_lines * timedspread) ) ); - } while ( diff/heart_beat_interval < i/(linestosend + (linestosend * timedspread) ) ); - //} while ( diff < 0.8); - } + while ( diff/heart_beat_interval < i/(linestosend + + (linestosend * timedspread) ) ); + } } - if ( mode != STREAM ) { + if ( mode != STREAM ) EndCurrentSend(); - } return true; } -threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) { +threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) + { Value* val = new Value(type, true); // basically construct something random from the fields that we want. @@ -170,7 +172,8 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) { val->val.port_val.proto = TRANSPORT_UNKNOWN; break; - case TYPE_SUBNET: { + case TYPE_SUBNET: + { val->val.subnet_val.prefix = StringToAddr("192.168.17.1"); val->val.subnet_val.length = 16; } @@ -192,28 +195,32 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) { Value** lvals = new Value* [length]; - if ( type == TYPE_TABLE ) { + if ( type == TYPE_TABLE ) + { val->val.set_val.vals = lvals; val->val.set_val.size = length; - } else if ( type == TYPE_VECTOR ) { + } + else if ( type == TYPE_VECTOR ) + { val->val.vector_val.vals = lvals; val->val.vector_val.size = length; - } else { + } + else assert(false); - } if ( length == 0 ) break; //empty - for ( unsigned int pos = 0; pos < length; pos++ ) { - + for ( unsigned int pos = 0; pos < length; pos++ ) + { Value* newval = EntryToVal(subtype, TYPE_ENUM); - if ( newval == 0 ) { + if ( newval == 0 ) + { Error("Error while reading set"); return 0; - } + } lvals[pos] = newval; - } + } break; } @@ -226,20 +233,11 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) { return val; -} + } bool Benchmark::DoHeartbeat(double network_time, double current_time) { - /* - * This does not work the way I envisioned it, because the queueing is the problem. - printf("%f\n", CurrTime() - current_time); - if ( CurrTime() - current_time > 0.25 ) { - // event has hung for a time. refuse. 
- SendEvent("EndBenchmark", 0, 0); - return true; - } */ - ReaderBackend::DoHeartbeat(network_time, current_time); num_lines = (int) ( (double) num_lines*multiplication_factor); num_lines += add; @@ -251,7 +249,8 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) break; case REREAD: case STREAM: - if ( multiplication_factor != 1 || add != 0 ) { + if ( multiplication_factor != 1 || add != 0 ) + { // we have to document at what time we changed the factor to what value. Value** v = new Value*[2]; v[0] = new Value(TYPE_COUNT, true); @@ -260,12 +259,11 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) v[1]->val.double_val = CurrTime(); SendEvent("lines_changed", 2, v); - } + } - if ( autospread != 0.0 ) { - autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); + if ( autospread != 0.0 ) // because executing this in every loop is apparently too expensive. - } + autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); Update(); // call update and not DoUpdate, because update actually checks disabled. diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index f656be769c..43c782de29 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -24,79 +24,86 @@ using threading::Value; using threading::Field; Raw::Raw(ReaderFrontend *frontend) : ReaderBackend(frontend) -{ + { file = 0; in = 0; - //keyMap = new map(); - separator.assign( (const char*) BifConst::InputRaw::record_separator->Bytes(), BifConst::InputRaw::record_separator->Len()); - if ( separator.size() != 1 ) { + if ( separator.size() != 1 ) Error("separator length has to be 1. Separator will be truncated."); } -} - Raw::~Raw() -{ + { DoClose(); -} + } void Raw::DoClose() -{ - if ( file != 0 ) { + { + if ( file != 0 ) + { Close(); + } } -} bool Raw::Open() -{ - if ( execute ) { + { + if ( execute ) + { file = popen(fname.c_str(), "r"); - if ( file == NULL ) { + if ( file == NULL ) + { Error(Fmt("Could not execute command %s", fname.c_str())); return false; + } } - } else { + else + { file = fopen(fname.c_str(), "r"); - if ( file == NULL ) { + if ( file == NULL ) + { Error(Fmt("Init: cannot open %s", fname.c_str())); return false; + } } - } in = new boost::fdistream(fileno(file)); - if ( execute && mode == STREAM ) { + if ( execute && mode == STREAM ) + { fcntl(fileno(file), F_SETFL, O_NONBLOCK); - } + } return true; -} + } bool Raw::Close() -{ - if ( file == NULL ) { + { + if ( file == NULL ) + { InternalError(Fmt("Trying to close closed file for stream %s", fname.c_str())); return false; - } + } - if ( execute ) { + if ( execute ) + { delete(in); pclose(file); - } else { + } + else + { delete(in); fclose(file); - } + } in = NULL; file = NULL; return true; -} + } bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) -{ + { fname = path; mode = arg_mode; mtime = 0; @@ -107,24 +114,30 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con num_fields = arg_num_fields; fields = arg_fields; - if ( path.length() == 0 ) { + if ( path.length() == 0 ) + { Error("No source path provided"); return false; - } + } - if ( arg_num_fields != 1 ) { - Error("Filter for raw reader contains more than one field. Filters for the raw reader may only contain exactly one string field. Filter ignored."); + if ( arg_num_fields != 1 ) + { + Error("Filter for raw reader contains more than one field. " + "Filters for the raw reader may only contain exactly one string field. 
" + "Filter ignored."); return false; - } + } - if ( fields[0]->type != TYPE_STRING ) { + if ( fields[0]->type != TYPE_STRING ) + { Error("Filter for raw reader contains a field that is not of type string."); return false; - } + } // do Initialization char last = path[path.length()-1]; - if ( last == '|' ) { + if ( last == '|' ) + { execute = true; fname = path.substr(0, fname.length() - 1); @@ -137,19 +150,17 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con } else { execute = false; - if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) { + if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) + { Error(Fmt("Unsupported read mode %d for source %s", mode, fname.c_str())); return false; - } + } result = Open(); + } - } - - if ( result == false ) { + if ( result == false ) return result; - } - #ifdef DEBUG Debug(DBG_INPUT, "Raw reader created, will perform first update"); @@ -162,62 +173,68 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con Debug(DBG_INPUT, "First update went through"); #endif return true; -} - - -bool Raw::GetLine(string& str) { - while ( getline(*in, str, separator[0]) ) { - return true; } + +bool Raw::GetLine(string& str) + { + while ( getline(*in, str, separator[0]) ) + return true; + return false; -} + } // read the entire file and send appropriate thingies back to InputMgr -bool Raw::DoUpdate() { - if ( firstrun ) { +bool Raw::DoUpdate() + { + if ( firstrun ) firstrun = false; - } else { + else + { switch ( mode ) { case REREAD: + { // check if the file has changed struct stat sb; - if ( stat(fname.c_str(), &sb) == -1 ) { + if ( stat(fname.c_str(), &sb) == -1 ) + { Error(Fmt("Could not get stat for %s", fname.c_str())); return false; - } + } - if ( sb.st_mtime <= mtime ) { + if ( sb.st_mtime <= mtime ) // no change return true; - } mtime = sb.st_mtime; // file changed. reread. // fallthrough + } case MANUAL: case STREAM: - if ( mode == STREAM && file != NULL && in != NULL ) { + if ( mode == STREAM && file != NULL && in != NULL ) + { //fpurge(file); in->clear(); // remove end of file evil bits break; - } + } Close(); - if ( !Open() ) { + if ( !Open() ) return false; - } + break; default: assert(false); } - } + } string line; - while ( GetLine(line) ) { + while ( GetLine(line) ) + { assert (num_fields == 1); Value** fields = new Value*[1]; @@ -228,14 +245,14 @@ bool Raw::DoUpdate() { fields[0] = val; Put(fields); - } + } return true; -} + } bool Raw::DoHeartbeat(double network_time, double current_time) -{ + { ReaderBackend::DoHeartbeat(network_time, current_time); switch ( mode ) { @@ -244,12 +261,12 @@ bool Raw::DoHeartbeat(double network_time, double current_time) break; case REREAD: case STREAM: - Update(); // call update and not DoUpdate, because update actually checks disabled. + Update(); // call update and not DoUpdate, because update + // checks disabled. break; default: assert(false); } return true; -} - + } From 8cd36f158bfa12ff593c12aae314670e709af2b3 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 25 May 2012 12:37:35 -0500 Subject: [PATCH 315/651] Add Teredo tunnel decapsulation. Also fix header truncation check for IPv6 No Next header and add an "ipv6_no_next" weird for such packets that aren't tunneled over Teredo (which it calls "bubbles" and are used to create mappings in NATs). 
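For illustration only (not part of this change): decapsulation is enabled through the
dpd_config entries added below, so a site that needs Teredo detection on an additional
UDP port can supply its own entry from local policy in the same way. A minimal sketch,
assuming the ANALYZER_TEREDO tag introduced here and that a later dpd_config entry for
the same analyzer tag takes precedence over the default one; the extra port number and
the constant name are made up for the example:

    # Hypothetical site policy, not part of this patch.
    const my_teredo_ports = { 3544/udp, 3545/udp };
    redef dpd_config += { [ANALYZER_TEREDO] = [$ports = my_teredo_ports] };
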
--- scripts/base/frameworks/dpd/dpd.sig | 6 + scripts/base/frameworks/tunnels/__load__.bro | 3 - scripts/base/frameworks/tunnels/main.bro | 8 ++ src/AYIYA.cc | 5 +- src/AYIYA.h | 3 +- src/Analyzer.cc | 7 +- src/AnalyzerTags.h | 2 +- src/CMakeLists.txt | 1 + src/Sessions.cc | 16 +++ src/Teredo.cc | 126 +++++++++++++++++++ src/Teredo.h | 63 ++++++++++ src/Tunnels.h | 5 + src/ayiya-protocol.pac | 2 +- src/event.bif | 5 +- 14 files changed, 236 insertions(+), 16 deletions(-) create mode 100644 src/Teredo.cc create mode 100644 src/Teredo.h diff --git a/scripts/base/frameworks/dpd/dpd.sig b/scripts/base/frameworks/dpd/dpd.sig index b1fb9e6f19..305383809d 100644 --- a/scripts/base/frameworks/dpd/dpd.sig +++ b/scripts/base/frameworks/dpd/dpd.sig @@ -156,6 +156,12 @@ signature dpd_ayiya { enable "ayiya" } +signature dpd_teredo { + ip-proto = udp + payload /^(\x00\x00)|(\x00\x01)|([\x60-\x6f])/ + enable "teredo" +} + signature dpd_socks_client { ip-proto == tcp # '32' is a rather arbitrary max length for the user name. diff --git a/scripts/base/frameworks/tunnels/__load__.bro b/scripts/base/frameworks/tunnels/__load__.bro index 3def3511f5..a10fe855df 100644 --- a/scripts/base/frameworks/tunnels/__load__.bro +++ b/scripts/base/frameworks/tunnels/__load__.bro @@ -1,4 +1 @@ @load ./main - -const ports = { 5072/udp } &redef; -redef dpd_config += { [ANALYZER_AYIYA] = [$ports = ports] }; diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro index 743098cd6d..4076e79cd5 100644 --- a/scripts/base/frameworks/tunnels/main.bro +++ b/scripts/base/frameworks/tunnels/main.bro @@ -74,6 +74,14 @@ export { global active: table[conn_id] of Info = table() &synchronized &read_expire=24hrs &expire_func=expire; } +const ayiya_ports = { 5072/udp }; +redef dpd_config += { [ANALYZER_AYIYA] = [$ports = ayiya_ports] }; + +const teredo_ports = { 3544/udp }; +redef dpd_config += { [ANALYZER_TEREDO] = [$ports = teredo_ports] }; + +redef likely_server_ports += { ayiya_ports, teredo_ports }; + event bro_init() &priority=5 { Log::create_stream(Tunnel::LOG, [$columns=Info]); diff --git a/src/AYIYA.cc b/src/AYIYA.cc index ef845a5368..c525a73b6c 100644 --- a/src/AYIYA.cc +++ b/src/AYIYA.cc @@ -4,7 +4,6 @@ AYIYA_Analyzer::AYIYA_Analyzer(Connection* conn) : Analyzer(AnalyzerTag::AYIYA, conn) { interp = new binpac::AYIYA::AYIYA_Conn(this); - did_session_done = 0; } AYIYA_Analyzer::~AYIYA_Analyzer() @@ -15,9 +14,7 @@ AYIYA_Analyzer::~AYIYA_Analyzer() void AYIYA_Analyzer::Done() { Analyzer::Done(); - - if ( ! did_session_done ) - Event(udp_session_done); + Event(udp_session_done); } void AYIYA_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, int seq, const IP_Hdr* ip, int caplen) diff --git a/src/AYIYA.h b/src/AYIYA.h index bf1fb0bf2c..2122cafee6 100644 --- a/src/AYIYA.h +++ b/src/AYIYA.h @@ -16,14 +16,13 @@ public: { return new AYIYA_Analyzer(conn); } static bool Available() + // TODO: specific option to turn off AYIYA analysis { return BifConst::Tunnel::max_depth > 0; } protected: friend class AnalyzerTimer; void ExpireTimer(double t); - int did_session_done; - binpac::AYIYA::AYIYA_Conn* interp; }; diff --git a/src/Analyzer.cc b/src/Analyzer.cc index c72af2a44a..9e30da0066 100644 --- a/src/Analyzer.cc +++ b/src/Analyzer.cc @@ -38,6 +38,7 @@ #include "SSH.h" #include "SSL.h" #include "Syslog-binpac.h" +#include "Teredo.h" #include "ConnSizeAnalyzer.h" // Keep same order here as in AnalyzerTag definition! 
@@ -135,9 +136,9 @@ const Analyzer::Config Analyzer::analyzer_configs[] = { { AnalyzerTag::SOCKS, "SOCKS", SOCKS_Analyzer::InstantiateAnalyzer, SOCKS_Analyzer::Available, 0, false }, - //{ AnalyzerTag::Teredo, "Teredo", - // Teredo_Analyzer::InstantiateAnalyzer, - // Teredo_Analyzer::Available, 0, false }, + { AnalyzerTag::Teredo, "TEREDO", + Teredo_Analyzer::InstantiateAnalyzer, + Teredo_Analyzer::Available, 0, false }, { AnalyzerTag::File, "FILE", File_Analyzer::InstantiateAnalyzer, File_Analyzer::Available, 0, false }, diff --git a/src/AnalyzerTags.h b/src/AnalyzerTags.h index 05de68f2b3..c77c229458 100644 --- a/src/AnalyzerTags.h +++ b/src/AnalyzerTags.h @@ -36,7 +36,7 @@ namespace AnalyzerTag { // Decapsulation Analyzers AYIYA, SOCKS, - //Teredo, + Teredo, // Other File, Backdoor, InterConn, SteppingStone, TCPStats, diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 50875cbcca..7d74aee1ce 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -397,6 +397,7 @@ set(bro_SRCS TCP_Endpoint.cc TCP_Reassembler.cc Telnet.cc + Teredo.cc Timer.cc Traverse.cc Trigger.cc diff --git a/src/Sessions.cc b/src/Sessions.cc index d3d5d294bc..704bb62a25 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -567,6 +567,19 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, return; } + case IPPROTO_NONE: + { + if ( encapsulation.LastType() == BifEnum::Tunnel::TEREDO ) + { + // TODO: raise bubble packet event + } + else + Weird("ipv6_no_next", hdr, pkt); + + Remove(f); + return; + } + default: Weird(fmt("unknown_protocol_%d", proto), hdr, pkt); Remove(f); @@ -682,6 +695,9 @@ bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, case IPPROTO_IPV6: min_hdr_len = sizeof(struct ip6_hdr); break; + case IPPROTO_NONE: + min_hdr_len = 0; + break; case IPPROTO_ICMP: case IPPROTO_ICMPV6: default: diff --git a/src/Teredo.cc b/src/Teredo.cc new file mode 100644 index 0000000000..39ecef286f --- /dev/null +++ b/src/Teredo.cc @@ -0,0 +1,126 @@ +#include "Teredo.h" +#include "IP.h" +#include "Reporter.h" + +void Teredo_Analyzer::Done() + { + Analyzer::Done(); + Event(udp_session_done); + } + +bool TeredoEncapsulation::DoParse(const u_char* data, int& len, + bool found_origin, bool found_auth) + { + if ( len < 2 ) + { + reporter->Weird(conn, "truncated_Teredo"); + } + + uint16 tag = ntohs((*((const uint16*)data))); + + if ( tag == 0 ) + { + // Origin Indication + if ( found_origin ) + // can't have multiple origin indications + return false; + + if ( len < 8 ) + { + reporter->Weird(conn, "truncated_Teredo_origin_indication"); + return false; + } + + origin_indication = data; + len -= 8; + data += 8; + return DoParse(data, len, true, found_auth); + } + else if ( tag == 1 ) + { + // Authentication + if ( found_origin || found_auth ) + // can't have multiple authentication headers and can't come after + // an origin indication + return false; + + if ( len < 4 ) + { + reporter->Weird(conn, "truncated_Teredo_authentication"); + return false; + } + + uint8 id_len = data[2]; + uint8 au_len = data[3]; + uint16 tot_len = 4 + id_len + au_len + 8 + 1; + + if ( len < tot_len ) + { + reporter->Weird(conn, "truncated_Teredo_authentication"); + return false; + } + + auth = data; + len -= tot_len; + data += tot_len; + return DoParse(data, len, found_origin, true); + } + else if ( ((tag & 0xf000)>>12) == 6 ) + { + // IPv6 + if ( len < 40 ) + { + reporter->Weird(conn, "truncated_IPv6_in_Teredo"); + return false; + } + + if ( len - 40 != ntohs(((const struct ip6_hdr*)data)->ip6_plen) ) + 
{ + reporter->Weird(conn, "Teredo_payload_len_mismatch"); + return false; + } + + inner_ip = data; + return true; + } + + return false; + } + +void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, + int seq, const IP_Hdr* ip, int caplen) + { + Analyzer::DeliverPacket(len, data, orig, seq, ip, caplen); + + if ( Conn()->GetEncapsulation().Depth() >= BifConst::Tunnel::max_depth ) + { + reporter->Weird(Conn(), "tunnel_depth"); + return; + } + + TeredoEncapsulation te(Conn()); + + if ( ! te.Parse(data, len) ) + { + ProtocolViolation("Invalid Teredo encapsulation", (const char*)data, + len); + return; + } + + IP_Hdr inner_ip((const struct ip6_hdr*) te.InnerIP(), false, len); + + ProtocolConfirmation(); + + // TODO: raise Teredo-specific events + + struct pcap_pkthdr fake_hdr; + fake_hdr.caplen = fake_hdr.len = len; + fake_hdr.ts.tv_sec = fake_hdr.ts.tv_usec = 0; + + Encapsulation encap(Conn()->GetEncapsulation()); + EncapsulatingConn ec(Conn(), BifEnum::Tunnel::TEREDO); + encap.Add(ec); + + sessions->DoNextPacket(network_time, &fake_hdr, &inner_ip, te.InnerIP(), 0, + encap); + } diff --git a/src/Teredo.h b/src/Teredo.h new file mode 100644 index 0000000000..0662099233 --- /dev/null +++ b/src/Teredo.h @@ -0,0 +1,63 @@ +#ifndef Teredo_h +#define Teredo_h + +#include "Analyzer.h" +#include "NetVar.h" + +class Teredo_Analyzer : public Analyzer { +public: + Teredo_Analyzer(Connection* conn) : Analyzer(AnalyzerTag::Teredo, conn) + {} + + virtual ~Teredo_Analyzer() + {} + + virtual void Done(); + + virtual void DeliverPacket(int len, const u_char* data, bool orig, + int seq, const IP_Hdr* ip, int caplen); + + static Analyzer* InstantiateAnalyzer(Connection* conn) + { return new Teredo_Analyzer(conn); } + + static bool Available() + //TODO: specific option to turn off Teredo analysis? + { return BifConst::Tunnel::max_depth > 0; } + +protected: + friend class AnalyzerTimer; + void ExpireTimer(double t); +}; + +class TeredoEncapsulation { +public: + TeredoEncapsulation(Connection* c) + : inner_ip(0), origin_indication(0), auth(0), conn(c) + {} + + /** + * Returns whether input data parsed as a valid Teredo encapsulation type. + * If it was valid, the len argument is decremented appropriately. + */ + bool Parse(const u_char* data, int& len) + { return DoParse(data, len, false, false); } + + const u_char* InnerIP() const + { return inner_ip; } + + const u_char* OriginIndication() const + { return origin_indication; } + + const u_char* Authentication() const + { return auth; } + +protected: + bool DoParse(const u_char* data, int& len, bool found_orig, bool found_au); + + const u_char* inner_ip; + const u_char* origin_indication; + const u_char* auth; + Connection* conn; +}; + +#endif diff --git a/src/Tunnels.h b/src/Tunnels.h index b8d693ea59..0f9c4f4107 100644 --- a/src/Tunnels.h +++ b/src/Tunnels.h @@ -103,6 +103,11 @@ public: return conns ? conns->size() : 0; } + BifEnum::Tunnel::Type LastType() const + { + return conns ? 
(*conns)[conns->size()-1].type : BifEnum::Tunnel::NONE; + } + VectorVal* GetVectorVal() const { VectorVal* vv = new VectorVal(new VectorType( diff --git a/src/ayiya-protocol.pac b/src/ayiya-protocol.pac index 7801708c7d..328d44ece7 100644 --- a/src/ayiya-protocol.pac +++ b/src/ayiya-protocol.pac @@ -13,4 +13,4 @@ type PDU = record { signature_len = (signature_byte >> 4) * 4; auth = auth_and_op >> 4; op = auth_and_op & 0xF; -} &byteorder = littleendian; \ No newline at end of file +} &byteorder = littleendian; diff --git a/src/event.bif b/src/event.bif index 94ee923240..7e428aabdd 100644 --- a/src/event.bif +++ b/src/event.bif @@ -765,8 +765,9 @@ event udp_reply%(u: connection%); event udp_contents%(u: connection, is_orig: bool, contents: string%); ## Generated when a UDP session for a supported protocol has finished. Some of -## Bro's application-layer UDP analyzers flag the end of a session by raising this -## event. Currently, the analyzers for DNS, NTP, Netbios, and Syslog support this. +## Bro's application-layer UDP analyzers flag the end of a session by raising +## this event. Currently, the analyzers for DNS, NTP, Netbios, Syslog, AYIYA, +## and Teredo support this. ## ## u: The connection record for the corresponding UDP flow. ## From 2e452dc29ff39c557d7de5e6b21af93ce2e80690 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 25 May 2012 10:49:17 -0700 Subject: [PATCH 316/651] remove last remnants of autostart, which has been removed for quite a while. --- scripts/base/frameworks/input/main.bro | 6 ------ src/input/Manager.cc | 1 - src/input/ReaderBackend.h | 1 - 3 files changed, 8 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index e06dfae005..a52cd97b4b 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -24,9 +24,6 @@ export { ## Read mode to use for this stream mode: Mode &default=default_mode; - ## Automatically start the input stream after the first filter has been added - autostart: bool &default=T; - ## Descriptive name. Used to remove a filter at a later time name: string; @@ -68,9 +65,6 @@ export { ## Read mode to use for this stream mode: Mode &default=default_mode; - ## Automatically start the input stream after the first filter has been added - autostart: bool &default=T; - ## Descriptive name. Used to remove a filter at a later time name: string; diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 0fde16b87d..3f7fcea078 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -287,7 +287,6 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) } EnumVal* reader = description->LookupWithDefault(rtype->FieldOffset("reader"))->AsEnumVal(); - Val *autostart = description->LookupWithDefault(rtype->FieldOffset("autostart")); ReaderFrontend* reader_obj = new ReaderFrontend(reader->InternalInt()); assert(reader_obj); diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index 5b230ca652..ca54d8a204 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -227,7 +227,6 @@ private: // For implementing Fmt(). char* buf; unsigned int buf_len; - bool autostart; unsigned int num_fields; const threading::Field* const * fields; // raw mapping From 96a7e068f085291c3ee7db0bca39cb1df18055ac Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 25 May 2012 11:29:57 -0700 Subject: [PATCH 317/651] baselines for the autostart removal. 
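The updated baselines below simply drop the autostart=T field from the printed stream
descriptions; nothing else changes. For reference, a minimal sketch of declaring an
event stream after the removal (the Val record and the line event are illustrative
names mirroring the test scripts updated in these baselines, not new API):

    type Val: record {
        s: string;
    };

    event line(description: Input::EventDescription, tpe: Input::Event, s: string)
        {
        print s;
        }

    event bro_init()
        {
        # No $autostart field any more; add_event itself starts the stream.
        Input::add_event([$source="input.log", $reader=Input::READER_RAW,
                          $mode=Input::MANUAL, $name="input",
                          $fields=Val, $ev=line]);
        }
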
--- .../scripts.base.frameworks.input.event/out | 14 ++++---- .../out | 18 +++++------ .../scripts.base.frameworks.input.raw/out | 16 +++++----- .../scripts.base.frameworks.input.reread/out | 16 +++++----- .../out | 32 +++++++++---------- .../out | 16 +++++----- .../out | 14 ++++---- .../out | 6 ++-- 8 files changed, 66 insertions(+), 66 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.event/out b/testing/btest/Baseline/scripts.base.frameworks.input.event/out index 59070cd88e..bb3b6d0a9e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.event/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.event/out @@ -1,4 +1,4 @@ -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -8,7 +8,7 @@ print A::b; Input::EVENT_NEW 1 T -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -18,7 +18,7 @@ print A::b; Input::EVENT_NEW 2 T -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -28,7 +28,7 @@ print A::b; Input::EVENT_NEW 3 F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -38,7 +38,7 @@ print A::b; Input::EVENT_NEW 4 F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -48,7 +48,7 @@ print A::b; Input::EVENT_NEW 5 F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -58,7 +58,7 @@ print A::b; Input::EVENT_NEW 6 F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out index 06e28de441..bb69da3267 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out @@ -1,4 +1,4 @@ -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print 
A::outfile, A::description; print A::outfile, A::tpe; @@ -14,7 +14,7 @@ Input::remove(input); }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -30,7 +30,7 @@ Input::remove(input); }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -46,7 +46,7 @@ Input::remove(input); }] Input::EVENT_NEW q3r3057fdf -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -62,7 +62,7 @@ Input::remove(input); }] Input::EVENT_NEW sdfs\d -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -78,7 +78,7 @@ Input::remove(input); }] Input::EVENT_NEW -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -94,7 +94,7 @@ Input::remove(input); }] Input::EVENT_NEW dfsdf -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -110,7 +110,7 @@ Input::remove(input); }] Input::EVENT_NEW sdf -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -126,7 +126,7 @@ Input::remove(input); }] Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. 
-[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.raw/out b/testing/btest/Baseline/scripts.base.frameworks.input.raw/out index 34a5599dc9..55e7610e1e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.raw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.raw/out @@ -1,4 +1,4 @@ -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -6,7 +6,7 @@ print A::s; }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -14,7 +14,7 @@ print A::s; }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -22,7 +22,7 @@ print A::s; }] Input::EVENT_NEW q3r3057fdf -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -30,7 +30,7 @@ print A::s; }] Input::EVENT_NEW sdfs\d -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -38,7 +38,7 @@ print A::s; }] Input::EVENT_NEW -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -46,7 +46,7 @@ print A::s; }] Input::EVENT_NEW dfsdf -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -54,7 +54,7 @@ print A::s; }] Input::EVENT_NEW sdf -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out index 46a30f387f..5cce15f6c7 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out +++ 
b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out @@ -15,7 +15,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ [-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -96,7 +96,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ [-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -201,7 +201,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ [-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -366,7 +366,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ [-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -489,7 +489,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ [-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -612,7 +612,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ [-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -735,7 +735,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ [-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -858,7 +858,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ [-43] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, diff --git 
a/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out index d85c8f2e83..9d62fdbef4 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out @@ -1,4 +1,4 @@ -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -6,7 +6,7 @@ print A::s; }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -14,7 +14,7 @@ print A::s; }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -22,7 +22,7 @@ print A::s; }] Input::EVENT_NEW q3r3057fdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -30,7 +30,7 @@ print A::s; }] Input::EVENT_NEW sdfs\d -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -38,7 +38,7 @@ print A::s; }] Input::EVENT_NEW -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -46,7 +46,7 @@ print A::s; }] Input::EVENT_NEW dfsdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -54,7 +54,7 @@ print A::s; }] Input::EVENT_NEW sdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -62,7 +62,7 @@ print A::s; }] Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. 
-[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -70,7 +70,7 @@ print A::s; }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -78,7 +78,7 @@ print A::s; }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -86,7 +86,7 @@ print A::s; }] Input::EVENT_NEW q3r3057fdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -94,7 +94,7 @@ print A::s; }] Input::EVENT_NEW sdfs\d -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -102,7 +102,7 @@ print A::s; }] Input::EVENT_NEW -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -110,7 +110,7 @@ print A::s; }] Input::EVENT_NEW dfsdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; @@ -118,7 +118,7 @@ print A::s; }] Input::EVENT_NEW sdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, autostart=T, name=input, fields=, want_record=F, ev=line +[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { print A::description; print A::tpe; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out index 937acf428e..07a3ffdba5 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out @@ -1,4 +1,4 @@ -[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -13,7 +13,7 @@ Input::remove(input); }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=../input.log, 
reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -28,7 +28,7 @@ Input::remove(input); }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -43,7 +43,7 @@ Input::remove(input); }] Input::EVENT_NEW q3r3057fdf -[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -58,7 +58,7 @@ Input::remove(input); }] Input::EVENT_NEW sdfs\d -[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -73,7 +73,7 @@ Input::remove(input); }] Input::EVENT_NEW -[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -88,7 +88,7 @@ Input::remove(input); }] Input::EVENT_NEW dfsdf -[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; @@ -103,7 +103,7 @@ Input::remove(input); }] Input::EVENT_NEW sdf -[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, autostart=T, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { print A::outfile, A::description; print A::outfile, A::tpe; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out index 56b36a1a0e..a1bbb9bbe4 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out @@ -1,4 +1,4 @@ -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -16,7 +16,7 @@ print right; Input::EVENT_NEW [i=1] T -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -34,7 +34,7 @@ print right; Input::EVENT_NEW [i=2] T -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -52,7 +52,7 
@@ print right; Input::EVENT_NEW [i=3] F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -70,7 +70,7 @@ print right; Input::EVENT_NEW [i=4] F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -88,7 +88,7 @@ print right; Input::EVENT_NEW [i=5] F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -106,7 +106,7 @@ print right; Input::EVENT_NEW [i=6] F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, autostart=T, name=input, destination={ +[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out index a61a4a2993..41d9438da0 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out @@ -30,7 +30,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ [-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -120,7 +120,7 @@ BB } ============EVENT============ Description -[source=../input2.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh2, destination={ +[source=../input2.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh2, destination={ [-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -240,7 +240,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, autostart=T, name=ssh, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ [-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, From 4de6d76488e0d85f7085aa53528bee93b7d7b8b7 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 25 May 2012 11:30:18 -0700 Subject: [PATCH 318/651] fix up the executeraw test - now it works for the first time and does not always fail --- src/input/readers/Raw.cc | 10 ++++++++++ .../scripts.base.frameworks.input.executeraw/out | 9 +++++++++ .../scripts/base/frameworks/input/executeraw.bro | 16 ++++++++++------ 3 files changed, 29 insertions(+), 6 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index 43c782de29..ce0b4f8a5f 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -178,6 +178,12 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con bool 
Raw::GetLine(string& str) { + if ( in->peek() == std::iostream::traits_type::eof() ) + return false; + + if ( in->eofbit == true || in->failbit == true ) + return false; + while ( getline(*in, str, separator[0]) ) return true; @@ -247,6 +253,10 @@ bool Raw::DoUpdate() Put(fields); } +#ifdef DEBUG + Debug(DBG_INPUT, "DoUpdate finished successfully"); +#endif + return true; } diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out new file mode 100644 index 0000000000..8611b35dd3 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out @@ -0,0 +1,9 @@ +[source=wc -l ../input.log |, reader=Input::READER_RAW, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line +{ +print outfile, description; +print outfile, tpe; +print outfile, s; +close(outfile); +}] +Input::EVENT_NEW + 8 ../input.log diff --git a/testing/btest/scripts/base/frameworks/input/executeraw.bro b/testing/btest/scripts/base/frameworks/input/executeraw.bro index 6fceebf885..6d07a9bf29 100644 --- a/testing/btest/scripts/base/frameworks/input/executeraw.bro +++ b/testing/btest/scripts/base/frameworks/input/executeraw.bro @@ -1,5 +1,6 @@ # -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 1 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -13,21 +14,24 @@ sdf 3rw43wRRERLlL#RWERERERE. @TEST-END-FILE +@load frameworks/communication/listen -module A; +global outfile: file; type Val: record { s: string; }; event line(description: Input::EventDescription, tpe: Input::Event, s: string) { - print description; - print tpe; - print s; + print outfile, description; + print outfile, tpe; + print outfile, s; + close(outfile); } event bro_init() { - Input::add_event([$source="wc input.log |", $reader=Input::READER_RAW, $name="input", $fields=Val, $ev=line]); + outfile = open ("../out"); + Input::add_event([$source="wc -l ../input.log |", $reader=Input::READER_RAW, $name="input", $fields=Val, $ev=line]); Input::remove("input"); } From 24173807ea8b1c895bc9ee75cb9924ba198901ea Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 25 May 2012 11:35:56 -0700 Subject: [PATCH 319/651] reactivate network_time check in threading manager. previously this line made all input framework tests fail - it works now. Some of the other recent changes of the threading manager must have fixed that problem. This was easy :) --- src/threading/Manager.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 491b8379e8..4a05fb8d41 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -125,7 +125,7 @@ void Manager::Process() if ( msg->Process() ) { - //if ( network_time ) // FIXME: ask robin again if he needs this. makes input interface not work in bro_init. 
+ if ( network_time ) did_process = true; } From 1059d9aa75f9b9dd7f6081f4de77b9ea73734b3a Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 25 May 2012 17:15:29 -0500 Subject: [PATCH 320/651] Add more tests for previously-untested BIFs --- testing/btest/Baseline/bifs.cat/out | 6 ++ testing/btest/Baseline/bifs.fmt/out | 48 ++++++++++++ testing/btest/Baseline/bifs.lookup_ID/out | 5 ++ testing/btest/Baseline/bifs.math/out | 8 ++ .../Baseline/bifs.record_type_to_vector/out | 1 + testing/btest/Baseline/bifs.to_count/out | 9 +++ testing/btest/Baseline/bifs.to_double/out | 6 ++ testing/btest/Baseline/bifs.to_int/out | 3 + testing/btest/Baseline/bifs.to_interval/out | 2 + testing/btest/Baseline/bifs.to_port/out | 6 ++ testing/btest/Baseline/bifs.to_time/out | 2 + testing/btest/Baseline/bifs.type_name/out | 20 +++++ testing/btest/bifs/cat.bro | 22 ++++++ testing/btest/bifs/fmt.bro | 78 +++++++++++++++++++ testing/btest/bifs/lookup_ID.bro | 16 ++++ testing/btest/bifs/math.bro | 24 ++++++ testing/btest/bifs/record_type_to_vector.bro | 13 ++++ testing/btest/bifs/to_count.bro | 27 +++++++ testing/btest/bifs/to_double.bro | 20 +++++ testing/btest/bifs/to_int.bro | 10 +++ testing/btest/bifs/to_interval.bro | 11 +++ testing/btest/bifs/to_port.bro | 17 ++++ testing/btest/bifs/to_time.bro | 11 +++ testing/btest/bifs/type_name.bro | 56 +++++++++++++ 24 files changed, 421 insertions(+) create mode 100644 testing/btest/Baseline/bifs.cat/out create mode 100644 testing/btest/Baseline/bifs.fmt/out create mode 100644 testing/btest/Baseline/bifs.lookup_ID/out create mode 100644 testing/btest/Baseline/bifs.math/out create mode 100644 testing/btest/Baseline/bifs.record_type_to_vector/out create mode 100644 testing/btest/Baseline/bifs.to_count/out create mode 100644 testing/btest/Baseline/bifs.to_double/out create mode 100644 testing/btest/Baseline/bifs.to_int/out create mode 100644 testing/btest/Baseline/bifs.to_interval/out create mode 100644 testing/btest/Baseline/bifs.to_port/out create mode 100644 testing/btest/Baseline/bifs.to_time/out create mode 100644 testing/btest/Baseline/bifs.type_name/out create mode 100644 testing/btest/bifs/cat.bro create mode 100644 testing/btest/bifs/fmt.bro create mode 100644 testing/btest/bifs/lookup_ID.bro create mode 100644 testing/btest/bifs/math.bro create mode 100644 testing/btest/bifs/record_type_to_vector.bro create mode 100644 testing/btest/bifs/to_count.bro create mode 100644 testing/btest/bifs/to_double.bro create mode 100644 testing/btest/bifs/to_int.bro create mode 100644 testing/btest/bifs/to_interval.bro create mode 100644 testing/btest/bifs/to_port.bro create mode 100644 testing/btest/bifs/to_time.bro create mode 100644 testing/btest/bifs/type_name.bro diff --git a/testing/btest/Baseline/bifs.cat/out b/testing/btest/Baseline/bifs.cat/out new file mode 100644 index 0000000000..cf73512b88 --- /dev/null +++ b/testing/btest/Baseline/bifs.cat/out @@ -0,0 +1,6 @@ +foo3T + +3T +foo|3|T + +|3|T diff --git a/testing/btest/Baseline/bifs.fmt/out b/testing/btest/Baseline/bifs.fmt/out new file mode 100644 index 0000000000..6422294a39 --- /dev/null +++ b/testing/btest/Baseline/bifs.fmt/out @@ -0,0 +1,48 @@ +test +% + +*test * +* test* +* T* +*T * +* 3.14e+00* +*3.14e+00 * +* 3.14* +* 3.1* +* -3.14e+00* +* -3.14* +* -3.1* +*-3.14e+00 * +*-3.14 * +*-3.1 * +* -128* +*-128 * +* 128* +*0000000128* +*128 * +* a0* +*00000000a0* +* a0* +* 160/tcp* +* 127.0.0.1* +* 7f000001* +*192.168.0.0/16* +* ::1* +*fe000000000000000000000000000001* +*fe80:1234::1* +*fe80:1234::/32* +* 3.0 hrs* 
+*/^?(^foo|bar)$?/* +* Blue* +* [1, 2, 3]* +*{^J^I2,^J^I1,^J^I3^J}* +*{^J^I[2] = bro,^J^I[1] = test^J}* +3.100000e+02 +310.000000 +310 +3.100e+02 +310.000 +310 +310 +this\0test +this\0test diff --git a/testing/btest/Baseline/bifs.lookup_ID/out b/testing/btest/Baseline/bifs.lookup_ID/out new file mode 100644 index 0000000000..64b6379deb --- /dev/null +++ b/testing/btest/Baseline/bifs.lookup_ID/out @@ -0,0 +1,5 @@ +bro test + + + +event() diff --git a/testing/btest/Baseline/bifs.math/out b/testing/btest/Baseline/bifs.math/out new file mode 100644 index 0000000000..40131d2528 --- /dev/null +++ b/testing/btest/Baseline/bifs.math/out @@ -0,0 +1,8 @@ +3.0 +2.0 +-4.0 +-3.0 +1.772005 +23.103867 +1.144223 +0.49693 diff --git a/testing/btest/Baseline/bifs.record_type_to_vector/out b/testing/btest/Baseline/bifs.record_type_to_vector/out new file mode 100644 index 0000000000..1b4fa4baf1 --- /dev/null +++ b/testing/btest/Baseline/bifs.record_type_to_vector/out @@ -0,0 +1 @@ +[, ct, str1] diff --git a/testing/btest/Baseline/bifs.to_count/out b/testing/btest/Baseline/bifs.to_count/out new file mode 100644 index 0000000000..a283cbaed3 --- /dev/null +++ b/testing/btest/Baseline/bifs.to_count/out @@ -0,0 +1,9 @@ +0 +2 +3 +4 +7 +0 +18446744073709551611 +0 +123 diff --git a/testing/btest/Baseline/bifs.to_double/out b/testing/btest/Baseline/bifs.to_double/out new file mode 100644 index 0000000000..8c2fef496a --- /dev/null +++ b/testing/btest/Baseline/bifs.to_double/out @@ -0,0 +1,6 @@ +0.000001 +1.0 +-60.0 +3600.0 +86400.0 +1337982322.762159 diff --git a/testing/btest/Baseline/bifs.to_int/out b/testing/btest/Baseline/bifs.to_int/out new file mode 100644 index 0000000000..cde0c82987 --- /dev/null +++ b/testing/btest/Baseline/bifs.to_int/out @@ -0,0 +1,3 @@ +1 +-1 +0 diff --git a/testing/btest/Baseline/bifs.to_interval/out b/testing/btest/Baseline/bifs.to_interval/out new file mode 100644 index 0000000000..d841f8d99a --- /dev/null +++ b/testing/btest/Baseline/bifs.to_interval/out @@ -0,0 +1,2 @@ +1234563.14 +-1234563.14 diff --git a/testing/btest/Baseline/bifs.to_port/out b/testing/btest/Baseline/bifs.to_port/out new file mode 100644 index 0000000000..fb8a536abb --- /dev/null +++ b/testing/btest/Baseline/bifs.to_port/out @@ -0,0 +1,6 @@ +123/tcp +123/udp +123/icmp +256/tcp +256/udp +256/icmp diff --git a/testing/btest/Baseline/bifs.to_time/out b/testing/btest/Baseline/bifs.to_time/out new file mode 100644 index 0000000000..d841f8d99a --- /dev/null +++ b/testing/btest/Baseline/bifs.to_time/out @@ -0,0 +1,2 @@ +1234563.14 +-1234563.14 diff --git a/testing/btest/Baseline/bifs.type_name/out b/testing/btest/Baseline/bifs.type_name/out new file mode 100644 index 0000000000..610ee304fd --- /dev/null +++ b/testing/btest/Baseline/bifs.type_name/out @@ -0,0 +1,20 @@ +string +count +double +bool +time +interval +pattern +enum +port +addr +addr +subnet +subnet +vector +vector +set[count] +set[string] +table[count] of string +table[string] of count +record { c:count; s:string; } diff --git a/testing/btest/bifs/cat.bro b/testing/btest/bifs/cat.bro new file mode 100644 index 0000000000..b85b3af550 --- /dev/null +++ b/testing/btest/bifs/cat.bro @@ -0,0 +1,22 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "foo"; + local b = 3; + local c = T; + + print cat(a, b, c); + + print cat(); + + print cat("", 3, T); + + print cat_sep("|", "", a, b, c); + + print cat_sep("|", ""); + + print cat_sep("|", "", "", b, c); + } diff --git a/testing/btest/bifs/fmt.bro 
b/testing/btest/bifs/fmt.bro new file mode 100644 index 0000000000..bb2740d127 --- /dev/null +++ b/testing/btest/bifs/fmt.bro @@ -0,0 +1,78 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +type color: enum { Red, Blue }; + +event bro_init() + { + local a = "foo"; + local b = 3; + local c = T; + local d = Blue; + local e = vector( 1, 2, 3); + local f = set( 1, 2, 3); + local g: table[count] of string = { [1] = "test", [2] = "bro" }; + local h = "this\0test"; + + #print fmt(c, b, a); # this should work, according to doc comments + + # tests with only a format string (no additional args) + print fmt("test"); + print fmt("%%"); + + # no arguments + print fmt(); + + # tests of various data types with field width specified + print fmt("*%-10s*", "test"); + print fmt("*%10s*", "test"); + print fmt("*%10s*", T); + print fmt("*%-10s*", T); + print fmt("*%10.2e*", 3.14159265); + print fmt("*%-10.2e*", 3.14159265); + print fmt("*%10.2f*", 3.14159265); + print fmt("*%10.2g*", 3.14159265); + print fmt("*%10.2e*", -3.14159265); + print fmt("*%10.2f*", -3.14159265); + print fmt("*%10.2g*", -3.14159265); + print fmt("*%-10.2e*", -3.14159265); + print fmt("*%-10.2f*", -3.14159265); + print fmt("*%-10.2g*", -3.14159265); + print fmt("*%10d*", -128); + print fmt("*%-10d*", -128); + print fmt("*%10d*", 128); + print fmt("*%010d*", 128); + print fmt("*%-10d*", 128); + print fmt("*%10x*", 160); + print fmt("*%010x*", 160); + print fmt("*%10x*", 160/tcp); + print fmt("*%10s*", 160/tcp); + print fmt("*%10s*", 127.0.0.1); + print fmt("*%10x*", 127.0.0.1); + print fmt("*%10s*", 192.168.0.0/16); + print fmt("*%10s*", [::1]); + print fmt("*%10x*", [fe00::1]); + print fmt("*%10s*", [fe80:1234::1]); + print fmt("*%10s*", [fe80:1234::]/32); + print fmt("*%10s*", 3hr); + print fmt("*%10s*", /^foo|bar/); + print fmt("*%10s*", d); + print fmt("*%10s*", e); + print fmt("*%10s*", f); + print fmt("*%10s*", g); + + # tests of various data types without field width + print fmt("%e", 3.1e+2); + print fmt("%f", 3.1e+2); + print fmt("%g", 3.1e+2); + print fmt("%.3e", 3.1e+2); + print fmt("%.3f", 3.1e+2); + print fmt("%.3g", 3.1e+2); + print fmt("%.7g", 3.1e+2); + + # these produce same result + print fmt("%As", h); + print fmt("%s", h); + + } diff --git a/testing/btest/bifs/lookup_ID.bro b/testing/btest/bifs/lookup_ID.bro new file mode 100644 index 0000000000..b8a29ef41f --- /dev/null +++ b/testing/btest/bifs/lookup_ID.bro @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +global a = "bro test"; + +event bro_init() + { + local b = "local value"; + + print lookup_ID("a"); + print lookup_ID(""); + print lookup_ID("xyz"); + print lookup_ID("b"); + print type_name( lookup_ID("bro_init") ); + } diff --git a/testing/btest/bifs/math.bro b/testing/btest/bifs/math.bro new file mode 100644 index 0000000000..90aed5b4e6 --- /dev/null +++ b/testing/btest/bifs/math.bro @@ -0,0 +1,24 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = 3.14; + local b = 2.71; + local c = -3.14; + local d = -2.71; + + print floor(a); + print floor(b); + print floor(c); + print floor(d); + + print sqrt(a); + + print exp(a); + + print ln(a); + + print log10(a); + } diff --git a/testing/btest/bifs/record_type_to_vector.bro b/testing/btest/bifs/record_type_to_vector.bro new file mode 100644 index 0000000000..18ddf35022 --- /dev/null +++ b/testing/btest/bifs/record_type_to_vector.bro @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + 
+type myrecord: record { + ct: count; + str1: string; +}; + +event bro_init() + { + print record_type_to_vector("myrecord"); + } diff --git a/testing/btest/bifs/to_count.bro b/testing/btest/bifs/to_count.bro new file mode 100644 index 0000000000..c1fe72ce52 --- /dev/null +++ b/testing/btest/bifs/to_count.bro @@ -0,0 +1,27 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a: int = -2; + print int_to_count(a); + + local b: int = 2; + print int_to_count(b); + + local c: double = 3.14; + print double_to_count(c); + + local d: double = 3.9; + print double_to_count(d); + + print to_count("7"); + print to_count(""); + print to_count("-5"); + print to_count("not a count"); + + local e: port = 123/tcp; + print port_to_count(e); + + } diff --git a/testing/btest/bifs/to_double.bro b/testing/btest/bifs/to_double.bro new file mode 100644 index 0000000000..f13d34f69a --- /dev/null +++ b/testing/btest/bifs/to_double.bro @@ -0,0 +1,20 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = 1 usec; + print interval_to_double(a); + local b = 1sec; + print interval_to_double(b); + local c = -1min; + print interval_to_double(c); + local d = 1hrs; + print interval_to_double(d); + local e = 1 day; + print interval_to_double(e); + + local f = current_time(); + print time_to_double(f); + } diff --git a/testing/btest/bifs/to_int.bro b/testing/btest/bifs/to_int.bro new file mode 100644 index 0000000000..9d108a9da7 --- /dev/null +++ b/testing/btest/bifs/to_int.bro @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + print to_int("1"); + print to_int("-1"); + print to_int("not an int"); + } diff --git a/testing/btest/bifs/to_interval.bro b/testing/btest/bifs/to_interval.bro new file mode 100644 index 0000000000..8fded315d2 --- /dev/null +++ b/testing/btest/bifs/to_interval.bro @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = 1234563.14; + print double_to_interval(a); + local b = -1234563.14; + print double_to_interval(b); + } diff --git a/testing/btest/bifs/to_port.bro b/testing/btest/bifs/to_port.bro new file mode 100644 index 0000000000..39a0cbed6b --- /dev/null +++ b/testing/btest/bifs/to_port.bro @@ -0,0 +1,17 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + print to_port("123/tcp"); + print to_port("123/udp"); + print to_port("123/icmp"); + + local a: transport_proto = tcp; + local b: transport_proto = udp; + local c: transport_proto = icmp; + print count_to_port(256, a); + print count_to_port(256, b); + print count_to_port(256, c); + } diff --git a/testing/btest/bifs/to_time.bro b/testing/btest/bifs/to_time.bro new file mode 100644 index 0000000000..97b109e647 --- /dev/null +++ b/testing/btest/bifs/to_time.bro @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = 1234563.14; + print double_to_time(a); + local b = -1234563.14; + print double_to_time(b); + } diff --git a/testing/btest/bifs/type_name.bro b/testing/btest/bifs/type_name.bro new file mode 100644 index 0000000000..a8c51ef69d --- /dev/null +++ b/testing/btest/bifs/type_name.bro @@ -0,0 +1,56 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +type color: enum { Red, Blue }; + +type myrecord: record { + c: count; + s: string; +}; + +event bro_init() + { + local a = "foo"; + local b = 3; + local c = 3.14; + local d = 
T; + local e = current_time(); + local f = 5hr; + local g = /^foo|bar/; + local h = Blue; + local i = 123/tcp; + local j = 192.168.0.2; + local k = [fe80::1]; + local l = 192.168.0.0/16; + local m = [fe80:1234::]/32; + local n = vector( 1, 2, 3); + local o = vector( "bro", "test"); + local p = set( 1, 2, 3); + local q = set( "this", "test"); + local r: table[count] of string = { [1] = "test", [2] = "bro" }; + local s: table[string] of count = { ["a"] = 5, ["b"] = 3 }; + local t: myrecord = [ $c = 2, $s = "another test" ]; + + print type_name(a); + print type_name(b); + print type_name(c); + print type_name(d); + print type_name(e); + print type_name(f); + print type_name(g); + print type_name(h); + print type_name(i); + print type_name(j); + print type_name(k); + print type_name(l); + print type_name(m); + print type_name(n); + print type_name(o); + print type_name(p); + print type_name(q); + print type_name(r); + print type_name(s); + print type_name(t); + + } From 658b188dff8cf4112f0752f1f63422bd13bde51d Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 25 May 2012 14:26:11 -0700 Subject: [PATCH 321/651] filters have been called streams for eternity. And I always was too lazy to change it everywhere... Fix that. --- scripts/base/frameworks/input/main.bro | 4 +- src/input/Manager.cc | 342 ++++++++++++------------- src/input/Manager.h | 30 ++- src/input/ReaderBackend.h | 42 +-- src/input/readers/Ascii.cc | 2 +- src/input/readers/Ascii.h | 2 +- src/input/readers/Raw.h | 3 - 7 files changed, 217 insertions(+), 208 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index a52cd97b4b..c9ce0e321e 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -24,7 +24,7 @@ export { ## Read mode to use for this stream mode: Mode &default=default_mode; - ## Descriptive name. Used to remove a filter at a later time + ## Descriptive name. Used to remove a stream at a later time name: string; ## Special definitions for tables @@ -65,7 +65,7 @@ export { ## Read mode to use for this stream mode: Mode &default=default_mode; - ## Descriptive name. Used to remove a filter at a later time + ## Descriptive name. Used to remove a stream at a later time name: string; ## Special definitions for events diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 3f7fcea078..3bae7dbb28 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -62,7 +62,7 @@ public: int mode; - StreamType filter_type; // to distinguish between event and table filters + StreamType stream_type; // to distinguish between event and table streams EnumVal* type; ReaderFrontend* reader; @@ -129,7 +129,7 @@ public: Manager::TableStream::TableStream() : Manager::Stream::Stream() { - filter_type = TABLE_FILTER; + stream_type = TABLE_FILTER; tab = 0; itype = 0; @@ -144,7 +144,7 @@ Manager::TableStream::TableStream() : Manager::Stream::Stream() Manager::EventStream::EventStream() : Manager::Stream::Stream() { fields = 0; - filter_type = EVENT_FILTER; + stream_type = EVENT_FILTER; } Manager::EventStream::~EventStream() @@ -322,16 +322,16 @@ bool Manager::CreateEventStream(RecordVal* fval) RecordType* rtype = fval->Type()->AsRecordType(); if ( ! 
same_type(rtype, BifType::Record::Input::EventDescription, 0) ) { - reporter->Error("filter argument not of right type"); + reporter->Error("EventDescription argument not of right type"); return false; } - EventStream* filter = new EventStream(); + EventStream* stream = new EventStream(); { - bool res = CreateStream(filter, fval); + bool res = CreateStream(stream, fval); if ( res == false ) { - delete filter; + delete stream; return false; } } @@ -428,19 +428,19 @@ bool Manager::CreateEventStream(RecordVal* fval) logf[i] = fieldsV[i]; Unref(fields); // ref'd by lookupwithdefault - filter->num_fields = fieldsV.size(); - filter->fields = fields->Ref()->AsRecordType(); - filter->event = event_registry->Lookup(event->GetID()->Name()); - filter->want_record = ( want_record->InternalInt() == 1 ); + stream->num_fields = fieldsV.size(); + stream->fields = fields->Ref()->AsRecordType(); + stream->event = event_registry->Lookup(event->GetID()->Name()); + stream->want_record = ( want_record->InternalInt() == 1 ); Unref(want_record); // ref'd by lookupwithdefault - assert(filter->reader); - filter->reader->Init(filter->source, filter->mode, filter->num_fields, logf ); + assert(stream->reader); + stream->reader->Init(stream->source, stream->mode, stream->num_fields, logf ); - readers[filter->reader] = filter; + readers[stream->reader] = stream; DBG_LOG(DBG_INPUT, "Successfully created event stream %s", - filter->name.c_str()); + stream->name.c_str()); return true; } @@ -450,16 +450,16 @@ bool Manager::CreateTableStream(RecordVal* fval) RecordType* rtype = fval->Type()->AsRecordType(); if ( ! same_type(rtype, BifType::Record::Input::TableDescription, 0) ) { - reporter->Error("filter argument not of right type"); + reporter->Error("TableDescription argument not of right type"); return false; } - TableStream* filter = new TableStream(); + TableStream* stream = new TableStream(); { - bool res = CreateStream(filter, fval); + bool res = CreateStream(stream, fval); if ( res == false ) { - delete filter; + delete stream; return false; } } @@ -587,40 +587,40 @@ bool Manager::CreateTableStream(RecordVal* fval) for ( unsigned int i = 0; i < fieldsV.size(); i++ ) fields[i] = fieldsV[i]; - filter->pred = pred ? pred->AsFunc() : 0; - filter->num_idx_fields = idxfields; - filter->num_val_fields = valfields; - filter->tab = dst->AsTableVal(); - filter->rtype = val ? val->AsRecordType() : 0; - filter->itype = idx->AsRecordType(); - filter->event = event ? event_registry->Lookup(event->GetID()->Name()) : 0; - filter->currDict = new PDict(InputHash); - filter->currDict->SetDeleteFunc(input_hash_delete_func); - filter->lastDict = new PDict(InputHash); - filter->lastDict->SetDeleteFunc(input_hash_delete_func); - filter->want_record = ( want_record->InternalInt() == 1 ); + stream->pred = pred ? pred->AsFunc() : 0; + stream->num_idx_fields = idxfields; + stream->num_val_fields = valfields; + stream->tab = dst->AsTableVal(); + stream->rtype = val ? val->AsRecordType() : 0; + stream->itype = idx->AsRecordType(); + stream->event = event ? event_registry->Lookup(event->GetID()->Name()) : 0; + stream->currDict = new PDict(InputHash); + stream->currDict->SetDeleteFunc(input_hash_delete_func); + stream->lastDict = new PDict(InputHash); + stream->lastDict->SetDeleteFunc(input_hash_delete_func); + stream->want_record = ( want_record->InternalInt() == 1 ); Unref(want_record); // ref'd by lookupwithdefault Unref(pred); if ( valfields > 1 ) { - if ( ! filter->want_record ) + if ( ! 
stream->want_record ) { - reporter->Error("Stream %s does not want a record (want_record=F), but has more then one value field. Aborting", filter->name.c_str()); - delete filter; + reporter->Error("Stream %s does not want a record (want_record=F), but has more then one value field. Aborting", stream->name.c_str()); + delete stream; return false; } } - assert(filter->reader); - filter->reader->Init(filter->source, filter->mode, fieldsV.size(), fields ); + assert(stream->reader); + stream->reader->Init(stream->source, stream->mode, fieldsV.size(), fields ); - readers[filter->reader] = filter; + readers[stream->reader] = stream; DBG_LOG(DBG_INPUT, "Successfully created table stream %s", - filter->name.c_str()); + stream->name.c_str()); return true; } @@ -872,9 +872,9 @@ void Manager::SendEntry(ReaderFrontend* reader, Value* *vals) } int readFields; - if ( i->filter_type == TABLE_FILTER ) + if ( i->stream_type == TABLE_FILTER ) readFields = SendEntryTable(i, vals); - else if ( i->filter_type == EVENT_FILTER ) + else if ( i->stream_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); readFields = SendEventStreamEvent(i, type, vals); @@ -894,21 +894,21 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) assert(i); - assert(i->filter_type == TABLE_FILTER); - TableStream* filter = (TableStream*) i; + assert(i->stream_type == TABLE_FILTER); + TableStream* stream = (TableStream*) i; - HashKey* idxhash = HashValues(filter->num_idx_fields, vals); + HashKey* idxhash = HashValues(stream->num_idx_fields, vals); if ( idxhash == 0 ) { reporter->Error("Could not hash line. Ignoring"); - return filter->num_val_fields + filter->num_idx_fields; + return stream->num_val_fields + stream->num_idx_fields; } hash_t valhash = 0; - if ( filter->num_val_fields > 0 ) + if ( stream->num_val_fields > 0 ) { - HashKey* valhashkey = HashValues(filter->num_val_fields, vals+filter->num_idx_fields); + HashKey* valhashkey = HashValues(stream->num_val_fields, vals+stream->num_idx_fields); if ( valhashkey == 0 ) { // empty line. index, but no values. // hence we also have no hash value... @@ -920,23 +920,23 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) } } - InputHash *h = filter->lastDict->Lookup(idxhash); + InputHash *h = stream->lastDict->Lookup(idxhash); if ( h != 0 ) { // seen before - if ( filter->num_val_fields == 0 || h->valhash == valhash ) + if ( stream->num_val_fields == 0 || h->valhash == valhash ) { // ok, exact duplicate, move entry to new dicrionary and do nothing else. 
- filter->lastDict->Remove(idxhash); - filter->currDict->Insert(idxhash, h); + stream->lastDict->Remove(idxhash); + stream->currDict->Insert(idxhash, h); delete idxhash; - return filter->num_val_fields + filter->num_idx_fields; + return stream->num_val_fields + stream->num_idx_fields; } else { - assert( filter->num_val_fields > 0 ); + assert( stream->num_val_fields > 0 ); // entry was updated in some way - filter->lastDict->Remove(idxhash); + stream->lastDict->Remove(idxhash); // keep h for predicates updated = true; @@ -947,24 +947,24 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) Val* valval; RecordVal* predidx = 0; - int position = filter->num_idx_fields; - if ( filter->num_val_fields == 0 ) + int position = stream->num_idx_fields; + if ( stream->num_val_fields == 0 ) valval = 0; - else if ( filter->num_val_fields == 1 && !filter->want_record ) - valval = ValueToVal(vals[position], filter->rtype->FieldType(0)); + else if ( stream->num_val_fields == 1 && !stream->want_record ) + valval = ValueToVal(vals[position], stream->rtype->FieldType(0)); else - valval = ValueToRecordVal(vals, filter->rtype, &position); + valval = ValueToRecordVal(vals, stream->rtype, &position); - // call filter first to determine if we really add / change the entry - if ( filter->pred ) + // call stream first to determine if we really add / change the entry + if ( stream->pred ) { EnumVal* ev; //Ref(idxval); int startpos = 0; - //Val* predidx = ListValToRecordVal(idxval->AsListVal(), filter->itype, &startpos); - predidx = ValueToRecordVal(vals, filter->itype, &startpos); - //ValueToRecordVal(vals, filter->itype, &startpos); + //Val* predidx = ListValToRecordVal(idxval->AsListVal(), stream->itype, &startpos); + predidx = ValueToRecordVal(vals, stream->itype, &startpos); + //ValueToRecordVal(vals, stream->itype, &startpos); if ( updated ) ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); @@ -972,10 +972,10 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); bool result; - if ( filter->num_val_fields > 0 ) // we have values - result = CallPred(filter->pred, 3, ev, predidx->Ref(), valval->Ref()); + if ( stream->num_val_fields > 0 ) // we have values + result = CallPred(stream->pred, 3, ev, predidx->Ref(), valval->Ref()); else // no values - result = CallPred(filter->pred, 2, ev, predidx->Ref()); + result = CallPred(stream->pred, 2, ev, predidx->Ref()); if ( result == false ) { @@ -985,17 +985,17 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) { // throw away. Hence - we quit. And remove the entry from the current dictionary... // (but why should it be in there? assert this). - assert ( filter->currDict->RemoveEntry(idxhash) == 0 ); + assert ( stream->currDict->RemoveEntry(idxhash) == 0 ); delete idxhash; delete h; - return filter->num_val_fields + filter->num_idx_fields; + return stream->num_val_fields + stream->num_idx_fields; } else { // keep old one - filter->currDict->Insert(idxhash, h); + stream->currDict->Insert(idxhash, h); delete idxhash; - return filter->num_val_fields + filter->num_idx_fields; + return stream->num_val_fields + stream->num_idx_fields; } } @@ -1016,19 +1016,19 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) // I think there is an unref missing here. 
But if I insert is, it crashes :) } else - idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); + idxval = ValueToIndexVal(stream->num_idx_fields, stream->itype, vals); Val* oldval = 0; if ( updated == true ) { - assert(filter->num_val_fields > 0); + assert(stream->num_val_fields > 0); // in that case, we need the old value to send the event (if we send an event). - oldval = filter->tab->Lookup(idxval, false); + oldval = stream->tab->Lookup(idxval, false); } //i->tab->Assign(idxval, valval); assert(idxval); - HashKey* k = filter->tab->ComputeHash(idxval); + HashKey* k = stream->tab->ComputeHash(idxval); if ( !k ) { reporter->InternalError("could not hash"); @@ -1039,46 +1039,46 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) ih->idxkey = new HashKey(k->Key(), k->Size(), k->Hash()); ih->valhash = valhash; - if ( filter->event && updated ) + if ( stream->event && updated ) Ref(oldval); // otherwise it is no longer accessible after the assignment - filter->tab->Assign(idxval, k, valval); + stream->tab->Assign(idxval, k, valval); Unref(idxval); // asssign does not consume idxval. if ( predidx != 0 ) Unref(predidx); - filter->currDict->Insert(idxhash, ih); + stream->currDict->Insert(idxhash, ih); delete idxhash; - if ( filter->event ) + if ( stream->event ) { EnumVal* ev; int startpos = 0; - Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); + Val* predidx = ValueToRecordVal(vals, stream->itype, &startpos); if ( updated ) { // in case of update send back the old value. - assert ( filter->num_val_fields > 0 ); + assert ( stream->num_val_fields > 0 ); ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); - SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, oldval); + SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx, oldval); } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - if ( filter->num_val_fields == 0 ) + if ( stream->num_val_fields == 0 ) { - Ref(filter->description); - SendEvent(filter->event, 3, filter->description->Ref(), ev, predidx); + Ref(stream->description); + SendEvent(stream->event, 3, stream->description->Ref(), ev, predidx); } else - SendEvent(filter->event, 4, filter->description->Ref(), ev, predidx, valval->Ref()); + SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx, valval->Ref()); } } - return filter->num_val_fields + filter->num_idx_fields; + return stream->num_val_fields + stream->num_idx_fields; } @@ -1097,19 +1097,19 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) i->name.c_str()); #endif - if ( i->filter_type == EVENT_FILTER ) // nothing to do.. + if ( i->stream_type == EVENT_FILTER ) // nothing to do.. 
return; - assert(i->filter_type == TABLE_FILTER); - TableStream* filter = (TableStream*) i; + assert(i->stream_type == TABLE_FILTER); + TableStream* stream = (TableStream*) i; // lastdict contains all deleted entries and should be empty apart from that - IterCookie *c = filter->lastDict->InitForIteration(); - filter->lastDict->MakeRobustCookie(c); + IterCookie *c = stream->lastDict->InitForIteration(); + stream->lastDict->MakeRobustCookie(c); InputHash* ih; HashKey *lastDictIdxKey; //while ( ( ih = i->lastDict->NextEntry(c) ) ) { - while ( ( ih = filter->lastDict->NextEntry(lastDictIdxKey, c) ) ) + while ( ( ih = stream->lastDict->NextEntry(lastDictIdxKey, c) ) ) { ListVal * idx = 0; Val *val = 0; @@ -1118,17 +1118,17 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) EnumVal* ev = 0; int startpos = 0; - if ( filter->pred || filter->event ) + if ( stream->pred || stream->event ) { - idx = filter->tab->RecoverIndex(ih->idxkey); + idx = stream->tab->RecoverIndex(ih->idxkey); assert(idx != 0); - val = filter->tab->Lookup(idx); + val = stream->tab->Lookup(idx); assert(val != 0); - predidx = ListValToRecordVal(idx, filter->itype, &startpos); + predidx = ListValToRecordVal(idx, stream->itype, &startpos); ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); } - if ( filter->pred ) + if ( stream->pred ) { // ask predicate, if we want to expire this element... @@ -1136,7 +1136,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) Ref(predidx); Ref(val); - bool result = CallPred(filter->pred, 3, ev, predidx, val); + bool result = CallPred(stream->pred, 3, ev, predidx, val); if ( result == false ) { @@ -1144,37 +1144,37 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) // ah well - and we have to add the entry to currDict... Unref(predidx); Unref(ev); - filter->currDict->Insert(lastDictIdxKey, filter->lastDict->RemoveEntry(lastDictIdxKey)); + stream->currDict->Insert(lastDictIdxKey, stream->lastDict->RemoveEntry(lastDictIdxKey)); delete lastDictIdxKey; continue; } } - if ( filter->event ) + if ( stream->event ) { Ref(predidx); Ref(val); Ref(ev); - SendEvent(filter->event, 3, ev, predidx, val); + SendEvent(stream->event, 3, ev, predidx, val); } - if ( predidx ) // if we have a filter or an event... + if ( predidx ) // if we have a stream or an event... Unref(predidx); if ( ev ) Unref(ev); - Unref(filter->tab->Delete(ih->idxkey)); - filter->lastDict->Remove(lastDictIdxKey); // delete in next line + Unref(stream->tab->Delete(ih->idxkey)); + stream->lastDict->Remove(lastDictIdxKey); // delete in next line delete lastDictIdxKey; delete(ih); } - filter->lastDict->Clear(); // should be empt. buti- well... who knows... - delete(filter->lastDict); + stream->lastDict->Clear(); // should be empt. buti- well... who knows... 
+ delete(stream->lastDict); - filter->lastDict = filter->currDict; - filter->currDict = new PDict(InputHash); - filter->currDict->SetDeleteFunc(input_hash_delete_func); + stream->lastDict = stream->currDict; + stream->currDict = new PDict(InputHash); + stream->currDict->SetDeleteFunc(input_hash_delete_func); #ifdef DEBUG DBG_LOG(DBG_INPUT, "EndCurrentSend complete for stream %s, queueing update_finished event", @@ -1199,9 +1199,9 @@ void Manager::Put(ReaderFrontend* reader, Value* *vals) } int readFields; - if ( i->filter_type == TABLE_FILTER ) + if ( i->stream_type == TABLE_FILTER ) readFields = PutTable(i, vals); - else if ( i->filter_type == EVENT_FILTER ) + else if ( i->stream_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); readFields = SendEventStreamEvent(i, type, vals); @@ -1219,44 +1219,44 @@ int Manager::SendEventStreamEvent(Stream* i, EnumVal* type, const Value* const * { assert(i); - assert(i->filter_type == EVENT_FILTER); - EventStream* filter = (EventStream*) i; + assert(i->stream_type == EVENT_FILTER); + EventStream* stream = (EventStream*) i; Val *val; list out_vals; - Ref(filter->description); - out_vals.push_back(filter->description); + Ref(stream->description); + out_vals.push_back(stream->description); // no tracking, send everything with a new event... //out_vals.push_back(new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event)); out_vals.push_back(type); int position = 0; - if ( filter->want_record ) + if ( stream->want_record ) { - RecordVal * r = ValueToRecordVal(vals, filter->fields, &position); + RecordVal * r = ValueToRecordVal(vals, stream->fields, &position); out_vals.push_back(r); } else { - for ( int j = 0; j < filter->fields->NumFields(); j++) + for ( int j = 0; j < stream->fields->NumFields(); j++) { Val* val = 0; - if ( filter->fields->FieldType(j)->Tag() == TYPE_RECORD ) + if ( stream->fields->FieldType(j)->Tag() == TYPE_RECORD ) val = ValueToRecordVal(vals, - filter->fields->FieldType(j)->AsRecordType(), + stream->fields->FieldType(j)->AsRecordType(), &position); else { - val = ValueToVal(vals[position], filter->fields->FieldType(j)); + val = ValueToVal(vals[position], stream->fields->FieldType(j)); position++; } out_vals.push_back(val); } } - SendEvent(filter->event, out_vals); + SendEvent(stream->event, out_vals); - return filter->fields->NumFields(); + return stream->fields->NumFields(); } @@ -1264,31 +1264,31 @@ int Manager::PutTable(Stream* i, const Value* const *vals) { assert(i); - assert(i->filter_type == TABLE_FILTER); - TableStream* filter = (TableStream*) i; + assert(i->stream_type == TABLE_FILTER); + TableStream* stream = (TableStream*) i; - Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); + Val* idxval = ValueToIndexVal(stream->num_idx_fields, stream->itype, vals); Val* valval; - int position = filter->num_idx_fields; - if ( filter->num_val_fields == 0 ) + int position = stream->num_idx_fields; + if ( stream->num_val_fields == 0 ) valval = 0; - else if ( filter->num_val_fields == 1 && filter->want_record == 0 ) - valval = ValueToVal(vals[position], filter->rtype->FieldType(0)); + else if ( stream->num_val_fields == 1 && stream->want_record == 0 ) + valval = ValueToVal(vals[position], stream->rtype->FieldType(0)); else - valval = ValueToRecordVal(vals, filter->rtype, &position); + valval = ValueToRecordVal(vals, stream->rtype, &position); // if we have a subscribed event, we need to figure out, if this is an update or not // same for predicates 
- if ( filter->pred || filter->event ) + if ( stream->pred || stream->event ) { bool updated = false; Val* oldval = 0; - if ( filter->num_val_fields > 0 ) + if ( stream->num_val_fields > 0 ) { // in that case, we need the old value to send the event (if we send an event). - oldval = filter->tab->Lookup(idxval, false); + oldval = stream->tab->Lookup(idxval, false); } if ( oldval != 0 ) @@ -1300,11 +1300,11 @@ int Manager::PutTable(Stream* i, const Value* const *vals) // predicate if we want the update or not - if ( filter->pred ) + if ( stream->pred ) { EnumVal* ev; int startpos = 0; - Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); + Val* predidx = ValueToRecordVal(vals, stream->itype, &startpos); Ref(valval); if ( updated ) @@ -1315,10 +1315,10 @@ int Manager::PutTable(Stream* i, const Value* const *vals) BifType::Enum::Input::Event); bool result; - if ( filter->num_val_fields > 0 ) // we have values - result = CallPred(filter->pred, 3, ev, predidx, valval); + if ( stream->num_val_fields > 0 ) // we have values + result = CallPred(stream->pred, 3, ev, predidx, valval); else // no values - result = CallPred(filter->pred, 2, ev, predidx); + result = CallPred(stream->pred, 2, ev, predidx); if ( result == false ) { @@ -1326,38 +1326,38 @@ int Manager::PutTable(Stream* i, const Value* const *vals) Unref(idxval); Unref(valval); Unref(oldval); - return filter->num_val_fields + filter->num_idx_fields; + return stream->num_val_fields + stream->num_idx_fields; } } - filter->tab->Assign(idxval, valval); + stream->tab->Assign(idxval, valval); - if ( filter->event ) + if ( stream->event ) { EnumVal* ev; int startpos = 0; - Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); + Val* predidx = ValueToRecordVal(vals, stream->itype, &startpos); if ( updated ) { // in case of update send back the old value. - assert ( filter->num_val_fields > 0 ); + assert ( stream->num_val_fields > 0 ); ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); - SendEvent(filter->event, 4, filter->description->Ref(), + SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx, oldval); } else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - if ( filter->num_val_fields == 0 ) - SendEvent(filter->event, 4, filter->description->Ref(), + if ( stream->num_val_fields == 0 ) + SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx); else - SendEvent(filter->event, 4, filter->description->Ref(), + SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx, valval->Ref()); } @@ -1365,10 +1365,10 @@ int Manager::PutTable(Stream* i, const Value* const *vals) } else // no predicates or other stuff - filter->tab->Assign(idxval, valval); + stream->tab->Assign(idxval, valval); - return filter->num_idx_fields + filter->num_val_fields; + return stream->num_idx_fields + stream->num_val_fields; } // Todo:: perhaps throw some kind of clear-event? @@ -1386,10 +1386,10 @@ void Manager::Clear(ReaderFrontend* reader) i->name.c_str()); #endif - assert(i->filter_type == TABLE_FILTER); - TableStream* filter = (TableStream*) i; + assert(i->stream_type == TABLE_FILTER); + TableStream* stream = (TableStream*) i; - filter->tab->RemoveAll(); + stream->tab->RemoveAll(); } // put interface: delete old entry from table. 
@@ -1405,28 +1405,28 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) bool success = false; int readVals = 0; - if ( i->filter_type == TABLE_FILTER ) + if ( i->stream_type == TABLE_FILTER ) { - TableStream* filter = (TableStream*) i; - Val* idxval = ValueToIndexVal(filter->num_idx_fields, filter->itype, vals); + TableStream* stream = (TableStream*) i; + Val* idxval = ValueToIndexVal(stream->num_idx_fields, stream->itype, vals); assert(idxval != 0); - readVals = filter->num_idx_fields + filter->num_val_fields; - bool filterresult = true; + readVals = stream->num_idx_fields + stream->num_val_fields; + bool streamresult = true; - if ( filter->pred || filter->event ) + if ( stream->pred || stream->event ) { - Val *val = filter->tab->Lookup(idxval); + Val *val = stream->tab->Lookup(idxval); - if ( filter->pred ) + if ( stream->pred ) { Ref(val); EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); int startpos = 0; - Val* predidx = ValueToRecordVal(vals, filter->itype, &startpos); + Val* predidx = ValueToRecordVal(vals, stream->itype, &startpos); - filterresult = CallPred(filter->pred, 3, ev, predidx, val); + streamresult = CallPred(stream->pred, 3, ev, predidx, val); - if ( filterresult == false ) + if ( streamresult == false ) { // keep it. Unref(idxval); @@ -1435,21 +1435,21 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) } - // only if filter = true -> no filtering - if ( filterresult && filter->event ) + // only if stream = true -> no streaming + if ( streamresult && stream->event ) { Ref(idxval); assert(val != 0); Ref(val); EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - SendEvent(filter->event, 4, filter->description->Ref(), ev, idxval, val); + SendEvent(stream->event, 4, stream->description->Ref(), ev, idxval, val); } } - // only if filter = true -> no filtering - if ( filterresult ) + // only if stream = true -> no streaming + if ( streamresult ) { - Val* retptr = filter->tab->Delete(idxval); + Val* retptr = stream->tab->Delete(idxval); success = ( retptr != 0 ); if ( !success ) reporter->Error("Internal error while deleting values from input table"); @@ -1458,7 +1458,7 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) } } - else if ( i->filter_type == EVENT_FILTER ) + else if ( i->stream_type == EVENT_FILTER ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); readVals = SendEventStreamEvent(i, type, vals); diff --git a/src/input/Manager.h b/src/input/Manager.h index 0bdb2eb58d..d15febe0d6 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. // -// Class for managing input streams and filters +// Class for managing input streams #ifndef INPUT_MANAGER_H #define INPUT_MANAGER_H @@ -34,12 +34,7 @@ public: ~Manager(); /** - * Creates a new input stream. - * Add a filter to an input source, which will write the data from the data source into - * a Bro table. - * Add a filter to an input source, which sends events for read input data. - * - * @param id The enum value corresponding the input stream. + * Creates a new input stream which will write the data from the data source into * * @param description A record of script type \c Input:StreamDescription. * @@ -47,6 +42,15 @@ public: * input.bif, which just forwards here. 
*/ bool CreateTableStream(RecordVal* description); + + /** + * Creates a new input stream which sends events for read input data. + * + * @param description A record of script type \c Input:StreamDescription. + * + * This method corresponds directly to the internal BiF defined in + * input.bif, which just forwards here. + */ bool CreateEventStream(RecordVal* description); @@ -104,11 +108,11 @@ protected: // doing so creates a new thread!). ReaderBackend* CreateBackend(ReaderFrontend* frontend, bro_int_t type); - // Functions are called from the ReaderBackend to notify the manager, that a filter has been removed + // Functions are called from the ReaderBackend to notify the manager, that a stream has been removed // or a stream has been closed. - // Used to prevent race conditions where data for a specific filter is still in the queue when the + // Used to prevent race conditions where data for a specific stream is still in the queue when the // RemoveStream directive is executed by the main thread. - // This makes sure all data that has ben queued for a filter is still received. + // This makes sure all data that has ben queued for a stream is still received. bool RemoveStreamContinuation(ReaderFrontend* reader); private: @@ -118,13 +122,13 @@ private: bool CreateStream(Stream*, RecordVal* description); - // SendEntry implementation for Tablefilter + // SendEntry implementation for Table stream int SendEntryTable(Stream* i, const threading::Value* const *vals); - // Put implementation for Tablefilter + // Put implementation for Table stream int PutTable(Stream* i, const threading::Value* const *vals); - // SendEntry and Put implementation for Eventfilter + // SendEntry and Put implementation for Event stream int SendEventStreamEvent(Stream* i, EnumVal* type, const threading::Value* const *vals); // Checks is a bro type can be used for data reading. The equivalend in threading cannot be used, because we have support different types diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index ca54d8a204..b4d9101bc8 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -55,7 +55,8 @@ public: * * @param arg_num_fields number of fields contained in \a fields * - * @param fields the types and names of the fields to be retrieved from the input source + * @param fields the types and names of the fields to be retrieved + * from the input source * * @return False if an error occured. */ @@ -72,7 +73,8 @@ public: /** * Force trigger an update of the input stream. - * The action that will be taken depends on the current read mode and the individual input backend + * The action that will be taken depends on the current read mode and the + * individual input backend * * An backend can choose to ignore this. * @@ -90,8 +92,8 @@ protected: // Methods that have to be overwritten by the individual readers /** - * Reader-specific intialization method. Note that data may only be read from the input source - * after the Start function has been called. + * Reader-specific intialization method. Note that data may only be + * read from the input source after the Start function has been called. * * A reader implementation must override this method. If it returns * false, it will be assumed that a fatal error has occured that @@ -145,29 +147,32 @@ protected: */ void SendEvent(const string& name, const int num_vals, threading::Value* *vals); - // Content-sending-functions (simple mode). 
Including table-specific stuff that simply is not used if we have no table + // Content-sending-functions (simple mode). Including table-specific stuff that + // simply is not used if we have no table /** - * Method allowing a reader to send a list of values read for a specific filter back to the manager. + * Method allowing a reader to send a list of values read for a specific stream + * back to the manager. * - * If the filter points to a table, the values are inserted into the table; if it points to an event, the event is raised + * If the stream is a table stream, the values are inserted into the table; + * if it is an event stream, the event is raised. * - * @param val list of threading::Values expected by the filter + * @param val list of threading::Values expected by the stream */ void Put(threading::Value* *val); /** * Method allowing a reader to delete a specific value from a bro table. * - * If the receiving filter is an event, only a removed event is raised + * If the receiving stream is an event stream, only a removed event is raised * - * @param val list of threading::Values expected by the filter + * @param val list of threading::Values expected by the stream */ void Delete(threading::Value* *val); /** * Method allowing a reader to clear a value from a bro table. * - * If the receiving filter is an event, this is ignored. + * If the receiving stream is an event stream, this is ignored. * */ void Clear(); @@ -176,19 +181,22 @@ protected: /** - * Method allowing a reader to send a list of values read for a specific filter back to the manager. + * Method allowing a reader to send a list of values read for a specific stream + * back to the manager. * - * If the filter points to a table, the values are inserted into the table; if it points to an event, the event is raised. + * If the stream is a table stream, the values are inserted into the table; + * if it is an event stream, the event is raised. * - * @param val list of threading::Values expected by the filter + * @param val list of threading::Values expected by the stream */ void SendEntry(threading::Value* *vals); /** - * Method telling the manager, that the current list of entries sent by SendEntry is finished. + * Method telling the manager, that the current list of entries sent by SendEntry + * is finished. 
* - * For table filters, all entries that were not updated since the last EndCurrentSend will be deleted, because they are no longer - * present in the input source + * For table streams, all entries that were not updated since the last EndCurrentSend + * will be deleted, because they are no longer present in the input source * */ void EndCurrentSend(); diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index c798c21a5e..8223d6e201 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -398,7 +398,7 @@ bool Ascii::DoUpdate() if ( mode == STREAM ) { file->clear(); // remove end of file evil bits - if ( !ReadHeader(true) ) // in case filters changed + if ( !ReadHeader(true) ) return false; // header reading failed break; diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index 0953075bff..e5f3070724 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -74,7 +74,7 @@ private: string unset_field; - // keep a copy of the headerline to determine field locations when filters change + // keep a copy of the headerline to determine field locations when stream descriptions change string headerline; int mode; diff --git a/src/input/readers/Raw.h b/src/input/readers/Raw.h index 59f9202960..9f575bb89c 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/Raw.h @@ -42,9 +42,6 @@ private: // Options set from the script-level. string separator; - // keep a copy of the headerline to determine field locations when filters change - string headerline; - int mode; bool execute; bool firstrun; From b37f9e38f6ac9d82bc3500cd5b7aadff7e35ea54 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 25 May 2012 15:14:25 -0700 Subject: [PATCH 322/651] Input framework merge in progress. --- scripts/base/frameworks/input/main.bro | 53 +- .../base/frameworks/input/readers/ascii.bro | 2 + src/input/Manager.cc | 1019 +++++++++-------- src/input/Manager.h | 140 +-- src/input/ReaderBackend.cc | 84 +- src/input/ReaderBackend.h | 162 ++- src/input/ReaderFrontend.cc | 32 +- src/input/ReaderFrontend.h | 44 +- src/input/readers/Ascii.cc | 211 ++-- src/input/readers/Ascii.h | 60 +- src/input/readers/Benchmark.cc | 80 +- src/input/readers/Benchmark.h | 31 +- src/input/readers/Raw.cc | 151 +-- src/input/readers/Raw.h | 39 +- .../out | 2 +- .../base/frameworks/input/executeraw.bro | 3 +- 16 files changed, 1063 insertions(+), 1050 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index c9ce0e321e..7a372dc120 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -4,7 +4,7 @@ module Input; export { - + ## The default input reader used. Defaults to `READER_ASCII`. const default_reader = READER_ASCII &redef; @@ -13,52 +13,56 @@ export { ## TableFilter description type used for the `table` method. type TableDescription: record { ## Common definitions for tables and events - + ## String that allows the reader to find the source. ## For `READER_ASCII`, this is the filename. source: string; - - ## Reader to use for this steam + + ## Reader to use for this stream reader: Reader &default=default_reader; ## Read mode to use for this stream mode: Mode &default=default_mode; ## Descriptive name. 
Used to remove a stream at a later time - name: string; + name: string; - ## Special definitions for tables + # Special definitions for tables - ## Table which will contain the data read by the input framework + ## Table which will receive the data read by the input framework destination: any; + ## Record that defines the values used as the index of the table idx: any; - ## Record that defines the values used as the values of the table + + ## Record that defines the values used as the elements of the table ## If val is undefined, destination has to be a set. val: any &optional; - ## Defines if the value of the table is a record (default), or a single value. - ## Val can only contain one element when this is set to false. + + ## Defines if the value of the table is a record (default), or a single value. Val + ## can only contain one element when this is set to false. want_record: bool &default=T; - ## The event that is raised each time a value is added to, changed in or removed from the table. - ## The event will receive an Input::Event enum as the first argument, the idx record as the second argument - ## and the value (record) as the third argument. - ev: any &optional; # event containing idx, val as values. + ## The event that is raised each time a value is added to, changed in or removed + ## from the table. The event will receive an Input::Event enum as the first + ## argument, the idx record as the second argument and the value (record) as the + ## third argument. + ev: any &optional; # event containing idx, val as values. - ## Predicate function, that can decide if an insertion, update or removal should really be executed. - ## Parameters are the same as for the event. If true is returned, the update is performed. If false - ## is returned, it is skipped + ## Predicate function that can decide if an insertion, update or removal should + ## really be executed. Parameters are the same as for the event. If true is + ## returned, the update is performed. If false is returned, it is skipped. pred: function(typ: Input::Event, left: any, right: any): bool &optional; }; ## EventFilter description type used for the `event` method. type EventDescription: record { ## Common definitions for tables and events - + ## String that allows the reader to find the source. ## For `READER_ASCII`, this is the filename. source: string; - + ## Reader to use for this steam reader: Reader &default=default_reader; @@ -66,19 +70,20 @@ export { mode: Mode &default=default_mode; ## Descriptive name. Used to remove a stream at a later time - name: string; + name: string; + + # Special definitions for events - ## Special definitions for events - ## Record describing the fields to be retrieved from the source input. fields: any; + ## If want_record if false (default), the event receives each value in fields as a seperate argument. ## If it is set to true, the event receives all fields in a signle record value. want_record: bool &default=F; ## The event that is rised each time a new line is received from the reader. ## The event will receive an Input::Event enum as the first element, and the fields as the following arguments. - ev: any; + ev: any; }; @@ -86,7 +91,7 @@ export { ## ## description: `TableDescription` record describing the source. global add_table: function(description: Input::TableDescription) : bool; - + ## Create a new event input from a given source. Returns true on success. ## ## description: `TableDescription` record describing the source. 
diff --git a/scripts/base/frameworks/input/readers/ascii.bro b/scripts/base/frameworks/input/readers/ascii.bro index 14c04757f7..7fca1ad795 100644 --- a/scripts/base/frameworks/input/readers/ascii.bro +++ b/scripts/base/frameworks/input/readers/ascii.bro @@ -1,4 +1,6 @@ ##! Interface for the ascii input reader. +##! +##! The defaults are set to match Bro's ASCII output. module InputAscii; diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 3bae7dbb28..6cae5e2f34 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -23,27 +23,41 @@ using namespace input; using threading::Value; using threading::Field; +struct ReaderDefinition { + bro_int_t type; // The reader type. + const char *name; // Descriptive name for error messages. + bool (*init)(); // Optional one-time initializing function. + ReaderBackend* (*factory)(ReaderFrontend* frontend); // Factory function for creating instances. +}; + +ReaderDefinition input_readers[] = { + { BifEnum::Input::READER_ASCII, "Ascii", 0, reader::Ascii::Instantiate }, + { BifEnum::Input::READER_RAW, "Raw", 0, reader::Raw::Instantiate }, + { BifEnum::Input::READER_BENCHMARK, "Benchmark", 0, reader::Benchmark::Instantiate }, + + // End marker + { BifEnum::Input::READER_DEFAULT, "None", 0, (ReaderBackend* (*)(ReaderFrontend* frontend))0 } +}; + /** - * InputHashes are used as Dictionaries to store the value and index hashes for all - * lines currently stored in a table. Index hash is stored as HashKey*, because it is - * thrown into other Bro functions that need the complex structure of it. - * For everything we do (with values), we just take the hash_t value and compare it - * directly with == + * InputHashes are used as Dictionaries to store the value and index hashes + * for all lines currently stored in a table. Index hash is stored as + * HashKey*, because it is thrown into other Bro functions that need the + * complex structure of it. For everything we do (with values), we just take + * the hash_t value and compare it directly with "==" */ -struct InputHash - { +struct InputHash { hash_t valhash; - HashKey* idxkey; + HashKey* idxkey; ~InputHash(); - }; +}; -InputHash::~InputHash() +InputHash::~InputHash() { - if ( idxkey ) - delete idxkey; - } + delete idxkey; + } -static void input_hash_delete_func(void* val) +static void input_hash_delete_func(void* val) { InputHash* h = (InputHash*) val; delete h; @@ -52,14 +66,14 @@ static void input_hash_delete_func(void* val) declare(PDict, InputHash); /** - * Base stuff that every stream can do + * Base stuff that every stream can do. 
*/ class Manager::Stream { public: string name; string source; bool removed; - + int mode; StreamType stream_type; // to distinguish between event and table streams @@ -73,23 +87,24 @@ public: virtual ~Stream(); }; -Manager::Stream::Stream() +Manager::Stream::Stream() { - type = 0; - reader = 0; - description = 0; + type = 0; + reader = 0; + description = 0; removed = false; } -Manager::Stream::~Stream() +Manager::Stream::~Stream() { - if ( type ) + if ( type ) Unref(type); - if ( description ) + + if ( description ) Unref(description); - if ( reader ) - delete(reader); + if ( reader ) + delete(reader); } class Manager::TableStream: public Manager::Stream { @@ -98,7 +113,7 @@ public: unsigned int num_idx_fields; unsigned int num_val_fields; bool want_record; - EventHandlerPtr table_event; + EventHandlerPtr table_event; TableVal* tab; RecordType* rtype; @@ -107,9 +122,9 @@ public: PDict(InputHash)* currDict; PDict(InputHash)* lastDict; - Func* pred; + Func* pred; - EventHandlerPtr event; + EventHandlerPtr event; TableStream(); ~TableStream(); @@ -122,15 +137,15 @@ public: RecordType* fields; unsigned int num_fields; - bool want_record; + bool want_record; EventStream(); ~EventStream(); }; -Manager::TableStream::TableStream() : Manager::Stream::Stream() +Manager::TableStream::TableStream() : Manager::Stream::Stream() { - stream_type = TABLE_FILTER; - + stream_type = TABLE_STREAM; + tab = 0; itype = 0; rtype = 0; @@ -141,61 +156,47 @@ Manager::TableStream::TableStream() : Manager::Stream::Stream() pred = 0; } -Manager::EventStream::EventStream() : Manager::Stream::Stream() +Manager::EventStream::EventStream() : Manager::Stream::Stream() { fields = 0; - stream_type = EVENT_FILTER; + stream_type = EVENT_STREAM; } -Manager::EventStream::~EventStream() +Manager::EventStream::~EventStream() { - if ( fields ) + if ( fields ) Unref(fields); } -Manager::TableStream::~TableStream() +Manager::TableStream::~TableStream() { if ( tab ) Unref(tab); - if ( itype ) + + if ( itype ) Unref(itype); + if ( rtype ) // can be 0 for sets Unref(rtype); - if ( currDict != 0 ) + if ( currDict != 0 ) { currDict->Clear(); delete currDict; } - if ( lastDict != 0 ) + if ( lastDict != 0 ) { lastDict->Clear();; delete lastDict; } - } - -struct ReaderDefinition { - bro_int_t type; // the type - const char *name; // descriptive name for error messages - bool (*init)(); // optional one-time inifializing function - ReaderBackend* (*factory)(ReaderFrontend* frontend); // factory function for creating instances -}; - -ReaderDefinition input_readers[] = { - { BifEnum::Input::READER_ASCII, "Ascii", 0, reader::Ascii::Instantiate }, - { BifEnum::Input::READER_RAW, "Raw", 0, reader::Raw::Instantiate }, - { BifEnum::Input::READER_BENCHMARK, "Benchmark", 0, reader::Benchmark::Instantiate }, - - // End marker - { BifEnum::Input::READER_DEFAULT, "None", 0, (ReaderBackend* (*)(ReaderFrontend* frontend))0 } -}; + } Manager::Manager() { } -Manager::~Manager() +Manager::~Manager() { for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) { @@ -205,47 +206,48 @@ Manager::~Manager() } -ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) +ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) { ReaderDefinition* ir = input_readers; - while ( true ) + while ( true ) { - if ( ir->type == BifEnum::Input::READER_DEFAULT ) + if ( ir->type == BifEnum::Input::READER_DEFAULT ) { reporter->Error("The reader that was requested was not found and could not be initialized."); return 0; } - if ( 
ir->type != type ) + if ( ir->type != type ) { // no, didn't find the right one... ++ir; continue; } - + // call init function of writer if presnt - if ( ir->init ) + if ( ir->init ) { - if ( (*ir->init)() ) + if ( (*ir->init)() ) { //clear it to be not called again ir->init = 0; - } + } + else { // ohok. init failed, kill factory for all eternity ir->factory = 0; DBG_LOG(DBG_LOGGING, "Failed to init input class %s", ir->name); return 0; } - + } - - if ( !ir->factory ) + + if ( ! ir->factory ) // no factory? return 0; - + // all done. break. break; } @@ -259,45 +261,43 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) return backend; } -// create a new input reader object to be used at whomevers leisure lateron. -bool Manager::CreateStream(Stream* info, RecordVal* description) +// Create a new input reader object to be used at whomevers leisure lateron. +bool Manager::CreateStream(Stream* info, RecordVal* description) { ReaderDefinition* ir = input_readers; - + RecordType* rtype = description->Type()->AsRecordType(); - if ( ! ( same_type(rtype, BifType::Record::Input::TableDescription, 0) + if ( ! ( same_type(rtype, BifType::Record::Input::TableDescription, 0) || same_type(rtype, BifType::Record::Input::EventDescription, 0) ) ) { reporter->Error("Streamdescription argument not of right type for new input stream"); return false; } - + Val* name_val = description->LookupWithDefault(rtype->FieldOffset("name")); string name = name_val->AsString()->CheckString(); Unref(name_val); + Stream *i = FindStream(name); + if ( i != 0 ) { - Stream *i = FindStream(name); - if ( i != 0 ) - { - reporter->Error("Trying create already existing input stream %s", - name.c_str()); - return false; - } + reporter->Error("Trying create already existing input stream %s", + name.c_str()); + return false; } EnumVal* reader = description->LookupWithDefault(rtype->FieldOffset("reader"))->AsEnumVal(); - ReaderFrontend* reader_obj = new ReaderFrontend(reader->InternalInt()); - assert(reader_obj); - - // get the source... + ReaderFrontend* reader_obj = new ReaderFrontend(reader->InternalInt()); + assert(reader_obj); + + // get the source ... Val* sourceval = description->LookupWithDefault(rtype->FieldOffset("source")); assert ( sourceval != 0 ); const BroString* bsource = sourceval->AsString(); string source((const char*) bsource->Bytes(), bsource->Len()); Unref(sourceval); - + EnumVal* mode = description->LookupWithDefault(rtype->FieldOffset("mode"))->AsEnumVal(); info->mode = mode->InternalInt(); Unref(mode); @@ -311,25 +311,23 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) DBG_LOG(DBG_INPUT, "Successfully created new input stream %s", name.c_str()); - + return true; - } -bool Manager::CreateEventStream(RecordVal* fval) +bool Manager::CreateEventStream(RecordVal* fval) { - RecordType* rtype = fval->Type()->AsRecordType(); if ( ! 
same_type(rtype, BifType::Record::Input::EventDescription, 0) ) { reporter->Error("EventDescription argument not of right type"); return false; } - + EventStream* stream = new EventStream(); { bool res = CreateStream(stream, fval); - if ( res == false ) + if ( res == false ) { delete stream; return false; @@ -337,94 +335,93 @@ bool Manager::CreateEventStream(RecordVal* fval) } - RecordType *fields = fval->LookupWithDefault(rtype->FieldOffset("fields"))->AsType()->AsTypeType()->Type()->AsRecordType(); - - Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); + RecordType *fields = fval->LookupWithDefault(rtype->FieldOffset("fields"))->AsType()->AsTypeType()->Type()->AsRecordType(); + + Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); Val* event_val = fval->LookupWithDefault(rtype->FieldOffset("ev")); Func* event = event_val->AsFunc(); Unref(event_val); + FuncType* etype = event->FType()->AsFuncType(); + + if ( ! etype->IsEvent() ) { - FuncType* etype = event->FType()->AsFuncType(); - - if ( ! etype->IsEvent() ) + reporter->Error("stream event is a function, not an event"); + return false; + } + + const type_list* args = etype->ArgTypes()->Types(); + + if ( args->length() < 2 ) + { + reporter->Error("event takes not enough arguments"); + return false; + } + + if ( ! same_type((*args)[1], BifType::Enum::Input::Event, 0) ) + { + reporter->Error("events second attribute must be of type Input::Event"); + return false; + } + + if ( ! same_type((*args)[0], BifType::Record::Input::EventDescription, 0) ) + { + reporter->Error("events first attribute must be of type Input::EventDescription"); + return false; + } + + if ( want_record->InternalInt() == 0 ) + { + if ( args->length() != fields->NumFields() + 2 ) { - reporter->Error("stream event is a function, not an event"); + reporter->Error("event has wrong number of arguments"); return false; } - const type_list* args = etype->ArgTypes()->Types(); - - if ( args->length() < 2 ) + for ( int i = 0; i < fields->NumFields(); i++ ) { - reporter->Error("event takes not enough arguments"); - return false; - } - - if ( ! same_type((*args)[1], BifType::Enum::Input::Event, 0) ) - { - reporter->Error("events second attribute must be of type Input::Event"); - return false; - } - - if ( ! 
same_type((*args)[0], BifType::Record::Input::EventDescription, 0) ) - { - reporter->Error("events first attribute must be of type Input::EventDescription"); - return false; - } - - if ( want_record->InternalInt() == 0 ) - { - if ( args->length() != fields->NumFields() + 2 ) - { - reporter->Error("event has wrong number of arguments"); - return false; - } - - for ( int i = 0; i < fields->NumFields(); i++ ) - { - if ( !same_type((*args)[i+2], fields->FieldType(i) ) ) - { - reporter->Error("Incompatible type for event"); - return false; - } - } - - } - else if ( want_record->InternalInt() == 1 ) - { - if ( args->length() != 3 ) - { - reporter->Error("event has wrong number of arguments"); - return false; - } - - if ( !same_type((*args)[2], fields ) ) + if ( !same_type((*args)[i+2], fields->FieldType(i) ) ) { reporter->Error("Incompatible type for event"); return false; } - - } - else - assert(false); + } - } + } + + else if ( want_record->InternalInt() == 1 ) + { + if ( args->length() != 3 ) + { + reporter->Error("event has wrong number of arguments"); + return false; + } + + if ( !same_type((*args)[2], fields ) ) + { + reporter->Error("Incompatible type for event"); + return false; + } + + } + + else + assert(false); vector fieldsV; // vector, because UnrollRecordType needs it bool status = !UnrollRecordType(&fieldsV, fields, ""); - if ( status ) + if ( status ) { reporter->Error("Problem unrolling"); return false; } - + Field** logf = new Field*[fieldsV.size()]; - for ( unsigned int i = 0; i < fieldsV.size(); i++ ) + for ( unsigned int i = 0; i < fieldsV.size(); i++ ) logf[i] = fieldsV[i]; Unref(fields); // ref'd by lookupwithdefault @@ -445,7 +442,7 @@ bool Manager::CreateEventStream(RecordVal* fval) return true; } -bool Manager::CreateTableStream(RecordVal* fval) +bool Manager::CreateTableStream(RecordVal* fval) { RecordType* rtype = fval->Type()->AsRecordType(); if ( ! same_type(rtype, BifType::Record::Input::TableDescription, 0) ) @@ -457,7 +454,7 @@ bool Manager::CreateTableStream(RecordVal* fval) TableStream* stream = new TableStream(); { bool res = CreateStream(stream, fval); - if ( res == false ) + if ( res == false ) { delete stream; return false; @@ -468,8 +465,8 @@ bool Manager::CreateTableStream(RecordVal* fval) RecordType *idx = fval->LookupWithDefault(rtype->FieldOffset("idx"))->AsType()->AsTypeType()->Type()->AsRecordType(); RecordType *val = 0; - - if ( fval->LookupWithDefault(rtype->FieldOffset("val")) != 0 ) + + if ( fval->LookupWithDefault(rtype->FieldOffset("val")) != 0 ) { val = fval->LookupWithDefault(rtype->FieldOffset("val"))->AsType()->AsTypeType()->Type()->AsRecordType(); Unref(val); // The lookupwithdefault in the if-clause ref'ed val. 
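The checks above fix the shape of an event stream's handler: the Input::EventDescription comes first, then the Input::Event, followed by one parameter per input field (or a single record when want_record=T). A minimal sketch that satisfies them, assuming the script-level add_event counterpart to add_table and an illustrative two-column source file:

    type LineFields: record {
        ip: addr;
        msg: string;
    };

    # One parameter per field, since want_record stays at its default of F.
    event got_line(desc: Input::EventDescription, tpe: Input::Event,
                   ip: addr, msg: string)
        {
        print "line from input", ip, msg;
        }

    event bro_init()
        {
        Input::add_event([$source="lines.data", $name="lines",
                          $fields=LineFields, $ev=got_line]);
        }
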
@@ -478,30 +475,28 @@ bool Manager::CreateTableStream(RecordVal* fval) TableVal *dst = fval->LookupWithDefault(rtype->FieldOffset("destination"))->AsTableVal(); // check if index fields match table description + int num = idx->NumFields(); + const type_list* tl = dst->Type()->AsTableType()->IndexTypes(); + + loop_over_list(*tl, j) { - int num = idx->NumFields(); - const type_list* tl = dst->Type()->AsTableType()->IndexTypes(); - - loop_over_list(*tl, j) + if ( j >= num ) { - if ( j >= num ) - { - reporter->Error("Table type has more indexes than index definition"); - return false; - } - - if ( !same_type(idx->FieldType(j), (*tl)[j]) ) - { - reporter->Error("Table type does not match index type"); - return false; - } - } - - if ( num != j ) - { - reporter->Error("Table has less elements than index definition"); + reporter->Error("Table type has more indexes than index definition"); return false; } + + if ( ! same_type(idx->FieldType(j), (*tl)[j]) ) + { + reporter->Error("Table type does not match index type"); + return false; + } + } + + if ( num != j ) + { + reporter->Error("Table has less elements than index definition"); + return false; } Val *want_record = fval->LookupWithDefault(rtype->FieldOffset("want_record")); @@ -509,12 +504,12 @@ bool Manager::CreateTableStream(RecordVal* fval) Val* event_val = fval->LookupWithDefault(rtype->FieldOffset("ev")); Func* event = event_val ? event_val->AsFunc() : 0; Unref(event_val); - - if ( event ) + + if ( event ) { FuncType* etype = event->FType()->AsFuncType(); - - if ( ! etype->IsEvent() ) + + if ( ! etype->IsEvent() ) { reporter->Error("stream event is a function, not an event"); return false; @@ -522,37 +517,37 @@ bool Manager::CreateTableStream(RecordVal* fval) const type_list* args = etype->ArgTypes()->Types(); - if ( args->length() != 4 ) + if ( args->length() != 4 ) { reporter->Error("Table event must take 4 arguments"); return false; } - if ( ! same_type((*args)[0], BifType::Record::Input::TableDescription, 0) ) + if ( ! same_type((*args)[0], BifType::Record::Input::TableDescription, 0) ) { reporter->Error("table events first attribute must be of type Input::TableDescription"); return false; - } + } - if ( ! same_type((*args)[1], BifType::Enum::Input::Event, 0) ) + if ( ! same_type((*args)[1], BifType::Enum::Input::Event, 0) ) { reporter->Error("table events second attribute must be of type Input::Event"); return false; - } + } - if ( ! same_type((*args)[2], idx) ) + if ( ! same_type((*args)[2], idx) ) { reporter->Error("table events index attributes do not match"); return false; - } - - if ( want_record->InternalInt() == 1 && ! same_type((*args)[3], val) ) + } + + if ( want_record->InternalInt() == 1 && ! 
same_type((*args)[3], val) ) { reporter->Error("table events value attributes do not match"); return false; - } - else if ( want_record->InternalInt() == 0 - && !same_type((*args)[3], val->FieldType(0) ) ) + } + else if ( want_record->InternalInt() == 0 + && !same_type((*args)[3], val->FieldType(0) ) ) { reporter->Error("table events value attribute does not match"); return false; @@ -560,33 +555,32 @@ bool Manager::CreateTableStream(RecordVal* fval) assert(want_record->InternalInt() == 1 || want_record->InternalInt() == 0); - } + } vector fieldsV; // vector, because we don't know the length beforehands bool status = !UnrollRecordType(&fieldsV, idx, ""); int idxfields = fieldsV.size(); - + if ( val ) // if we are not a set status = status || !UnrollRecordType(&fieldsV, val, ""); int valfields = fieldsV.size() - idxfields; - if ( !val ) + if ( ! val ) assert(valfields == 0); - if ( status ) + if ( status ) { reporter->Error("Problem unrolling"); return false; } - - + Field** fields = new Field*[fieldsV.size()]; - for ( unsigned int i = 0; i < fieldsV.size(); i++ ) + for ( unsigned int i = 0; i < fieldsV.size(); i++ ) fields[i] = fieldsV[i]; - + stream->pred = pred ? pred->AsFunc() : 0; stream->num_idx_fields = idxfields; stream->num_val_fields = valfields; @@ -603,9 +597,9 @@ bool Manager::CreateTableStream(RecordVal* fval) Unref(want_record); // ref'd by lookupwithdefault Unref(pred); - if ( valfields > 1 ) + if ( valfields > 1 ) { - if ( ! stream->want_record ) + if ( ! stream->want_record ) { reporter->Error("Stream %s does not want a record (want_record=F), but has more then one value field. Aborting", stream->name.c_str()); delete stream; @@ -664,7 +658,7 @@ bool Manager::IsCompatibleType(BroType* t, bool atomic_only) { if ( atomic_only ) return false; - + return IsCompatibleType(t->AsVectorType()->YieldType(), true); } @@ -676,14 +670,14 @@ bool Manager::IsCompatibleType(BroType* t, bool atomic_only) } -bool Manager::RemoveStream(const string &name) +bool Manager::RemoveStream(const string &name) { Stream *i = FindStream(name); - if ( i == 0 ) + if ( i == 0 ) return false; // not found - if ( i->removed ) + if ( i->removed ) { reporter->Error("Stream %s is already queued for removal. Ignoring remove.", name.c_str()); return false; @@ -701,11 +695,11 @@ bool Manager::RemoveStream(const string &name) return true; } -bool Manager::RemoveStreamContinuation(ReaderFrontend* reader) +bool Manager::RemoveStreamContinuation(ReaderFrontend* reader) { Stream *i = FindStream(reader); - if ( i == 0 ) + if ( i == 0 ) { reporter->Error("Stream not found in RemoveStreamContinuation"); return false; @@ -718,49 +712,51 @@ bool Manager::RemoveStreamContinuation(ReaderFrontend* reader) readers.erase(reader); delete(i); + return true; } -bool Manager::UnrollRecordType(vector *fields, - const RecordType *rec, const string& nameprepend) +bool Manager::UnrollRecordType(vector *fields, + const RecordType *rec, const string& nameprepend) { - for ( int i = 0; i < rec->NumFields(); i++ ) + for ( int i = 0; i < rec->NumFields(); i++ ) { - if ( !IsCompatibleType(rec->FieldType(i)) ) + if ( ! 
IsCompatibleType(rec->FieldType(i)) ) { reporter->Error("Incompatible type \"%s\" in table definition for ReaderFrontend", type_name(rec->FieldType(i)->Tag())); return false; } - if ( rec->FieldType(i)->Tag() == TYPE_RECORD ) + if ( rec->FieldType(i)->Tag() == TYPE_RECORD ) { string prep = nameprepend + rec->FieldName(i) + "."; - - if ( !UnrollRecordType(fields, rec->FieldType(i)->AsRecordType(), prep) ) + + if ( !UnrollRecordType(fields, rec->FieldType(i)->AsRecordType(), prep) ) { return false; } - } - else + } + + else { Field* field = new Field(); field->name = nameprepend + rec->FieldName(i); - field->type = rec->FieldType(i)->Tag(); - if ( field->type == TYPE_TABLE ) - { + field->type = rec->FieldType(i)->Tag(); + + if ( field->type == TYPE_TABLE ) field->subtype = rec->FieldType(i)->AsSetType()->Indices()->PureType()->Tag(); - } - else if ( field->type == TYPE_VECTOR ) - { + + else if ( field->type == TYPE_VECTOR ) field->subtype = rec->FieldType(i)->AsVectorType()->YieldType()->Tag(); - } else if ( field->type == TYPE_PORT && - rec->FieldDecl(i)->FindAttr(ATTR_TYPE_COLUMN) ) + + else if ( field->type == TYPE_PORT && + rec->FieldDecl(i)->FindAttr(ATTR_TYPE_COLUMN) ) { // we have an annotation for the second column - + Val* c = rec->FieldDecl(i)->FindAttr(ATTR_TYPE_COLUMN)->AttrExpr()->Eval(0); assert(c); @@ -769,7 +765,7 @@ bool Manager::UnrollRecordType(vector *fields, field->secondary_name = c->AsStringVal()->AsString()->CheckString(); } - if ( rec->FieldDecl(i)->FindAttr(ATTR_OPTIONAL ) ) + if ( rec->FieldDecl(i)->FindAttr(ATTR_OPTIONAL ) ) field->optional = true; fields->push_back(field); @@ -782,30 +778,29 @@ bool Manager::UnrollRecordType(vector *fields, bool Manager::ForceUpdate(const string &name) { Stream *i = FindStream(name); - if ( i == 0 ) + if ( i == 0 ) { reporter->Error("Stream %s not found", name.c_str()); return false; } - - if ( i->removed ) + + if ( i->removed ) { reporter->Error("Stream %s is already queued for removal. 
Ignoring force update.", name.c_str()); return false; } - + i->reader->Update(); #ifdef DEBUG - DBG_LOG(DBG_INPUT, "Forcing update of stream %s", - name.c_str()); + DBG_LOG(DBG_INPUT, "Forcing update of stream %s", name.c_str()); #endif return true; // update is async :( } -Val* Manager::RecordValToIndexVal(RecordVal *r) +Val* Manager::RecordValToIndexVal(RecordVal *r) { Val* idxval; @@ -813,16 +808,15 @@ Val* Manager::RecordValToIndexVal(RecordVal *r) int num_fields = type->NumFields(); - if ( num_fields == 1 && type->FieldDecl(0)->type->Tag() != TYPE_RECORD ) - { + if ( num_fields == 1 && type->FieldDecl(0)->type->Tag() != TYPE_RECORD ) idxval = r->LookupWithDefault(0); - } - else + + else { ListVal *l = new ListVal(TYPE_ANY); - for ( int j = 0 ; j < num_fields; j++ ) + for ( int j = 0 ; j < num_fields; j++ ) l->Append(r->LookupWithDefault(j)); - + idxval = l; } @@ -831,23 +825,27 @@ Val* Manager::RecordValToIndexVal(RecordVal *r) } -Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Value* const *vals) +Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Value* const *vals) { Val* idxval; int position = 0; - if ( num_fields == 1 && type->FieldType(0)->Tag() != TYPE_RECORD ) { + if ( num_fields == 1 && type->FieldType(0)->Tag() != TYPE_RECORD ) + { idxval = ValueToVal(vals[0], type->FieldType(0)); position = 1; - } else { + } + + else + { ListVal *l = new ListVal(TYPE_ANY); - for ( int j = 0 ; j < type->NumFields(); j++ ) + for ( int j = 0 ; j < type->NumFields(); j++ ) { - if ( type->FieldType(j)->Tag() == TYPE_RECORD ) - l->Append(ValueToRecordVal(vals, + if ( type->FieldType(j)->Tag() == TYPE_RECORD ) + l->Append(ValueToRecordVal(vals, type->FieldType(j)->AsRecordType(), &position)); - else + else { l->Append(ValueToVal(vals[position], type->FieldType(j))); position++; @@ -862,66 +860,70 @@ Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Valu } -void Manager::SendEntry(ReaderFrontend* reader, Value* *vals) +void Manager::SendEntry(ReaderFrontend* reader, Value* *vals) { Stream *i = FindStream(reader); - if ( i == 0 ) + if ( i == 0 ) { reporter->InternalError("Unknown reader in SendEntry"); return; } - int readFields; - if ( i->stream_type == TABLE_FILTER ) + int readFields = 0; + + if ( i->stream_type == TABLE_STREAM ) readFields = SendEntryTable(i, vals); - else if ( i->stream_type == EVENT_FILTER ) + + else if ( i->stream_type == EVENT_STREAM ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - readFields = SendEventStreamEvent(i, type, vals); - } - else + readFields = SendEventStreamEvent(i, type, vals); + } + + else assert(false); - for ( int i = 0; i < readFields; i++ ) + for ( int i = 0; i < readFields; i++ ) delete vals[i]; - delete [] vals; + delete [] vals; } -int Manager::SendEntryTable(Stream* i, const Value* const *vals) +int Manager::SendEntryTable(Stream* i, const Value* const *vals) { bool updated = false; assert(i); - assert(i->stream_type == TABLE_FILTER); + assert(i->stream_type == TABLE_STREAM); TableStream* stream = (TableStream*) i; HashKey* idxhash = HashValues(stream->num_idx_fields, vals); - - if ( idxhash == 0 ) + + if ( idxhash == 0 ) { reporter->Error("Could not hash line. 
Ignoring"); return stream->num_val_fields + stream->num_idx_fields; - } - + } + hash_t valhash = 0; - if ( stream->num_val_fields > 0 ) + if ( stream->num_val_fields > 0 ) { HashKey* valhashkey = HashValues(stream->num_val_fields, vals+stream->num_idx_fields); - if ( valhashkey == 0 ) { + if ( valhashkey == 0 ) + { // empty line. index, but no values. // hence we also have no hash value... } - else + else { - valhash = valhashkey->Hash(); - delete(valhashkey); + valhash = valhashkey->Hash(); + delete(valhashkey); } } - InputHash *h = stream->lastDict->Lookup(idxhash); - if ( h != 0 ) + InputHash *h = stream->lastDict->Lookup(idxhash); + if ( h != 0 ) { // seen before if ( stream->num_val_fields == 0 || h->valhash == valhash ) @@ -932,41 +934,41 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) delete idxhash; return stream->num_val_fields + stream->num_idx_fields; } - else + + else { assert( stream->num_val_fields > 0 ); // entry was updated in some way stream->lastDict->Remove(idxhash); // keep h for predicates updated = true; - + } } - Val* valval; RecordVal* predidx = 0; - + int position = stream->num_idx_fields; - if ( stream->num_val_fields == 0 ) + + if ( stream->num_val_fields == 0 ) valval = 0; - else if ( stream->num_val_fields == 1 && !stream->want_record ) + + else if ( stream->num_val_fields == 1 && !stream->want_record ) valval = ValueToVal(vals[position], stream->rtype->FieldType(0)); - else + + else valval = ValueToRecordVal(vals, stream->rtype, &position); // call stream first to determine if we really add / change the entry - if ( stream->pred ) + if ( stream->pred ) { EnumVal* ev; - //Ref(idxval); - int startpos = 0; - //Val* predidx = ListValToRecordVal(idxval->AsListVal(), stream->itype, &startpos); + int startpos = 0; predidx = ValueToRecordVal(vals, stream->itype, &startpos); - //ValueToRecordVal(vals, stream->itype, &startpos); - if ( updated ) + if ( updated ) ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); else ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); @@ -976,12 +978,13 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) result = CallPred(stream->pred, 3, ev, predidx->Ref(), valval->Ref()); else // no values result = CallPred(stream->pred, 2, ev, predidx->Ref()); - - if ( result == false ) + + if ( result == false ) { Unref(predidx); Unref(valval); - if ( !updated ) + + if ( ! updated ) { // throw away. Hence - we quit. And remove the entry from the current dictionary... // (but why should it be in there? assert this). @@ -989,8 +992,9 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) delete idxhash; delete h; return stream->num_val_fields + stream->num_idx_fields; - } - else + } + + else { // keep old one stream->currDict->Insert(idxhash, h); @@ -998,42 +1002,37 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) return stream->num_val_fields + stream->num_idx_fields; } } - - } + } // now we don't need h anymore - if we are here, the entry is updated and a new h is created. - if ( h ) + if ( h ) { delete h; h = 0; } - + Val* idxval; - if ( predidx != 0 ) + if ( predidx != 0 ) { idxval = RecordValToIndexVal(predidx); // I think there is an unref missing here. 
But if I insert is, it crashes :) - } - else + } + else idxval = ValueToIndexVal(stream->num_idx_fields, stream->itype, vals); - + Val* oldval = 0; - if ( updated == true ) + if ( updated == true ) { assert(stream->num_val_fields > 0); // in that case, we need the old value to send the event (if we send an event). oldval = stream->tab->Lookup(idxval, false); } - //i->tab->Assign(idxval, valval); assert(idxval); HashKey* k = stream->tab->ComputeHash(idxval); - if ( !k ) - { + if ( ! k ) reporter->InternalError("could not hash"); - assert(false); - } InputHash* ih = new InputHash(); ih->idxkey = new HashKey(k->Key(), k->Size(), k->Hash()); @@ -1044,63 +1043,62 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) stream->tab->Assign(idxval, k, valval); Unref(idxval); // asssign does not consume idxval. + if ( predidx != 0 ) Unref(predidx); stream->currDict->Insert(idxhash, ih); delete idxhash; - if ( stream->event ) + if ( stream->event ) { EnumVal* ev; int startpos = 0; Val* predidx = ValueToRecordVal(vals, stream->itype, &startpos); - if ( updated ) + if ( updated ) { // in case of update send back the old value. assert ( stream->num_val_fields > 0 ); ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx, oldval); - } - else + } + + else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - if ( stream->num_val_fields == 0 ) + if ( stream->num_val_fields == 0 ) { Ref(stream->description); SendEvent(stream->event, 3, stream->description->Ref(), ev, predidx); - } - else + } + else SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx, valval->Ref()); - + } - } + } - - return stream->num_val_fields + stream->num_idx_fields; + return stream->num_val_fields + stream->num_idx_fields; } - -void Manager::EndCurrentSend(ReaderFrontend* reader) +void Manager::EndCurrentSend(ReaderFrontend* reader) { Stream *i = FindStream(reader); - if ( i == 0 ) + if ( i == 0 ) { reporter->InternalError("Unknown reader in EndCurrentSend"); return; } #ifdef DEBUG - DBG_LOG(DBG_INPUT, "Got EndCurrentSend stream %s", - i->name.c_str()); + DBG_LOG(DBG_INPUT, "Got EndCurrentSend stream %s", i->name.c_str()); #endif - if ( i->stream_type == EVENT_FILTER ) // nothing to do.. + if ( i->stream_type == EVENT_STREAM ) // nothing to do.. return; - assert(i->stream_type == TABLE_FILTER); + assert(i->stream_type == TABLE_STREAM); TableStream* stream = (TableStream*) i; // lastdict contains all deleted entries and should be empty apart from that @@ -1108,17 +1106,17 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) stream->lastDict->MakeRobustCookie(c); InputHash* ih; HashKey *lastDictIdxKey; - //while ( ( ih = i->lastDict->NextEntry(c) ) ) { - while ( ( ih = stream->lastDict->NextEntry(lastDictIdxKey, c) ) ) + + while ( ( ih = stream->lastDict->NextEntry(lastDictIdxKey, c) ) ) { ListVal * idx = 0; Val *val = 0; - + Val* predidx = 0; EnumVal* ev = 0; int startpos = 0; - if ( stream->pred || stream->event ) + if ( stream->pred || stream->event ) { idx = stream->tab->RecoverIndex(ih->idxkey); assert(idx != 0); @@ -1128,7 +1126,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); } - if ( stream->pred ) + if ( stream->pred ) { // ask predicate, if we want to expire this element... 
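Seen from the script side, the paths above mean the optional predicate gets to veto every pending operation before it touches the destination table: new and changed entries go through SendEntryTable (with the old value handed to the table event on a change), and entries that disappeared from the source are offered for expiration here with the REMOVED type. A small sketch of such a predicate, matching the pred field's declared signature and assuming the script-level Input::EVENT_* names behind the BifEnum::Input::EVENT_* constants used above:

    function keep_removed(typ: Input::Event, left: any, right: any): bool
        {
        # Returning F makes the CallPred() checks above skip the operation;
        # here, entries that vanished from the input source stay in the
        # destination table instead of being expired.
        return typ != Input::EVENT_REMOVED;
        }

Passed as $pred in the earlier add_table sketch, this keeps stale entries in the table; the handler given as $ev then sees only the operations that survive the predicate.
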
@@ -1138,7 +1136,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) bool result = CallPred(stream->pred, 3, ev, predidx, val); - if ( result == false ) + if ( result == false ) { // Keep it. Hence - we quit and simply go to the next entry of lastDict // ah well - and we have to add the entry to currDict... @@ -1147,10 +1145,10 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) stream->currDict->Insert(lastDictIdxKey, stream->lastDict->RemoveEntry(lastDictIdxKey)); delete lastDictIdxKey; continue; - } - } + } + } - if ( stream->event ) + if ( stream->event ) { Ref(predidx); Ref(val); @@ -1160,7 +1158,8 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) if ( predidx ) // if we have a stream or an event... Unref(predidx); - if ( ev ) + + if ( ev ) Unref(ev); Unref(stream->tab->Delete(ih->idxkey)); @@ -1172,54 +1171,57 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) stream->lastDict->Clear(); // should be empt. buti- well... who knows... delete(stream->lastDict); - stream->lastDict = stream->currDict; + stream->lastDict = stream->currDict; stream->currDict = new PDict(InputHash); stream->currDict->SetDeleteFunc(input_hash_delete_func); #ifdef DEBUG - DBG_LOG(DBG_INPUT, "EndCurrentSend complete for stream %s, queueing update_finished event", - i->name.c_str()); + DBG_LOG(DBG_INPUT, "EndCurrentSend complete for stream %s, queueing update_finished event", + i->name.c_str()); #endif // Send event that the current update is indeed finished. EventHandler* handler = event_registry->Lookup("Input::update_finished"); - if ( handler == 0 ) + if ( handler == 0 ) reporter->InternalError("Input::update_finished not found!"); SendEvent(handler, 2, new StringVal(i->name.c_str()), new StringVal(i->source.c_str())); } -void Manager::Put(ReaderFrontend* reader, Value* *vals) +void Manager::Put(ReaderFrontend* reader, Value* *vals) { Stream *i = FindStream(reader); - if ( i == 0 ) + if ( i == 0 ) { reporter->InternalError("Unknown reader in Put"); return; } - int readFields; - if ( i->stream_type == TABLE_FILTER ) + int readFields = 0; + + if ( i->stream_type == TABLE_STREAM ) readFields = PutTable(i, vals); - else if ( i->stream_type == EVENT_FILTER ) + + else if ( i->stream_type == EVENT_STREAM ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); readFields = SendEventStreamEvent(i, type, vals); - } - else + } + + else assert(false); - - for ( int i = 0; i < readFields; i++ ) + + for ( int i = 0; i < readFields; i++ ) delete vals[i]; - delete [] vals; + delete [] vals; } -int Manager::SendEventStreamEvent(Stream* i, EnumVal* type, const Value* const *vals) +int Manager::SendEventStreamEvent(Stream* i, EnumVal* type, const Value* const *vals) { assert(i); - assert(i->stream_type == EVENT_FILTER); + assert(i->stream_type == EVENT_STREAM); EventStream* stream = (EventStream*) i; Val *val; @@ -1227,71 +1229,77 @@ int Manager::SendEventStreamEvent(Stream* i, EnumVal* type, const Value* const * Ref(stream->description); out_vals.push_back(stream->description); // no tracking, send everything with a new event... 
- //out_vals.push_back(new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event)); out_vals.push_back(type); int position = 0; - if ( stream->want_record ) + + if ( stream->want_record ) { RecordVal * r = ValueToRecordVal(vals, stream->fields, &position); out_vals.push_back(r); } - else - { - for ( int j = 0; j < stream->fields->NumFields(); j++) + + else + { + for ( int j = 0; j < stream->fields->NumFields(); j++) { Val* val = 0; - if ( stream->fields->FieldType(j)->Tag() == TYPE_RECORD ) - val = ValueToRecordVal(vals, - stream->fields->FieldType(j)->AsRecordType(), + + if ( stream->fields->FieldType(j)->Tag() == TYPE_RECORD ) + val = ValueToRecordVal(vals, + stream->fields->FieldType(j)->AsRecordType(), &position); - else + + else { val = ValueToVal(vals[position], stream->fields->FieldType(j)); position++; } - out_vals.push_back(val); + + out_vals.push_back(val); } } SendEvent(stream->event, out_vals); return stream->fields->NumFields(); - } -int Manager::PutTable(Stream* i, const Value* const *vals) +int Manager::PutTable(Stream* i, const Value* const *vals) { assert(i); - assert(i->stream_type == TABLE_FILTER); - TableStream* stream = (TableStream*) i; + assert(i->stream_type == TABLE_STREAM); + TableStream* stream = (TableStream*) i; Val* idxval = ValueToIndexVal(stream->num_idx_fields, stream->itype, vals); Val* valval; int position = stream->num_idx_fields; - if ( stream->num_val_fields == 0 ) + + if ( stream->num_val_fields == 0 ) valval = 0; - else if ( stream->num_val_fields == 1 && stream->want_record == 0 ) + + else if ( stream->num_val_fields == 1 && stream->want_record == 0 ) valval = ValueToVal(vals[position], stream->rtype->FieldType(0)); - else + + else valval = ValueToRecordVal(vals, stream->rtype, &position); // if we have a subscribed event, we need to figure out, if this is an update or not // same for predicates - if ( stream->pred || stream->event ) + if ( stream->pred || stream->event ) { bool updated = false; Val* oldval = 0; - - if ( stream->num_val_fields > 0 ) + + if ( stream->num_val_fields > 0 ) { // in that case, we need the old value to send the event (if we send an event). oldval = stream->tab->Lookup(idxval, false); } - if ( oldval != 0 ) + if ( oldval != 0 ) { // it is an update updated = true; @@ -1300,27 +1308,27 @@ int Manager::PutTable(Stream* i, const Value* const *vals) // predicate if we want the update or not - if ( stream->pred ) + if ( stream->pred ) { EnumVal* ev; int startpos = 0; Val* predidx = ValueToRecordVal(vals, stream->itype, &startpos); Ref(valval); - if ( updated ) - ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, + if ( updated ) + ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); - else - ev = new EnumVal(BifEnum::Input::EVENT_NEW, + else + ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); - + bool result; if ( stream->num_val_fields > 0 ) // we have values result = CallPred(stream->pred, 3, ev, predidx, valval); else // no values result = CallPred(stream->pred, 2, ev, predidx); - - if ( result == false ) + + if ( result == false ) { // do nothing Unref(idxval); @@ -1331,51 +1339,51 @@ int Manager::PutTable(Stream* i, const Value* const *vals) } - stream->tab->Assign(idxval, valval); + stream->tab->Assign(idxval, valval); - if ( stream->event ) - { + if ( stream->event ) + { EnumVal* ev; int startpos = 0; Val* predidx = ValueToRecordVal(vals, stream->itype, &startpos); - if ( updated ) - { + if ( updated ) + { // in case of update send back the old value. 
assert ( stream->num_val_fields > 0 ); - ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, + ev = new EnumVal(BifEnum::Input::EVENT_CHANGED, BifType::Enum::Input::Event); assert ( oldval != 0 ); SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx, oldval); - } - else + } + else { ev = new EnumVal(BifEnum::Input::EVENT_NEW, BifType::Enum::Input::Event); if ( stream->num_val_fields == 0 ) - SendEvent(stream->event, 4, stream->description->Ref(), + SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx); else - SendEvent(stream->event, 4, stream->description->Ref(), + SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx, valval->Ref()); } - + } - } + } + else // no predicates or other stuff stream->tab->Assign(idxval, valval); - return stream->num_idx_fields + stream->num_val_fields; } // Todo:: perhaps throw some kind of clear-event? -void Manager::Clear(ReaderFrontend* reader) +void Manager::Clear(ReaderFrontend* reader) { Stream *i = FindStream(reader); - if ( i == 0 ) + if ( i == 0 ) { reporter->InternalError("Unknown reader in Clear"); return; @@ -1386,17 +1394,17 @@ void Manager::Clear(ReaderFrontend* reader) i->name.c_str()); #endif - assert(i->stream_type == TABLE_FILTER); - TableStream* stream = (TableStream*) i; + assert(i->stream_type == TABLE_STREAM); + TableStream* stream = (TableStream*) i; stream->tab->RemoveAll(); } // put interface: delete old entry from table. -bool Manager::Delete(ReaderFrontend* reader, Value* *vals) +bool Manager::Delete(ReaderFrontend* reader, Value* *vals) { Stream *i = FindStream(reader); - if ( i == 0 ) + if ( i == 0 ) { reporter->InternalError("Unknown reader in Delete"); return false; @@ -1405,19 +1413,19 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) bool success = false; int readVals = 0; - if ( i->stream_type == TABLE_FILTER ) + if ( i->stream_type == TABLE_STREAM ) { - TableStream* stream = (TableStream*) i; + TableStream* stream = (TableStream*) i; Val* idxval = ValueToIndexVal(stream->num_idx_fields, stream->itype, vals); assert(idxval != 0); readVals = stream->num_idx_fields + stream->num_val_fields; bool streamresult = true; - if ( stream->pred || stream->event ) + if ( stream->pred || stream->event ) { Val *val = stream->tab->Lookup(idxval); - if ( stream->pred ) + if ( stream->pred ) { Ref(val); EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); @@ -1426,7 +1434,7 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) streamresult = CallPred(stream->pred, 3, ev, predidx, val); - if ( streamresult == false ) + if ( streamresult == false ) { // keep it. Unref(idxval); @@ -1436,56 +1444,58 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) } // only if stream = true -> no streaming - if ( streamresult && stream->event ) + if ( streamresult && stream->event ) { Ref(idxval); assert(val != 0); - Ref(val); + Ref(val); EnumVal *ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); SendEvent(stream->event, 4, stream->description->Ref(), ev, idxval, val); } } // only if stream = true -> no streaming - if ( streamresult ) + if ( streamresult ) { Val* retptr = stream->tab->Delete(idxval); success = ( retptr != 0 ); - if ( !success ) + if ( ! 
success ) reporter->Error("Internal error while deleting values from input table"); else Unref(retptr); } - - } - else if ( i->stream_type == EVENT_FILTER ) + + } + + else if ( i->stream_type == EVENT_STREAM ) { EnumVal *type = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); - readVals = SendEventStreamEvent(i, type, vals); + readVals = SendEventStreamEvent(i, type, vals); success = true; } - else + + else { assert(false); return false; } - for ( int i = 0; i < readVals; i++ ) + for ( int i = 0; i < readVals; i++ ) delete vals[i]; - delete [] vals; + delete [] vals; return success; - } + } -bool Manager::CallPred(Func* pred_func, const int numvals, ...) +bool Manager::CallPred(Func* pred_func, const int numvals, ...) { bool result; val_list vl(numvals); - + va_list lP; va_start(lP, numvals); - for ( int i = 0; i < numvals; i++ ) + for ( int i = 0; i < numvals; i++ ) vl.append( va_arg(lP, Val*) ); va_end(lP); @@ -1497,10 +1507,10 @@ bool Manager::CallPred(Func* pred_func, const int numvals, ...) return(result); } -bool Manager::SendEvent(const string& name, const int num_vals, Value* *vals) +bool Manager::SendEvent(const string& name, const int num_vals, Value* *vals) { EventHandler* handler = event_registry->Lookup(name.c_str()); - if ( handler == 0 ) + if ( handler == 0 ) { reporter->Error("Event %s not found", name.c_str()); return false; @@ -1508,33 +1518,33 @@ bool Manager::SendEvent(const string& name, const int num_vals, Value* *vals) RecordType *type = handler->FType()->Args(); int num_event_vals = type->NumFields(); - if ( num_vals != num_event_vals ) + if ( num_vals != num_event_vals ) { reporter->Error("Wrong number of values for event %s", name.c_str()); return false; } val_list* vl = new val_list; - for ( int i = 0; i < num_vals; i++) + for ( int i = 0; i < num_vals; i++) vl->append(ValueToVal(vals[i], type->FieldType(i))); mgr.Dispatch(new Event(handler, vl)); - for ( int i = 0; i < num_vals; i++ ) + for ( int i = 0; i < num_vals; i++ ) delete vals[i]; - delete [] vals; + delete [] vals; return true; -} +} -void Manager::SendEvent(EventHandlerPtr ev, const int numvals, ...) +void Manager::SendEvent(EventHandlerPtr ev, const int numvals, ...) { val_list* vl = new val_list; - + va_list lP; va_start(lP, numvals); - for ( int i = 0; i < numvals; i++ ) + for ( int i = 0; i < numvals; i++ ) vl->append( va_arg(lP, Val*) ); va_end(lP); @@ -1545,8 +1555,8 @@ void Manager::SendEvent(EventHandlerPtr ev, const int numvals, ...) void Manager::SendEvent(EventHandlerPtr ev, list events) { val_list* vl = new val_list; - - for ( list::iterator i = events.begin(); i != events.end(); i++ ) + + for ( list::iterator i = events.begin(); i != events.end(); i++ ) { vl->append( *i ); } @@ -1554,31 +1564,31 @@ void Manager::SendEvent(EventHandlerPtr ev, list events) mgr.QueueEvent(ev, vl, SOURCE_LOCAL); } -// Convert a bro list value to a bro record value. +// Convert a bro list value to a bro record value. 
// I / we could think about moving this functionality to val.cc -RecordVal* Manager::ListValToRecordVal(ListVal* list, RecordType *request_type, int* position) +RecordVal* Manager::ListValToRecordVal(ListVal* list, RecordType *request_type, int* position) { assert(position != 0 ); // we need the pointer to point to data; - if ( request_type->Tag() != TYPE_RECORD ) + if ( request_type->Tag() != TYPE_RECORD ) { reporter->InternalError("ListValToRecordVal called on non-record-value."); return 0; - } + } RecordVal* rec = new RecordVal(request_type->AsRecordType()); assert(list != 0); int maxpos = list->Length(); - for ( int i = 0; i < request_type->NumFields(); i++ ) + for ( int i = 0; i < request_type->NumFields(); i++ ) { assert ( (*position) <= maxpos ); Val* fieldVal = 0; if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) - fieldVal = ListValToRecordVal(list, request_type->FieldType(i)->AsRecordType(), position); - else + fieldVal = ListValToRecordVal(list, request_type->FieldType(i)->AsRecordType(), position); + else { fieldVal = list->Index(*position); (*position)++; @@ -1592,24 +1602,23 @@ RecordVal* Manager::ListValToRecordVal(ListVal* list, RecordType *request_type, // Convert a threading value to a record value RecordVal* Manager::ValueToRecordVal(const Value* const *vals, - RecordType *request_type, int* position) + RecordType *request_type, int* position) { assert(position != 0); // we need the pointer to point to data. - if ( request_type->Tag() != TYPE_RECORD ) + if ( request_type->Tag() != TYPE_RECORD ) { reporter->InternalError("ValueToRecordVal called on non-record-value."); return 0; - } + } RecordVal* rec = new RecordVal(request_type->AsRecordType()); - for ( int i = 0; i < request_type->NumFields(); i++ ) + for ( int i = 0; i < request_type->NumFields(); i++ ) { - Val* fieldVal = 0; if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) - fieldVal = ValueToRecordVal(vals, request_type->FieldType(i)->AsRecordType(), position); - else + fieldVal = ValueToRecordVal(vals, request_type->FieldType(i)->AsRecordType(), position); + else { fieldVal = ValueToVal(vals[*position], request_type->FieldType(i)); (*position)++; @@ -1619,10 +1628,10 @@ RecordVal* Manager::ValueToRecordVal(const Value* const *vals, } return rec; - } + } -// Count the length of the values -// used to create a correct length buffer for hashing later +// Count the length of the values used to create a correct length buffer for +// hashing later int Manager::GetValueLength(const Value* val) { assert( val->present ); // presence has to be checked elsewhere int length = 0; @@ -1642,7 +1651,7 @@ int Manager::GetValueLength(const Value* val) { length += sizeof(val->val.port_val.port); length += sizeof(val->val.port_val.proto); break; - + case TYPE_DOUBLE: case TYPE_TIME: case TYPE_INTERVAL: @@ -1688,17 +1697,17 @@ int Manager::GetValueLength(const Value* val) { } break; - case TYPE_TABLE: + case TYPE_TABLE: { - for ( int i = 0; i < val->val.set_val.size; i++ ) + for ( int i = 0; i < val->val.set_val.size; i++ ) length += GetValueLength(val->val.set_val.vals[i]); break; } - case TYPE_VECTOR: + case TYPE_VECTOR: { int j = val->val.vector_val.size; - for ( int i = 0; i < j; i++ ) + for ( int i = 0; i < j; i++ ) length += GetValueLength(val->val.vector_val.vals[i]); break; } @@ -1708,12 +1717,12 @@ int Manager::GetValueLength(const Value* val) { } return length; - + } // Given a threading::value, copy the raw data bytes into *data and return how many bytes were copied. 
// Used for hashing the values for lookup in the bro table -int Manager::CopyValue(char *data, const int startpos, const Value* val) +int Manager::CopyValue(char *data, const int startpos, const Value* val) { assert( val->present ); // presence has to be checked elsewhere @@ -1722,42 +1731,37 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) case TYPE_INT: memcpy(data+startpos, (const void*) &(val->val.int_val), sizeof(val->val.int_val)); return sizeof(val->val.int_val); - break; case TYPE_COUNT: case TYPE_COUNTER: memcpy(data+startpos, (const void*) &(val->val.uint_val), sizeof(val->val.uint_val)); return sizeof(val->val.uint_val); - break; - case TYPE_PORT: + case TYPE_PORT: { int length = 0; - memcpy(data+startpos, (const void*) &(val->val.port_val.port), + memcpy(data+startpos, (const void*) &(val->val.port_val.port), sizeof(val->val.port_val.port)); length += sizeof(val->val.port_val.port); - memcpy(data+startpos+length, (const void*) &(val->val.port_val.proto), + memcpy(data+startpos+length, (const void*) &(val->val.port_val.proto), sizeof(val->val.port_val.proto)); length += sizeof(val->val.port_val.proto); return length; - break; } - + case TYPE_DOUBLE: case TYPE_TIME: case TYPE_INTERVAL: - memcpy(data+startpos, (const void*) &(val->val.double_val), + memcpy(data+startpos, (const void*) &(val->val.double_val), sizeof(val->val.double_val)); return sizeof(val->val.double_val); - break; case TYPE_STRING: case TYPE_ENUM: { memcpy(data+startpos, val->val.string_val->c_str(), val->val.string_val->length()); return val->val.string_val->size(); - break; } case TYPE_ADDR: @@ -1768,86 +1772,89 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) length = sizeof(val->val.addr_val.in.in4); memcpy(data + startpos, (const char*) &(val->val.addr_val.in.in4), length); break; + case IPv6: length = sizeof(val->val.addr_val.in.in6); memcpy(data + startpos, (const char*) &(val->val.addr_val.in.in6), length); break; + default: assert(false); } + return length; } - break; - - case TYPE_SUBNET: + + case TYPE_SUBNET: { int length; switch ( val->val.subnet_val.prefix.family ) { case IPv4: length = sizeof(val->val.addr_val.in.in4); - memcpy(data + startpos, + memcpy(data + startpos, (const char*) &(val->val.subnet_val.prefix.in.in4), length); break; + case IPv6: length = sizeof(val->val.addr_val.in.in6); - memcpy(data + startpos, + memcpy(data + startpos, (const char*) &(val->val.subnet_val.prefix.in.in4), length); break; + default: assert(false); } + int lengthlength = sizeof(val->val.subnet_val.length); - memcpy(data + startpos + length , + memcpy(data + startpos + length , (const char*) &(val->val.subnet_val.length), lengthlength); length += lengthlength; + return length; } - break; - case TYPE_TABLE: + case TYPE_TABLE: { int length = 0; int j = val->val.set_val.size; - for ( int i = 0; i < j; i++ ) + for ( int i = 0; i < j; i++ ) length += CopyValue(data, startpos+length, val->val.set_val.vals[i]); return length; - break; } - case TYPE_VECTOR: + case TYPE_VECTOR: { int length = 0; int j = val->val.vector_val.size; - for ( int i = 0; i < j; i++ ) + for ( int i = 0; i < j; i++ ) length += CopyValue(data, startpos+length, val->val.vector_val.vals[i]); return length; - break; } default: reporter->InternalError("unsupported type %d for CopyValue", val->type); return 0; } - + assert(false); return 0; } // Hash num_elements threading values and return the HashKey for them. At least one of the vals has to be ->present. 
-HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) +HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) { int length = 0; - for ( int i = 0; i < num_elements; i++ ) + for ( int i = 0; i < num_elements; i++ ) { const Value* val = vals[i]; if ( val->present ) length += GetValueLength(val); } - if ( length == 0 ) + if ( length == 0 ) { reporter->Error("Input reader sent line where all elements are null values. Ignoring line"); return NULL; @@ -1855,10 +1862,10 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) int position = 0; char *data = (char*) malloc(length); - if ( data == 0 ) + if ( data == 0 ) reporter->InternalError("Could not malloc?"); - for ( int i = 0; i < num_elements; i++ ) + for ( int i = 0; i < num_elements; i++ ) { const Value* val = vals[i]; if ( val->present ) @@ -1873,16 +1880,16 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) } // convert threading value to Bro value -Val* Manager::ValueToVal(const Value* val, BroType* request_type) +Val* Manager::ValueToVal(const Value* val, BroType* request_type) { - - if ( request_type->Tag() != TYPE_ANY && request_type->Tag() != val->type ) + + if ( request_type->Tag() != TYPE_ANY && request_type->Tag() != val->type ) { reporter->InternalError("Typetags don't match: %d vs %d", request_type->Tag(), val->type); return 0; } - if ( !val->present ) + if ( !val->present ) return 0; // unset field switch ( val->type ) { @@ -1894,24 +1901,20 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) case TYPE_COUNT: case TYPE_COUNTER: return new Val(val->val.uint_val, val->type); - break; - + case TYPE_DOUBLE: case TYPE_TIME: case TYPE_INTERVAL: return new Val(val->val.double_val, val->type); - break; case TYPE_STRING: { BroString *s = new BroString(*(val->val.string_val)); return new StringVal(s); - break; } - + case TYPE_PORT: return new PortVal(val->val.port_val.port, val->val.port_val.proto); - break; case TYPE_ADDR: { @@ -1920,12 +1923,15 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) case IPv4: addr = new IPAddr(val->val.addr_val.in.in4); break; + case IPv6: addr = new IPAddr(val->val.addr_val.in.in6); break; + default: assert(false); } + AddrVal* addrval = new AddrVal(*addr); delete addr; return addrval; @@ -1938,19 +1944,21 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) case IPv4: addr = new IPAddr(val->val.subnet_val.prefix.in.in4); break; + case IPv6: addr = new IPAddr(val->val.subnet_val.prefix.in.in6); break; + default: assert(false); } + SubNetVal* subnetval = new SubNetVal(*addr, val->val.subnet_val.length); delete addr; return subnetval; - break; } - case TYPE_TABLE: + case TYPE_TABLE: { // all entries have to have the same type... BroType* type = request_type->AsTableType()->Indices()->PureType(); @@ -1958,7 +1966,7 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) set_index->Append(type->Ref()); SetType* s = new SetType(set_index, 0); TableVal* t = new TableVal(s); - for ( int i = 0; i < val->val.set_val.size; i++ ) + for ( int i = 0; i < val->val.set_val.size; i++ ) { Val* assignval = ValueToVal( val->val.set_val.vals[i], type ); t->Assign(assignval, 0); @@ -1967,21 +1975,19 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) Unref(s); return t; - break; } - case TYPE_VECTOR: + case TYPE_VECTOR: { // all entries have to have the same type... 
BroType* type = request_type->AsVectorType()->YieldType(); VectorType* vt = new VectorType(type->Ref()); VectorVal* v = new VectorVal(vt); - for ( int i = 0; i < val->val.vector_val.size; i++ ) + for ( int i = 0; i < val->val.vector_val.size; i++ ) v->Assign(i, ValueToVal( val->val.set_val.vals[i], type ), 0); Unref(vt); return v; - break; } case TYPE_ENUM: { @@ -1990,14 +1996,13 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) string module = extract_module_name(val->val.string_val->c_str()); string var = extract_var_name(val->val.string_val->c_str()); bro_int_t index = request_type->AsEnumType()->Lookup(module, var.c_str()); - if ( index == -1 ) - reporter->InternalError("Value not found in enum mappimg. Module: %s, var: %s", + if ( index == -1 ) + reporter->InternalError("Value not found in enum mappimg. Module: %s, var: %s", module.c_str(), var.c_str()); - + return new EnumVal(index, request_type->Ref()->AsEnumType() ); - break; } - + default: reporter->InternalError("unsupported type for input_read"); @@ -2006,22 +2011,22 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) assert(false); return NULL; } - + Manager::Stream* Manager::FindStream(const string &name) { for ( map::iterator s = readers.begin(); s != readers.end(); ++s ) { - if ( (*s).second->name == name ) + if ( (*s).second->name == name ) return (*s).second; } return 0; } -Manager::Stream* Manager::FindStream(ReaderFrontend* reader) +Manager::Stream* Manager::FindStream(ReaderFrontend* reader) { map::iterator s = readers.find(reader); - if ( s != readers.end() ) + if ( s != readers.end() ) return s->second; return 0; diff --git a/src/input/Manager.h b/src/input/Manager.h index d15febe0d6..984fcf3841 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. // -// Class for managing input streams +// Class for managing input streams. #ifndef INPUT_MANAGER_H #define INPUT_MANAGER_H @@ -16,7 +16,7 @@ namespace input { class ReaderFrontend; -class ReaderBackend; +class ReaderBackend; /** * Singleton class for managing input streams. @@ -25,58 +25,60 @@ class Manager { public: /** * Constructor. - */ + */ Manager(); /** * Destructor. */ ~Manager(); - + /** - * Creates a new input stream which will write the data from the data source into + * Creates a new input stream which will write the data from the data + * source into a table. * - * @param description A record of script type \c Input:StreamDescription. + * @param description A record of script type \c + * Input:StreamDescription. * * This method corresponds directly to the internal BiF defined in * input.bif, which just forwards here. - */ - bool CreateTableStream(RecordVal* description); + */ + bool CreateTableStream(RecordVal* description); /** * Creates a new input stream which sends events for read input data. * - * @param description A record of script type \c Input:StreamDescription. + * @param description A record of script type \c + * Input:StreamDescription. * * This method corresponds directly to the internal BiF defined in * input.bif, which just forwards here. - */ - bool CreateEventStream(RecordVal* description); - + */ + bool CreateEventStream(RecordVal* description); /** - * Force update on a input stream. - * Forces a re-read of the whole input source. - * Usually used, when an input stream is opened in managed mode. - * Otherwise, this can be used to trigger a input source check before a heartbeat message arrives. 
- * May be ignored by the reader. + * Force update on a input stream. Forces a re-read of the whole + * input source. Usually used when an input stream is opened in + * managed mode. Otherwise, this can be used to trigger a input + * source check before a heartbeat message arrives. May be ignored by + * the reader. * - * @param id The enum value corresponding the input stream. + * @param id The enum value corresponding the input stream. * * This method corresponds directly to the internal BiF defined in * input.bif, which just forwards here. */ bool ForceUpdate(const string &id); - + /** - * Deletes an existing input stream + * Deletes an existing input stream. * - * @param id The enum value corresponding the input stream. + * @param id The enum value corresponding the input stream. * * This method corresponds directly to the internal BiF defined in * input.bif, which just forwards here. */ - bool RemoveStream(const string &id); + bool RemoveStream(const string &id); protected: friend class ReaderFrontend; @@ -88,90 +90,100 @@ protected: friend class EndCurrentSendMessage; friend class ReaderClosedMessage; - // For readers to write to input stream in direct mode (reporting new/deleted values directly) - // Functions take ownership of threading::Value fields + // For readers to write to input stream in direct mode (reporting + // new/deleted values directly). Functions take ownership of + // threading::Value fields. void Put(ReaderFrontend* reader, threading::Value* *vals); void Clear(ReaderFrontend* reader); bool Delete(ReaderFrontend* reader, threading::Value* *vals); - // for readers to write to input stream in indirect mode (manager is monitoring new/deleted values) - // Functions take ownership of threading::Value fields + // For readers to write to input stream in indirect mode (manager is + // monitoring new/deleted values) Functions take ownership of + // threading::Value fields. void SendEntry(ReaderFrontend* reader, threading::Value* *vals); void EndCurrentSend(ReaderFrontend* reader); - - // Allows readers to directly send Bro events. - // The num_vals and vals must be the same the named event expects. - // Takes ownership of threading::Value fields + + // Allows readers to directly send Bro events. The num_vals and vals + // must be the same the named event expects. Takes ownership of + // threading::Value fields. bool SendEvent(const string& name, const int num_vals, threading::Value* *vals); // Instantiates a new ReaderBackend of the given type (note that - // doing so creates a new thread!). - ReaderBackend* CreateBackend(ReaderFrontend* frontend, bro_int_t type); - - // Functions are called from the ReaderBackend to notify the manager, that a stream has been removed - // or a stream has been closed. - // Used to prevent race conditions where data for a specific stream is still in the queue when the - // RemoveStream directive is executed by the main thread. - // This makes sure all data that has ben queued for a stream is still received. + // doing so creates a new thread!). + ReaderBackend* CreateBackend(ReaderFrontend* frontend, bro_int_t type); + + // Function called from the ReaderBackend to notify the manager that + // a stream has been removed or a stream has been closed. Used to + // prevent race conditions where data for a specific stream is still + // in the queue when the RemoveStream directive is executed by the + // main thread. This makes sure all data that has ben queued for a + // stream is still received. 
bool RemoveStreamContinuation(ReaderFrontend* reader); - + private: class Stream; class TableStream; class EventStream; - - bool CreateStream(Stream*, RecordVal* description); - // SendEntry implementation for Table stream - int SendEntryTable(Stream* i, const threading::Value* const *vals); + bool CreateStream(Stream*, RecordVal* description); - // Put implementation for Table stream - int PutTable(Stream* i, const threading::Value* const *vals); + // SendEntry implementation for Table stream. + int SendEntryTable(Stream* i, const threading::Value* const *vals); - // SendEntry and Put implementation for Event stream + // Put implementation for Table stream. + int PutTable(Stream* i, const threading::Value* const *vals); + + // SendEntry and Put implementation for Event stream. int SendEventStreamEvent(Stream* i, EnumVal* type, const threading::Value* const *vals); - // Checks is a bro type can be used for data reading. The equivalend in threading cannot be used, because we have support different types - // from the log framework + // Checks that a Bro type can be used for data reading. The + // equivalend in threading cannot be used, because we have support + // different types from the log framework bool IsCompatibleType(BroType* t, bool atomic_only=false); - // Check if a record is made up of compatible types and return a list of all fields that are in the record in order. - // Recursively unrolls records + // Check if a record is made up of compatible types and return a list + // of all fields that are in the record in order. Recursively unrolls + // records bool UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend); // Send events - void SendEvent(EventHandlerPtr ev, const int numvals, ...); - void SendEvent(EventHandlerPtr ev, list events); + void SendEvent(EventHandlerPtr ev, const int numvals, ...); + void SendEvent(EventHandlerPtr ev, list events); - // Call predicate function and return result + // Call predicate function and return result. bool CallPred(Func* pred_func, const int numvals, ...); - // get a hashkey for a set of threading::Values + // Get a hashkey for a set of threading::Values. HashKey* HashValues(const int num_elements, const threading::Value* const *vals); - // Get the memory used by a specific value + // Get the memory used by a specific value. int GetValueLength(const threading::Value* val); - // Copies the raw data in a specific threading::Value to position sta + + // Copies the raw data in a specific threading::Value to position + // startpos. int CopyValue(char *data, const int startpos, const threading::Value* val); - // Convert Threading::Value to an internal Bro Type (works also with Records) + // Convert Threading::Value to an internal Bro Type (works also with + // Records). Val* ValueToVal(const threading::Value* val, BroType* request_type); - // Convert Threading::Value to an internal Bro List type + // Convert Threading::Value to an internal Bro List type. Val* ValueToIndexVal(int num_fields, const RecordType* type, const threading::Value* const *vals); - // Converts a threading::value to a record type. mostly used by ValueToVal - RecordVal* ValueToRecordVal(const threading::Value* const *vals, RecordType *request_type, int* position); + // Converts a threading::value to a record type. Mostly used by + // ValueToVal. 
+ RecordVal* ValueToRecordVal(const threading::Value* const *vals, RecordType *request_type, int* position); + Val* RecordValToIndexVal(RecordVal *r); - - // Converts a Bro ListVal to a RecordVal given the record type + + // Converts a Bro ListVal to a RecordVal given the record type. RecordVal* ListValToRecordVal(ListVal* list, RecordType *request_type, int* position); Stream* FindStream(const string &name); Stream* FindStream(ReaderFrontend* reader); - enum StreamType { TABLE_FILTER, EVENT_FILTER }; - + enum StreamType { TABLE_STREAM, EVENT_STREAM }; + map readers; }; diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index c625301383..328e0bc535 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -15,7 +15,7 @@ public: : threading::OutputMessage("Put", reader), val(val) {} - virtual bool Process() + virtual bool Process() { input_mgr->Put(Object(), val); return true; @@ -31,7 +31,7 @@ public: : threading::OutputMessage("Delete", reader), val(val) {} - virtual bool Process() + virtual bool Process() { return input_mgr->Delete(Object(), val); } @@ -45,7 +45,7 @@ public: ClearMessage(ReaderFrontend* reader) : threading::OutputMessage("Clear", reader) {} - virtual bool Process() + virtual bool Process() { input_mgr->Clear(Object()); return true; @@ -60,14 +60,14 @@ public: : threading::OutputMessage("SendEvent", reader), name(name), num_vals(num_vals), val(val) {} - virtual bool Process() + virtual bool Process() { bool success = input_mgr->SendEvent(name, num_vals, val); - if ( !success ) + if ( ! success ) reporter->Error("SendEvent for event %s failed", name.c_str()); - return true; // we do not want to die if sendEvent fails because the event did not return. + return true; // We do not want to die if sendEvent fails because the event did not return. } private: @@ -82,7 +82,7 @@ public: : threading::OutputMessage("SendEntry", reader), val(val) { } - virtual bool Process() + virtual bool Process() { input_mgr->SendEntry(Object(), val); return true; @@ -97,7 +97,7 @@ public: EndCurrentSendMessage(ReaderFrontend* reader) : threading::OutputMessage("EndCurrentSend", reader) {} - virtual bool Process() + virtual bool Process() { input_mgr->EndCurrentSend(Object()); return true; @@ -111,7 +111,7 @@ public: ReaderClosedMessage(ReaderFrontend* reader) : threading::OutputMessage("ReaderClosed", reader) {} - virtual bool Process() + virtual bool Process() { return input_mgr->RemoveStreamContinuation(Object()); } @@ -127,49 +127,46 @@ public: : threading::OutputMessage("Disable", writer) {} virtual bool Process() - { - Object()->SetDisable(); - return true; + { + Object()->SetDisable(); + return true; } }; ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() { - buf = 0; - buf_len = 1024; disabled = true; // disabled will be set correcty in init. 
- frontend = arg_frontend; SetName(frontend->Name()); } -ReaderBackend::~ReaderBackend() - { +ReaderBackend::~ReaderBackend() + { } -void ReaderBackend::Put(Value* *val) +void ReaderBackend::Put(Value* *val) { SendOut(new PutMessage(frontend, val)); } -void ReaderBackend::Delete(Value* *val) +void ReaderBackend::Delete(Value* *val) { SendOut(new DeleteMessage(frontend, val)); } -void ReaderBackend::Clear() +void ReaderBackend::Clear() { SendOut(new ClearMessage(frontend)); } -void ReaderBackend::SendEvent(const string& name, const int num_vals, Value* *vals) +void ReaderBackend::SendEvent(const string& name, const int num_vals, Value* *vals) { SendOut(new SendEventMessage(frontend, name, num_vals, vals)); - } + } -void ReaderBackend::EndCurrentSend() +void ReaderBackend::EndCurrentSend() { SendOut(new EndCurrentSendMessage(frontend)); } @@ -179,19 +176,19 @@ void ReaderBackend::SendEntry(Value* *vals) SendOut(new SendEntryMessage(frontend, vals)); } -bool ReaderBackend::Init(string arg_source, int mode, const int arg_num_fields, - const threading::Field* const* arg_fields) +bool ReaderBackend::Init(string arg_source, int mode, const int arg_num_fields, + const threading::Field* const* arg_fields) { source = arg_source; SetName("InputReader/"+source); num_fields = arg_num_fields; - fields = arg_fields; + fields = arg_fields; // disable if DoInit returns error. int success = DoInit(arg_source, mode, arg_num_fields, arg_fields); - if ( !success ) + if ( ! success ) { Error("Init failed"); DisableFrontend(); @@ -202,30 +199,30 @@ bool ReaderBackend::Init(string arg_source, int mode, const int arg_num_fields, return success; } -void ReaderBackend::Close() +void ReaderBackend::Close() { DoClose(); disabled = true; DisableFrontend(); SendOut(new ReaderClosedMessage(frontend)); - if ( fields != 0 ) + if ( fields != 0 ) { - for ( unsigned int i = 0; i < num_fields; i++ ) + for ( unsigned int i = 0; i < num_fields; i++ ) delete(fields[i]); - delete[] (fields); + delete [] (fields); fields = 0; } } -bool ReaderBackend::Update() +bool ReaderBackend::Update() { - if ( disabled ) + if ( disabled ) return false; bool success = DoUpdate(); - if ( !success ) + if ( ! success ) DisableFrontend(); return success; @@ -233,8 +230,9 @@ bool ReaderBackend::Update() void ReaderBackend::DisableFrontend() { - disabled = true; - // we also set disabled here, because there still may be other messages queued and we will dutifully ignore these from now + // We also set disabled here, because there still may be other + // messages queued and we will dutifully ignore these from now. + disabled = true; SendOut(new DisableMessage(frontend)); } @@ -244,9 +242,9 @@ bool ReaderBackend::DoHeartbeat(double network_time, double current_time) return true; } -TransportProto ReaderBackend::StringToProto(const string &proto) +TransportProto ReaderBackend::StringToProto(const string &proto) { - if ( proto == "unknown" ) + if ( proto == "unknown" ) return TRANSPORT_UNKNOWN; else if ( proto == "tcp" ) return TRANSPORT_TCP; @@ -261,8 +259,8 @@ TransportProto ReaderBackend::StringToProto(const string &proto) } -// more or less verbose copy from IPAddr.cc -- which uses reporter -Value::addr_t ReaderBackend::StringToAddr(const string &s) +// More or less verbose copy from IPAddr.cc -- which uses reporter. 
+Value::addr_t ReaderBackend::StringToAddr(const string &s) { Value::addr_t val; @@ -270,9 +268,9 @@ Value::addr_t ReaderBackend::StringToAddr(const string &s) { val.family = IPv4; - if ( inet_aton(s.c_str(), &(val.in.in4)) <= 0 ) + if ( inet_aton(s.c_str(), &(val.in.in4)) <= 0 ) { - Error(Fmt("Bad addres: %s", s.c_str())); + Error(Fmt("Bad address: %s", s.c_str())); memset(&val.in.in4.s_addr, 0, sizeof(val.in.in4.s_addr)); } @@ -283,7 +281,7 @@ Value::addr_t ReaderBackend::StringToAddr(const string &s) val.family = IPv6; if ( inet_pton(AF_INET6, s.c_str(), val.in.in6.s6_addr) <=0 ) { - Error(Fmt("Bad IP address: %s", s.c_str())); + Error(Fmt("Bad address: %s", s.c_str())); memset(val.in.in6.s6_addr, 0, sizeof(val.in.in6.s6_addr)); } } diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index b4d9101bc8..ae8437b08c 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -12,13 +12,13 @@ namespace input { class ReaderFrontend; /** - * Base class for reader implementation. When the input:Manager creates a - * new input stream, it instantiates a ReaderFrontend. That then in turn - * creates a ReaderBackend of the right type. The frontend then forwards - * message over the backend as its methods are called. + * Base class for reader implementation. When the input:Manager creates a new + * input stream, it instantiates a ReaderFrontend. That then in turn creates + * a ReaderBackend of the right type. The frontend then forwards messages + * over the backend as its methods are called. * - * All of this methods must be called only from the corresponding child - * thread (the constructor is the one exception.) + * All methods must be called only from the corresponding child thread (the + * constructor is the one exception.) */ class ReaderBackend : public threading::MsgThread { public: @@ -27,54 +27,51 @@ public: * * @param frontend The frontend reader that created this backend. The * *only* purpose of this value is to be passed back via messages as - * a argument to callbacks. One must not otherwise access the + * an argument to callbacks. One must not otherwise access the * frontend, it's running in a different thread. - * - * @param frontend pointer to the reader frontend - */ + */ ReaderBackend(ReaderFrontend* frontend); - + /** * Destructor. - */ + */ virtual ~ReaderBackend(); /** * One-time initialization of the reader to define the input source. * - * @param arg_source A string left to the interpretation of the reader - * implementation; it corresponds to the value configured on the - * script-level for the input stream. + * @param arg_source A string left to the interpretation of the + * reader implementation; it corresponds to the value configured on + * the script-level for the input stream. * - * @param num_fields The number of log fields for the stream. + * @param fields An array of size \a num_fields with the input + * fields. The method takes ownership of the array. * - * @param fields An array of size \a num_fields with the log fields. - * The methods takes ownership of the array. - * - * @param mode the opening mode for the input source + * @param mode The opening mode for the input source as one of the + * Input::Mode script constants. * - * @param arg_num_fields number of fields contained in \a fields + * @param arg_num_fields Number of fields contained in \a fields. 
* - * @param fields the types and names of the fields to be retrieved - * from the input source + * @param fields The types and names of the fields to be retrieved + * from the input source. * * @return False if an error occured. */ bool Init(string arg_source, int mode, int arg_num_fields, const threading::Field* const* fields); /** - * Finishes reading from this input stream in a regular fashion. Must not be - * called if an error has been indicated earlier. After calling this, - * no further reading from the stream can be performed + * Finishes reading from this input stream in a regular fashion. Must + * not be called if an error has been indicated earlier. After + * calling this, no further reading from the stream can be performed. * * @return False if an error occured. */ void Close(); /** - * Force trigger an update of the input stream. - * The action that will be taken depends on the current read mode and the - * individual input backend + * Force trigger an update of the input stream. The action that will + * be taken depends on the current read mode and the individual input + * backend. * * An backend can choose to ignore this. * @@ -84,16 +81,17 @@ public: /** * Disables the frontend that has instantiated this backend. Once - * disabled,the frontend will not send any further message over. + * disabled, the frontend will not send any further message over. */ - void DisableFrontend(); - + void DisableFrontend(); + protected: - // Methods that have to be overwritten by the individual readers - + // Methods that have to be overwritten by the individual readers + /** - * Reader-specific intialization method. Note that data may only be - * read from the input source after the Start function has been called. + * Reader-specific intialization method. Note that data may only be + * read from the input source after the Init() function has been + * called. * * A reader implementation must override this method. If it returns * false, it will be assumed that a fatal error has occured that @@ -105,39 +103,39 @@ protected: /** * Reader-specific method implementing input finalization at - * termination. + * termination. * * A reader implementation must override this method but it can just - * ignore calls if an input source must not be closed. + * ignore calls if an input source can't actually be closed. * - * After the method is called, the writer will be deleted. If an error occurs - * during shutdown, an implementation should also call Error() to indicate what - * happened. - */ + * After the method is called, the writer will be deleted. If an + * error occurs during shutdown, an implementation should also call + * Error() to indicate what happened. + */ virtual void DoClose() = 0; /** - * Reader-specific method implementing the forced update trigger + * Reader-specific method implementing the forced update trigger. * - * A reader implementation must override this method but it can just ignore - * calls, if a forced update does not fit the input source or the current input - * reading mode. + * A reader implementation must override this method but it can just + * ignore calls if a forced update does not fit the input source or + * the current input reading mode. * - * If it returns false, it will be assumed that a fatal error has occured - * that prevents the reader from further operation; it will then be - * disabled and eventually deleted. When returning false, an implementation - * should also call Error to indicate what happened. 
+ * If it returns false, it will be assumed that a fatal error has + * occured that prevents the reader from further operation; it will + * then be disabled and eventually deleted. When returning false, an + * implementation should also call Error to indicate what happened. */ virtual bool DoUpdate() = 0; - + /** * Returns the input source as passed into the constructor. */ const string Source() const { return source; } /** - * Method allowing a reader to send a specified bro event. - * Vals must match the values expected by the bro event. + * Method allowing a reader to send a specified Bro event. Vals must + * match the values expected by the bro event. * * @param name name of the bro event to send * @@ -147,30 +145,33 @@ protected: */ void SendEvent(const string& name, const int num_vals, threading::Value* *vals); - // Content-sending-functions (simple mode). Including table-specific stuff that - // simply is not used if we have no table + // Content-sending-functions (simple mode). Including table-specific + // stuff that simply is not used if we have no table. + /** - * Method allowing a reader to send a list of values read for a specific stream - * back to the manager. + * Method allowing a reader to send a list of values read from a + * specific stream back to the manager in simple mode. * - * If the stream is a table stream, the values are inserted into the table; - * if it is an event stream, the event is raised. + * If the stream is a table stream, the values are inserted into the + * table; if it is an event stream, the event is raised. * * @param val list of threading::Values expected by the stream */ void Put(threading::Value* *val); /** - * Method allowing a reader to delete a specific value from a bro table. + * Method allowing a reader to delete a specific value from a Bro + * table. * - * If the receiving stream is an event stream, only a removed event is raised + * If the receiving stream is an event stream, only a removed event + * is raised. * * @param val list of threading::Values expected by the stream */ void Delete(threading::Value* *val); /** - * Method allowing a reader to clear a value from a bro table. + * Method allowing a reader to clear a Bro table. * * If the receiving stream is an event stream, this is ignored. * @@ -178,26 +179,25 @@ protected: void Clear(); // Content-sending-functions (tracking mode): Only changed lines are propagated. - /** - * Method allowing a reader to send a list of values read for a specific stream - * back to the manager. + * Method allowing a reader to send a list of values read from + * specific stream back to the manager in tracking mode. * - * If the stream is a table stream, the values are inserted into the table; - * if it is an event stream, the event is raised. + * If the stream is a table stream, the values are inserted into the + * table; if it is an event stream, the event is raised. * * @param val list of threading::Values expected by the stream */ void SendEntry(threading::Value* *vals); /** - * Method telling the manager, that the current list of entries sent by SendEntry - * is finished. - * - * For table streams, all entries that were not updated since the last EndCurrentSend - * will be deleted, because they are no longer present in the input source + * Method telling the manager, that the current list of entries sent + * by SendEntry is finished. 
* + * For table streams, all entries that were not updated since the + * last EndCurrentSend will be deleted, because they are no longer + * present in the input source */ void EndCurrentSend(); @@ -207,14 +207,14 @@ protected: * This method can be overridden but once must call * ReaderBackend::DoHeartbeat(). */ - virtual bool DoHeartbeat(double network_time, double current_time); + virtual bool DoHeartbeat(double network_time, double current_time); /** * Utility function for Readers - convert a string into a TransportProto * * @param proto the transport protocol */ - TransportProto StringToProto(const string &proto); + TransportProto StringToProto(const string &proto); /** * Utility function for Readers - convert a string into a Value::addr_t @@ -224,20 +224,16 @@ protected: threading::Value::addr_t StringToAddr(const string &addr); private: - // Frontend that instantiated us. This object must not be access from - // this class, it's running in a different thread! - ReaderFrontend* frontend; + // Frontend that instantiated us. This object must not be accessed + // from this class, it's running in a different thread! + ReaderFrontend* frontend; string source; - + bool disabled; - // For implementing Fmt(). - char* buf; - unsigned int buf_len; - unsigned int num_fields; - const threading::Field* const * fields; // raw mapping + const threading::Field* const * fields; // raw mapping }; } diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index f61fd357b9..75bb7fec50 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -12,11 +12,15 @@ namespace input { class InitMessage : public threading::InputMessage { public: - InitMessage(ReaderBackend* backend, const string source, const int mode, const int num_fields, const threading::Field* const* fields) + InitMessage(ReaderBackend* backend, const string source, const int mode, + const int num_fields, const threading::Field* const* fields) : threading::InputMessage("Init", backend), source(source), mode(mode), num_fields(num_fields), fields(fields) { } - virtual bool Process() { return Object()->Init(source, mode, num_fields, fields); } + virtual bool Process() + { + return Object()->Init(source, mode, num_fields, fields); + } private: const string source; @@ -46,7 +50,7 @@ public: }; -ReaderFrontend::ReaderFrontend(bro_int_t type) +ReaderFrontend::ReaderFrontend(bro_int_t type) { disabled = initialized = false; ty_name = ""; @@ -56,12 +60,12 @@ ReaderFrontend::ReaderFrontend(bro_int_t type) backend->Start(); } -ReaderFrontend::~ReaderFrontend() +ReaderFrontend::~ReaderFrontend() { } -void ReaderFrontend::Init(string arg_source, int mode, const int num_fields, - const threading::Field* const* fields) +void ReaderFrontend::Init(string arg_source, int mode, const int num_fields, + const threading::Field* const* fields) { if ( disabled ) return; @@ -73,14 +77,14 @@ void ReaderFrontend::Init(string arg_source, int mode, const int num_fields, initialized = true; backend->SendIn(new InitMessage(backend, arg_source, mode, num_fields, fields)); - } + } -void ReaderFrontend::Update() +void ReaderFrontend::Update() { - if ( disabled ) + if ( disabled ) return; - if ( !initialized ) + if ( ! initialized ) { reporter->Error("Tried to call update on uninitialized reader"); return; @@ -89,12 +93,12 @@ void ReaderFrontend::Update() backend->SendIn(new UpdateMessage(backend)); } -void ReaderFrontend::Close() +void ReaderFrontend::Close() { - if ( disabled ) + if ( disabled ) return; - - if ( !initialized ) + + if ( ! 
initialized ) { reporter->Error("Tried to call finish on uninitialized reader"); return; diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index 88cf60804e..c18e22a064 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -29,14 +29,14 @@ public: * corresponding type. * * Frontends must only be instantiated by the main thread. - */ + */ ReaderFrontend(bro_int_t type); /** * Destructor. * * Frontends must only be destroyed by the main thread. - */ + */ virtual ~ReaderFrontend(); /** @@ -47,37 +47,39 @@ public: * sends a message back that will asynchronously call Disable(). * * See ReaderBackend::Init() for arguments. + * * This method must only be called from the main thread. - */ + */ void Init(string arg_source, int mode, const int arg_num_fields, const threading::Field* const* fields); /** - * Force an update of the current input source. Actual action depends on - * the opening mode and on the input source. + * Force an update of the current input source. Actual action depends + * on the opening mode and on the input source. * * This method generates a message to the backend reader and triggers * the corresponding message there. + * * This method must only be called from the main thread. */ void Update(); /** - * Finalizes writing to this tream. + * Finalizes reading from this stream. * * This method generates a message to the backend reader and triggers - * the corresponding message there. - * This method must only be called from the main thread. - */ + * the corresponding message there. This method must only be called + * from the main thread. + */ void Close(); /** * Disables the reader frontend. From now on, all method calls that * would normally send message over to the backend, turn into no-ops. - * Note though that it does not stop the backend itself, use Finsh() + * Note though that it does not stop the backend itself, use Finish() * to do that as well (this method is primarily for use as callback * when the backend wants to disable the frontend). * - * Disabled frontend will eventually be discarded by the + * Disabled frontends will eventually be discarded by the * input::Manager. * * This method must only be called from the main thread. @@ -85,9 +87,10 @@ public: void SetDisable() { disabled = true; } /** - * Returns true if the reader frontend has been disabled with SetDisable(). + * Returns true if the reader frontend has been disabled with + * SetDisable(). */ - bool Disabled() { return disabled; } + bool Disabled() { return disabled; } /** * Returns a descriptive name for the reader, including the type of @@ -101,18 +104,21 @@ protected: friend class Manager; /** - * Returns the source as passed into the constructor + * Returns the source as passed into the constructor. */ - const string Source() const { return source; }; + const string& Source() const { return source; }; - string ty_name; // Name of the backend type. Set by the manager. + /** + * Returns the name of the backend's type. + */ + const string& TypeName() const { return ty_name; } private: - ReaderBackend* backend; // The backend we have instanatiated. + ReaderBackend* backend; // The backend we have instanatiated. string source; + string ty_name; // Backend type, set by manager. bool disabled; // True if disabled. - bool initialized; // True if initialized. - + bool initialized; // True if initialized. 
}; } diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 8223d6e201..157ea90916 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -20,8 +20,7 @@ using namespace input::reader; using threading::Value; using threading::Field; - -FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position) +FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position) : name(arg_name), type(arg_type) { position = arg_position; @@ -29,8 +28,8 @@ FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, int present = true; } -FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, - const TypeTag& arg_subtype, int arg_position) +FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, + const TypeTag& arg_subtype, int arg_position) : name(arg_name), type(arg_type), subtype(arg_subtype) { position = arg_position; @@ -38,14 +37,14 @@ FieldMapping::FieldMapping(const string& arg_name, const TypeTag& arg_type, present = true; } -FieldMapping::FieldMapping(const FieldMapping& arg) +FieldMapping::FieldMapping(const FieldMapping& arg) : name(arg.name), type(arg.type), subtype(arg.subtype), present(arg.present) { position = arg.position; secondary_position = arg.secondary_position; } -FieldMapping FieldMapping::subType() +FieldMapping FieldMapping::subType() { return FieldMapping(name, subtype, position); } @@ -54,23 +53,23 @@ Ascii::Ascii(ReaderFrontend *frontend) : ReaderBackend(frontend) { file = 0; - - separator.assign( (const char*) BifConst::InputAscii::separator->Bytes(), + separator.assign( (const char*) BifConst::InputAscii::separator->Bytes(), BifConst::InputAscii::separator->Len()); - if ( separator.size() != 1 ) + + if ( separator.size() != 1 ) Error("separator length has to be 1. Separator will be truncated."); set_separator.assign( (const char*) BifConst::InputAscii::set_separator->Bytes(), BifConst::InputAscii::set_separator->Len()); - if ( set_separator.size() != 1 ) + + if ( set_separator.size() != 1 ) Error("set_separator length has to be 1. Separator will be truncated."); - empty_field.assign( (const char*) BifConst::InputAscii::empty_field->Bytes(), + empty_field.assign( (const char*) BifConst::InputAscii::empty_field->Bytes(), BifConst::InputAscii::empty_field->Len()); - - unset_field.assign( (const char*) BifConst::InputAscii::unset_field->Bytes(), - BifConst::InputAscii::unset_field->Len()); + unset_field.assign( (const char*) BifConst::InputAscii::unset_field->Bytes(), + BifConst::InputAscii::unset_field->Len()); } Ascii::~Ascii() @@ -80,7 +79,7 @@ Ascii::~Ascii() void Ascii::DoClose() { - if ( file != 0 ) + if ( file != 0 ) { file->close(); delete(file); @@ -93,26 +92,26 @@ bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* c fname = path; mode = arg_mode; mtime = 0; - + num_fields = arg_num_fields; fields = arg_fields; - if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) + if ( (mode != MANUAL) && (mode != REREAD) && (mode != STREAM) ) { Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); return false; - } + } file = new ifstream(path.c_str()); - if ( !file->is_open() ) + if ( ! 
file->is_open() ) { Error(Fmt("Init: cannot open %s", fname.c_str())); delete(file); file = 0; return false; } - - if ( ReadHeader(false) == false ) + + if ( ReadHeader(false) == false ) { Error(Fmt("Init: cannot open %s; headers are incorrect", fname.c_str())); file->close(); @@ -120,22 +119,22 @@ bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* c file = 0; return false; } - + DoUpdate(); return true; } -bool Ascii::ReadHeader(bool useCached) +bool Ascii::ReadHeader(bool useCached) { // try to read the header line... string line; map ifields; - if ( !useCached ) + if ( ! useCached ) { - if ( !GetLine(line) ) + if ( ! GetLine(line) ) { Error("could not read first line"); return false; @@ -143,16 +142,17 @@ bool Ascii::ReadHeader(bool useCached) headerline = line; } - else + + else line = headerline; - + // construct list of field names. istringstream splitstream(line); int pos=0; - while ( splitstream ) + while ( splitstream ) { string s; - if ( !getline(splitstream, s, separator[0])) + if ( ! getline(splitstream, s, separator[0])) break; ifields[s] = pos; @@ -161,15 +161,15 @@ bool Ascii::ReadHeader(bool useCached) //printf("Updating fields from description %s\n", line.c_str()); columnMap.clear(); - - for ( unsigned int i = 0; i < num_fields; i++ ) + + for ( unsigned int i = 0; i < num_fields; i++ ) { const Field* field = fields[i]; - - map::iterator fit = ifields.find(field->name); - if ( fit == ifields.end() ) + + map::iterator fit = ifields.find(field->name); + if ( fit == ifields.end() ) { - if ( field->optional ) + if ( field->optional ) { // we do not really need this field. mark it as not present and always send an undef back. FieldMapping f(field->name, field->type, field->subtype, -1); @@ -178,38 +178,43 @@ bool Ascii::ReadHeader(bool useCached) continue; } - Error(Fmt("Did not find requested field %s in input data file %s.", field->name.c_str(), fname.c_str())); + Error(Fmt("Did not find requested field %s in input data file %s.", + field->name.c_str(), fname.c_str())); return false; } FieldMapping f(field->name, field->type, field->subtype, ifields[field->name]); - if ( field->secondary_name != "" ) + + if ( field->secondary_name != "" ) { - map::iterator fit2 = ifields.find(field->secondary_name); - if ( fit2 == ifields.end() ) + map::iterator fit2 = ifields.find(field->secondary_name); + if ( fit2 == ifields.end() ) { - Error(Fmt("Could not find requested port type field %s in input data file.", field->secondary_name.c_str())); + Error(Fmt("Could not find requested port type field %s in input data file.", + field->secondary_name.c_str())); return false; } + f.secondary_position = ifields[field->secondary_name]; } + columnMap.push_back(f); } - + // well, that seems to have worked... return true; } -bool Ascii::GetLine(string& str) +bool Ascii::GetLine(string& str) { while ( getline(*file, str) ) { - if ( str[0] != '#' ) + if ( str[0] != '#' ) return true; - if ( str.compare(0,8, "#fields\t") == 0 ) + if ( str.compare(0,8, "#fields\t") == 0 ) { str = str.substr(8); return true; @@ -220,14 +225,13 @@ bool Ascii::GetLine(string& str) } -Value* Ascii::EntryToVal(string s, FieldMapping field) +Value* Ascii::EntryToVal(string s, FieldMapping field) { - if ( s.compare(unset_field) == 0 ) // field is not set... 
return new Value(field.type, false); Value* val = new Value(field.type, true); - + switch ( field.type ) { case TYPE_ENUM: case TYPE_STRING: @@ -235,13 +239,14 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) break; case TYPE_BOOL: - if ( s == "T" ) + if ( s == "T" ) val->val.int_val = 1; - else if ( s == "F" ) + else if ( s == "F" ) val->val.int_val = 0; - else + else { - Error(Fmt("Field: %s Invalid value for boolean: %s", field.name.c_str(), s.c_str())); + Error(Fmt("Field: %s Invalid value for boolean: %s", + field.name.c_str(), s.c_str())); return false; } break; @@ -266,13 +271,15 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) val->val.port_val.proto = TRANSPORT_UNKNOWN; break; - case TYPE_SUBNET: + case TYPE_SUBNET: { size_t pos = s.find("/"); - if ( pos == s.npos ) { + if ( pos == s.npos ) + { Error(Fmt("Invalid value for subnet: %s", s.c_str())); return false; - } + } + int width = atoi(s.substr(pos+1).c_str()); string addr = s.substr(0, pos); @@ -281,7 +288,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) break; } - case TYPE_ADDR: + case TYPE_ADDR: val->val.addr_val = StringToAddr(s); break; @@ -295,42 +302,42 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) // how many entries do we have... unsigned int length = 1; for ( unsigned int i = 0; i < s.size(); i++ ) - if ( s[i] == ',') length++; + if ( s[i] == ',' ) length++; unsigned int pos = 0; - - if ( s.compare(empty_field) == 0 ) + + if ( s.compare(empty_field) == 0 ) length = 0; Value** lvals = new Value* [length]; - if ( field.type == TYPE_TABLE ) + if ( field.type == TYPE_TABLE ) { val->val.set_val.vals = lvals; val->val.set_val.size = length; } - else if ( field.type == TYPE_VECTOR ) + + else if ( field.type == TYPE_VECTOR ) { val->val.vector_val.vals = lvals; val->val.vector_val.size = length; - } - else - { - assert(false); } + else + assert(false); + if ( length == 0 ) break; //empty istringstream splitstream(s); - while ( splitstream ) + while ( splitstream ) { string element; - if ( !getline(splitstream, element, set_separator[0]) ) + if ( ! getline(splitstream, element, set_separator[0]) ) break; - if ( pos >= length ) + if ( pos >= length ) { Error(Fmt("Internal error while parsing set. pos %d >= length %d." " Element: %s", pos, length, element.c_str())); @@ -338,18 +345,18 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) } Value* newval = EntryToVal(element, field.subType()); - if ( newval == 0 ) + if ( newval == 0 ) { Error("Error while reading set"); return 0; } + lvals[pos] = newval; pos++; } - - if ( pos != length ) + if ( pos != length ) { Error("Internal error while parsing set: did not find all elements"); return 0; @@ -358,24 +365,24 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) break; } - default: Error(Fmt("unsupported field format %d for %s", field.type, field.name.c_str())); return 0; - } + } return val; } // read the entire file and send appropriate thingies back to InputMgr -bool Ascii::DoUpdate() +bool Ascii::DoUpdate() { switch ( mode ) { case REREAD: + { // check if the file has changed struct stat sb; - if ( stat(fname.c_str(), &sb) == -1 ) + if ( stat(fname.c_str(), &sb) == -1 ) { Error(Fmt("Could not get stat for %s", fname.c_str())); return false; @@ -388,54 +395,58 @@ bool Ascii::DoUpdate() // file changed. reread. // fallthrough + } + case MANUAL: case STREAM: - - // dirty, fix me. (well, apparently after trying seeking, etc + { + // dirty, fix me. 
(well, apparently after trying seeking, etc // - this is not that bad) - if ( file && file->is_open() ) + if ( file && file->is_open() ) { - if ( mode == STREAM ) + if ( mode == STREAM ) { file->clear(); // remove end of file evil bits - if ( !ReadHeader(true) ) + if ( !ReadHeader(true) ) return false; // header reading failed break; } file->close(); } + file = new ifstream(fname.c_str()); - if ( !file->is_open() ) + if ( !file->is_open() ) { Error(Fmt("cannot open %s", fname.c_str())); return false; } - - if ( ReadHeader(false) == false ) + if ( ReadHeader(false) == false ) { return false; } break; + } + default: assert(false); } string line; - while ( GetLine(line ) ) + while ( GetLine(line ) ) { // split on tabs istringstream splitstream(line); map stringfields; int pos = 0; - while ( splitstream ) + while ( splitstream ) { string s; - if ( !getline(splitstream, s, separator[0]) ) + if ( ! getline(splitstream, s, separator[0]) ) break; stringfields[pos] = s; @@ -444,7 +455,6 @@ bool Ascii::DoUpdate() pos--; // for easy comparisons of max element. - Value** fields = new Value*[num_fields]; int fpos = 0; @@ -453,33 +463,34 @@ bool Ascii::DoUpdate() fit++ ) { - if ( ! fit->present ) + if ( ! fit->present ) { // add non-present field fields[fpos] = new Value((*fit).type, false); fpos++; continue; } - + assert(fit->position >= 0 ); - if ( (*fit).position > pos || (*fit).secondary_position > pos ) + if ( (*fit).position > pos || (*fit).secondary_position > pos ) { - Error(Fmt("Not enough fields in line %s. Found %d fields, want positions %d and %d", line.c_str(), pos, (*fit).position, (*fit).secondary_position)); + Error(Fmt("Not enough fields in line %s. Found %d fields, want positions %d and %d", + line.c_str(), pos, (*fit).position, (*fit).secondary_position)); return false; } Value* val = EntryToVal(stringfields[(*fit).position], *fit); - if ( val == 0 ) + if ( val == 0 ) { Error("Could not convert String value to Val"); return false; } - - if ( (*fit).secondary_position != -1 ) + + if ( (*fit).secondary_position != -1 ) { // we have a port definition :) - assert(val->type == TYPE_PORT ); + assert(val->type == TYPE_PORT ); // Error(Fmt("Got type %d != PORT with secondary position!", val->type)); val->val.port_val.proto = StringToProto(stringfields[(*fit).secondary_position]); @@ -493,31 +504,33 @@ bool Ascii::DoUpdate() //printf("fpos: %d, second.num_fields: %d\n", fpos, (*it).second.num_fields); assert ( (unsigned int) fpos == num_fields ); - if ( mode == STREAM ) + if ( mode == STREAM ) Put(fields); else SendEntry(fields); } - if ( mode != STREAM ) + if ( mode != STREAM ) EndCurrentSend(); - + return true; } bool Ascii::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); - + switch ( mode ) { case MANUAL: // yay, we do nothing :) break; + case REREAD: case STREAM: - Update(); // call update and not DoUpdate, because update + Update(); // call update and not DoUpdate, because update // checks disabled. 
break; + default: assert(false); } diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index e5f3070724..e5540c5467 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -14,73 +14,57 @@ namespace input { namespace reader { struct FieldMapping { string name; TypeTag type; - // internal type for sets and vectors - TypeTag subtype; + TypeTag subtype; // internal type for sets and vectors int position; - // for ports: pos of the second field - int secondary_position; + int secondary_position; // for ports: pos of the second field bool present; - FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position); - FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_subtype, int arg_position); + FieldMapping(const string& arg_name, const TypeTag& arg_type, int arg_position); + FieldMapping(const string& arg_name, const TypeTag& arg_type, const TypeTag& arg_subtype, int arg_position); FieldMapping(const FieldMapping& arg); FieldMapping() { position = -1; secondary_position = -1; } FieldMapping subType(); - //bool IsEmpty() { return position == -1; } }; - class Ascii : public ReaderBackend { public: - Ascii(ReaderFrontend* frontend); - ~Ascii(); - - static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Ascii(frontend); } - + Ascii(ReaderFrontend* frontend); + ~Ascii(); + + static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Ascii(frontend); } + protected: - virtual bool DoInit(string path, int mode, int arg_num_fields, const threading::Field* const* fields); - virtual void DoClose(); - virtual bool DoUpdate(); private: - virtual bool DoHeartbeat(double network_time, double current_time); - unsigned int num_fields; - - const threading::Field* const * fields; // raw mapping - - // map columns in the file to columns to send back to the manager - vector columnMap; - bool ReadHeader(bool useCached); + bool GetLine(string& str); threading::Value* EntryToVal(string s, FieldMapping type); - bool GetLine(string& str); - + unsigned int num_fields; + const threading::Field* const *fields; // raw mapping + ifstream* file; string fname; + int mode; + time_t mtime; - // Options set from the script-level. - string separator; + // map columns in the file to columns to send back to the manager + vector columnMap; - string set_separator; - - string empty_field; - - string unset_field; - // keep a copy of the headerline to determine field locations when stream descriptions change string headerline; - int mode; - - time_t mtime; - + // Options set from the script-level. 
+ string separator; + string set_separator; + string empty_field; + string unset_field; }; diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index 29f0070fec..c6cc1649eb 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -23,15 +23,14 @@ using threading::Field; Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) { - multiplication_factor = double(BifConst::InputBenchmark::factor); - autospread = double(BifConst::InputBenchmark::autospread); + multiplication_factor = double(BifConst::InputBenchmark::factor); + autospread = double(BifConst::InputBenchmark::autospread); spread = int(BifConst::InputBenchmark::spread); add = int(BifConst::InputBenchmark::addfactor); autospread_time = 0; stopspreadat = int(BifConst::InputBenchmark::stopspreadat); timedspread = double(BifConst::InputBenchmark::timedspread); heart_beat_interval = double(BifConst::Threading::heart_beat_interval); - } Benchmark::~Benchmark() @@ -46,15 +45,15 @@ void Benchmark::DoClose() bool Benchmark::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) { mode = arg_mode; - + num_fields = arg_num_fields; fields = arg_fields; num_lines = atoi(path.c_str()); - + if ( autospread != 0.0 ) autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); - if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) + if ( (mode != MANUAL) && (mode != REREAD) && (mode != STREAM) ) { Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); return false; @@ -66,7 +65,7 @@ bool Benchmark::DoInit(string path, int arg_mode, int arg_num_fields, const Fiel return true; } -string Benchmark::RandomString(const int len) +string Benchmark::RandomString(const int len) { string s(len, ' '); @@ -75,13 +74,13 @@ string Benchmark::RandomString(const int len) "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"; - for (int i = 0; i < len; ++i) - s[i] = values[rand() / (RAND_MAX / sizeof(values))]; + for (int i = 0; i < len; ++i) + s[i] = values[rand() / (RAND_MAX / sizeof(values))]; return s; } -double Benchmark::CurrTime() +double Benchmark::CurrTime() { struct timeval tv; assert ( gettimeofday(&tv, 0) >= 0 ); @@ -91,56 +90,57 @@ double Benchmark::CurrTime() // read the entire file and send appropriate thingies back to InputMgr -bool Benchmark::DoUpdate() +bool Benchmark::DoUpdate() { - int linestosend = num_lines * heart_beat_interval; - for ( int i = 0; i < linestosend; i++ ) + int linestosend = num_lines * heart_beat_interval; + for ( int i = 0; i < linestosend; i++ ) { Value** field = new Value*[num_fields]; - for (unsigned int j = 0; j < num_fields; j++ ) + for (unsigned int j = 0; j < num_fields; j++ ) field[j] = EntryToVal(fields[j]->type, fields[j]->subtype); - if ( mode == STREAM ) + if ( mode == STREAM ) // do not do tracking, spread out elements over the second that we have... 
Put(field); - else + else SendEntry(field); - - if ( stopspreadat == 0 || num_lines < stopspreadat ) + + if ( stopspreadat == 0 || num_lines < stopspreadat ) { - if ( spread != 0 ) + if ( spread != 0 ) usleep(spread); - if ( autospread_time != 0 ) + if ( autospread_time != 0 ) usleep( autospread_time ); } - if ( timedspread != 0.0 ) + if ( timedspread != 0.0 ) { double diff; - do + do diff = CurrTime() - heartbeatstarttime; - while ( diff/heart_beat_interval < i/(linestosend + while ( diff/heart_beat_interval < i/(linestosend + (linestosend * timedspread) ) ); } } - if ( mode != STREAM ) + if ( mode != STREAM ) EndCurrentSend(); return true; } -threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) +threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) { Value* val = new Value(type, true); // basically construct something random from the fields that we want. - + switch ( type ) { case TYPE_ENUM: assert(false); // no enums, please. + case TYPE_STRING: val->val.string_val = new string(RandomString(10)); break; @@ -172,14 +172,14 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) val->val.port_val.proto = TRANSPORT_UNKNOWN; break; - case TYPE_SUBNET: + case TYPE_SUBNET: { val->val.subnet_val.prefix = StringToAddr("192.168.17.1"); val->val.subnet_val.length = 16; } break; - case TYPE_ADDR: + case TYPE_ADDR: val->val.addr_val = StringToAddr("192.168.17.1"); break; @@ -195,26 +195,26 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) Value** lvals = new Value* [length]; - if ( type == TYPE_TABLE ) + if ( type == TYPE_TABLE ) { val->val.set_val.vals = lvals; val->val.set_val.size = length; - } - else if ( type == TYPE_VECTOR ) + } + else if ( type == TYPE_VECTOR ) { val->val.vector_val.vals = lvals; val->val.vector_val.size = length; - } - else + } + else assert(false); if ( length == 0 ) break; //empty - for ( unsigned int pos = 0; pos < length; pos++ ) + for ( unsigned int pos = 0; pos < length; pos++ ) { Value* newval = EntryToVal(subtype, TYPE_ENUM); - if ( newval == 0 ) + if ( newval == 0 ) { Error("Error while reading set"); return 0; @@ -229,7 +229,7 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) default: Error(Fmt("unsupported field format %d", type)); return 0; - } + } return val; @@ -247,9 +247,10 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) case MANUAL: // yay, we do nothing :) break; + case REREAD: case STREAM: - if ( multiplication_factor != 1 || add != 0 ) + if ( multiplication_factor != 1 || add != 0 ) { // we have to document at what time we changed the factor to what value. Value** v = new Value*[2]; @@ -261,10 +262,10 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) SendEvent("lines_changed", 2, v); } - if ( autospread != 0.0 ) + if ( autospread != 0.0 ) // because executing this in every loop is apparently too expensive. autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); - + Update(); // call update and not DoUpdate, because update actually checks disabled. 
SendEvent("HeartbeatDone", 0, 0); @@ -275,4 +276,3 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) return true; } - diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index b791dabe21..ec14dc6567 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -3,41 +3,37 @@ #ifndef INPUT_READERS_BENCHMARK_H #define INPUT_READERS_BENCHMARK_H - #include "../ReaderBackend.h" namespace input { namespace reader { +/** + * A benchmark reader to measure performance of the input framework. + */ class Benchmark : public ReaderBackend { public: - Benchmark(ReaderFrontend* frontend); - ~Benchmark(); - - static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Benchmark(frontend); } - + Benchmark(ReaderFrontend* frontend); + ~Benchmark(); + + static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Benchmark(frontend); } + protected: - virtual bool DoInit(string path, int mode, int arg_num_fields, const threading::Field* const* fields); - virtual void DoClose(); - virtual bool DoUpdate(); private: - virtual bool DoHeartbeat(double network_time, double current_time); - unsigned int num_fields; - double CurrTime(); - - const threading::Field* const * fields; // raw mapping - + string RandomString(const int len); threading::Value* EntryToVal(TypeTag Type, TypeTag subtype); + unsigned int num_fields; + const threading::Field* const * fields; // raw mapping + int mode; int num_lines; - double multiplication_factor; int spread; double autospread; @@ -47,9 +43,6 @@ private: double heartbeatstarttime; double timedspread; double heart_beat_interval; - - string RandomString(const int len); - }; diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index ce0b4f8a5f..6538da070b 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -28,8 +28,10 @@ Raw::Raw(ReaderFrontend *frontend) : ReaderBackend(frontend) file = 0; in = 0; - separator.assign( (const char*) BifConst::InputRaw::record_separator->Bytes(), BifConst::InputRaw::record_separator->Len()); - if ( separator.size() != 1 ) + separator.assign( (const char*) BifConst::InputRaw::record_separator->Bytes(), + BifConst::InputRaw::record_separator->Len()); + + if ( separator.size() != 1 ) Error("separator length has to be 1. 
Separator will be truncated."); } @@ -40,57 +42,56 @@ Raw::~Raw() void Raw::DoClose() { - if ( file != 0 ) + if ( file != 0 ) { Close(); } } -bool Raw::Open() +bool Raw::Open() { - if ( execute ) + if ( execute ) { file = popen(fname.c_str(), "r"); - if ( file == NULL ) + if ( file == NULL ) { Error(Fmt("Could not execute command %s", fname.c_str())); return false; } } - else + else { file = fopen(fname.c_str(), "r"); - if ( file == NULL ) + if ( file == NULL ) { Error(Fmt("Init: cannot open %s", fname.c_str())); return false; } } - + + // This is defined in input/fdstream.h in = new boost::fdistream(fileno(file)); - if ( execute && mode == STREAM ) - { + if ( execute && mode == STREAM ) fcntl(fileno(file), F_SETFL, O_NONBLOCK); - } return true; } bool Raw::Close() { - if ( file == NULL ) + if ( file == NULL ) { InternalError(Fmt("Trying to close closed file for stream %s", fname.c_str())); return false; } - if ( execute ) + if ( execute ) { delete(in); pclose(file); - } - else + } + else { delete(in); fclose(file); @@ -114,13 +115,13 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con num_fields = arg_num_fields; fields = arg_fields; - if ( path.length() == 0 ) + if ( path.length() == 0 ) { Error("No source path provided"); return false; } - - if ( arg_num_fields != 1 ) + + if ( arg_num_fields != 1 ) { Error("Filter for raw reader contains more than one field. " "Filters for the raw reader may only contain exactly one string field. " @@ -128,7 +129,7 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con return false; } - if ( fields[0]->type != TYPE_STRING ) + if ( fields[0]->type != TYPE_STRING ) { Error("Filter for raw reader contains a field that is not of type string."); return false; @@ -136,30 +137,32 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con // do Initialization char last = path[path.length()-1]; - if ( last == '|' ) + if ( last == '|' ) { execute = true; fname = path.substr(0, fname.length() - 1); - if ( ( mode != MANUAL ) && ( mode != STREAM ) ) { - Error(Fmt("Unsupported read mode %d for source %s in execution mode", mode, fname.c_str())); + if ( (mode != MANUAL) && (mode != STREAM) ) { + Error(Fmt("Unsupported read mode %d for source %s in execution mode", + mode, fname.c_str())); return false; - } - + } + result = Open(); } else { execute = false; - if ( ( mode != MANUAL ) && (mode != REREAD) && ( mode != STREAM ) ) + if ( (mode != MANUAL) && (mode != REREAD) && (mode != STREAM) ) { - Error(Fmt("Unsupported read mode %d for source %s", mode, fname.c_str())); + Error(Fmt("Unsupported read mode %d for source %s", + mode, fname.c_str())); return false; } - result = Open(); + result = Open(); } - if ( result == false ) + if ( result == false ) return result; #ifdef DEBUG @@ -176,80 +179,78 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con } -bool Raw::GetLine(string& str) +bool Raw::GetLine(string& str) { - if ( in->peek() == std::iostream::traits_type::eof() ) + if ( in->peek() == std::iostream::traits_type::eof() ) return false; - if ( in->eofbit == true || in->failbit == true ) + if ( in->eofbit == true || in->failbit == true ) return false; - while ( getline(*in, str, separator[0]) ) - return true; - - return false; + return getline(*in, str, separator[0]); } - // read the entire file and send appropriate thingies back to InputMgr -bool Raw::DoUpdate() +bool Raw::DoUpdate() { - if ( firstrun ) + if ( firstrun ) firstrun = false; + else { 
switch ( mode ) { - case REREAD: + case REREAD: + { + // check if the file has changed + struct stat sb; + if ( stat(fname.c_str(), &sb) == -1 ) { - // check if the file has changed - struct stat sb; - if ( stat(fname.c_str(), &sb) == -1 ) - { - Error(Fmt("Could not get stat for %s", fname.c_str())); - return false; - } - - if ( sb.st_mtime <= mtime ) - // no change - return true; - - mtime = sb.st_mtime; - // file changed. reread. - - // fallthrough + Error(Fmt("Could not get stat for %s", fname.c_str())); + return false; } - case MANUAL: - case STREAM: - if ( mode == STREAM && file != NULL && in != NULL ) - { - //fpurge(file); - in->clear(); // remove end of file evil bits - break; - } - Close(); - if ( !Open() ) - return false; + if ( sb.st_mtime <= mtime ) + // no change + return true; + mtime = sb.st_mtime; + // file changed. reread. + // + // fallthrough + } + + case MANUAL: + case STREAM: + if ( mode == STREAM && file != NULL && in != NULL ) + { + //fpurge(file); + in->clear(); // remove end of file evil bits break; - default: - assert(false); + } + Close(); + if ( ! Open() ) + return false; + + break; + + default: + assert(false); } } string line; - while ( GetLine(line) ) + while ( GetLine(line) ) { assert (num_fields == 1); - + Value** fields = new Value*[1]; // filter has exactly one text field. convert to it. Value* val = new Value(TYPE_STRING, true); val->val.string_val = new string(line); fields[0] = val; - + Put(fields); } @@ -260,7 +261,6 @@ bool Raw::DoUpdate() return true; } - bool Raw::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); @@ -269,10 +269,11 @@ bool Raw::DoHeartbeat(double network_time, double current_time) case MANUAL: // yay, we do nothing :) break; + case REREAD: case STREAM: - Update(); // call update and not DoUpdate, because update - // checks disabled. + Update(); // call update and not DoUpdate, because update + // checks disabled. break; default: assert(false); diff --git a/src/input/readers/Raw.h b/src/input/readers/Raw.h index 9f575bb89c..3fa09309b0 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/Raw.h @@ -10,51 +10,44 @@ namespace input { namespace reader { +/** + * A reader that returns a file (or the output of a command) as a single + * blob. + */ class Raw : public ReaderBackend { public: - Raw(ReaderFrontend* frontend); - ~Raw(); - - static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Raw(frontend); } - + Raw(ReaderFrontend* frontend); + ~Raw(); + + static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Raw(frontend); } + protected: - virtual bool DoInit(string path, int mode, int arg_num_fields, const threading::Field* const* fields); - virtual void DoClose(); - virtual bool DoUpdate(); private: - virtual bool DoHeartbeat(double network_time, double current_time); + bool Open(); bool Close(); - bool GetLine(string& str); - + + unsigned int num_fields; + const threading::Field* const * fields; // raw mapping + istream* in; - FILE* file; - string fname; - - // Options set from the script-level. - string separator; - int mode; bool execute; bool firstrun; - time_t mtime; - - unsigned int num_fields; - - const threading::Field* const * fields; // raw mapping + // Options set from the script-level. 
+ string separator; }; - } } diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out index 8611b35dd3..a38f3fce84 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out @@ -6,4 +6,4 @@ print outfile, s; close(outfile); }] Input::EVENT_NEW - 8 ../input.log +8 ../input.log diff --git a/testing/btest/scripts/base/frameworks/input/executeraw.bro b/testing/btest/scripts/base/frameworks/input/executeraw.bro index 6d07a9bf29..6df28d08ea 100644 --- a/testing/btest/scripts/base/frameworks/input/executeraw.bro +++ b/testing/btest/scripts/base/frameworks/input/executeraw.bro @@ -1,6 +1,7 @@ # # @TEST-EXEC: btest-bg-run bro bro -b %INPUT # @TEST-EXEC: btest-bg-wait -k 1 +# @TEST-EXEC: cat out.tmp | sed 's/^ *//g' >out # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -31,7 +32,7 @@ event line(description: Input::EventDescription, tpe: Input::Event, s: string) { event bro_init() { - outfile = open ("../out"); + outfile = open ("../out.tmp"); Input::add_event([$source="wc -l ../input.log |", $reader=Input::READER_RAW, $name="input", $fields=Val, $ev=line]); Input::remove("input"); } From f4864c69af58c153700924774092b85c7405eaa9 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 29 May 2012 09:21:16 -0700 Subject: [PATCH 323/651] fix another memory lead (when updating tables). Adjust twotables testcase - now it is faster. Shorten the output -- because of threading, the results did not always come out in the same order (it depends on which thread manages to sneak in the results into the queue earlier). --- src/input/Manager.cc | 3 +- .../out | 231 ++---------------- .../base/frameworks/input/twotables.bro | 21 +- 3 files changed, 41 insertions(+), 214 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 6cae5e2f34..56d7d82ce6 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1123,6 +1123,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) val = stream->tab->Lookup(idx); assert(val != 0); predidx = ListValToRecordVal(idx, stream->itype, &startpos); + Unref(idx); ev = new EnumVal(BifEnum::Input::EVENT_REMOVED, BifType::Enum::Input::Event); } @@ -1594,7 +1595,7 @@ RecordVal* Manager::ListValToRecordVal(ListVal* list, RecordType *request_type, (*position)++; } - rec->Assign(i, fieldVal); + rec->Assign(i, fieldVal->Ref()); } return rec; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out index 41d9438da0..e9e03add3a 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out @@ -29,68 +29,6 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ -Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ -[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -}, idx=, val=, want_record=T, ev=line -{ -print A::outfile, ============EVENT============; -print A::outfile, 
Description; -print A::outfile, A::description; -print A::outfile, Type; -print A::outfile, A::tpe; -print A::outfile, Left; -print A::outfile, A::left; -print A::outfile, Right; -print A::outfile, A::right; -}, pred=anonymous-function -{ -print A::outfile, ============PREDICATE============; -print A::outfile, A::typ; -print A::outfile, A::left; -print A::outfile, A::right; -return (T); -}] -Type -Input::EVENT_NEW -Left -[i=-42] -Right -[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] ==========SERVERS============ { [-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ @@ -119,68 +57,6 @@ BB }, vc=[10, 20, 30], ve=[]] } ============EVENT============ -Description -[source=../input2.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh2, destination={ -[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -}, idx=, val=, want_record=T, ev=line -{ -print A::outfile, ============EVENT============; -print A::outfile, Description; -print A::outfile, A::description; -print A::outfile, Type; -print A::outfile, A::tpe; -print A::outfile, Left; -print A::outfile, A::left; -print A::outfile, Right; -print A::outfile, A::right; -}, pred=anonymous-function -{ -print A::outfile, ============PREDICATE 2============; -print A::outfile, A::typ; -print A::outfile, A::left; -print A::outfile, A::right; -return (T); -}] -Type -Input::EVENT_NEW -Left -[i=-43] -Right -[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] ==========SERVERS============ { [-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ @@ -239,87 +115,7 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ -Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ -[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -}, idx=, val=, want_record=T, ev=line -{ -print A::outfile, ============EVENT============; -print A::outfile, Description; -print A::outfile, A::description; -print A::outfile, Type; -print A::outfile, A::tpe; -print A::outfile, Left; -print A::outfile, A::left; -print A::outfile, Right; -print A::outfile, A::right; -}, pred=anonymous-function -{ -print A::outfile, ============PREDICATE============; -print A::outfile, A::typ; -print A::outfile, A::left; -print A::outfile, A::right; -return (T); -}] -Type -Input::EVENT_NEW -Left -[i=-44] -Right -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, 
iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] ============EVENT============ -Description -Input::EVENT_REMOVED -Type -[i=-42] -Left -[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -Right ==========SERVERS============ { [-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ @@ -347,3 +143,30 @@ BB }, vc=[10, 20, 30], ve=[]] } +done +{ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} diff --git a/testing/btest/scripts/base/frameworks/input/twotables.bro b/testing/btest/scripts/base/frameworks/input/twotables.bro index 6f18e0e939..1413275e63 100644 --- a/testing/btest/scripts/base/frameworks/input/twotables.bro +++ b/testing/btest/scripts/base/frameworks/input/twotables.bro @@ -64,14 +64,14 @@ global try: count; event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) { print outfile, "============EVENT============"; - print outfile, "Description"; - print outfile, description; - print outfile, "Type"; - print outfile, tpe; - print outfile, "Left"; - print outfile, left; - print outfile, "Right"; - print outfile, right; +# print outfile, "Description"; +# print outfile, description; +# print outfile, "Type"; +# print outfile, tpe; +# print outfile, "Left"; +# print outfile, left; +# print outfile, "Right"; +# print outfile, right; } event bro_init() @@ -105,9 +105,12 @@ event Input::update_finished(name: string, source: string) { print outfile, servers; try = try + 1; - if ( try == 5 ) { + if ( try == 3 ) { print outfile, "done"; + print outfile, servers; close(outfile); Input::remove("input"); + Input::remove("input2"); + terminate(); } } From 1416d5404dca9d0e54c5c270176bb9a4ee56db3f Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 29 May 2012 10:35:56 -0700 Subject: [PATCH 324/651] and another small memory leak when using streaming reads. 
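
Background: this fix follows Bro's manual reference-counting convention for Vals. The table assignment in Manager::PutTable() only reads the index value and does not take over the caller's reference to it (hence the "not consumed by assign" comment added below), so the caller must Unref() the index itself once the assignment is done. The following minimal, self-contained sketch illustrates that ownership rule; CountedVal, ToyTable and their methods are illustrative stand-ins, not Bro's actual Val/TableVal API.

// Toy model of the ownership rule behind the Unref(idxval) added below.
struct CountedVal
    {
    int refcount;
    CountedVal() : refcount(1) { }      // a new value starts with one reference
    };

inline void Unref(CountedVal* v)
    {
    if ( v && --v->refcount == 0 )
        delete v;
    }

// Stand-in for a table whose Assign() stores the value but only *reads*
// the index, so the caller's reference to the index stays the caller's.
struct ToyTable
    {
    CountedVal* stored;
    ToyTable() : stored(0) { }

    void Assign(CountedVal* index, CountedVal* value)
        {
        (void) index;       // inspected, never retained
        Unref(stored);      // release any previously stored value
        stored = value;     // the table keeps this reference
        }
    };

int main()
    {
    ToyTable tab;
    CountedVal* idxval = new CountedVal;
    CountedVal* valval = new CountedVal;

    tab.Assign(idxval, valval);
    Unref(idxval);          // not consumed by Assign(); forgetting this
                            // release is the kind of leak fixed below
    Unref(tab.stored);      // tear-down for the example
    return 0;
    }
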
--- src/input/Manager.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 56d7d82ce6..9bf885072b 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -836,7 +836,6 @@ Val* Manager::ValueToIndexVal(int num_fields, const RecordType *type, const Valu idxval = ValueToVal(vals[0], type->FieldType(0)); position = 1; } - else { ListVal *l = new ListVal(TYPE_ANY); @@ -1283,7 +1282,6 @@ int Manager::PutTable(Stream* i, const Value* const *vals) else if ( stream->num_val_fields == 1 && stream->want_record == 0 ) valval = ValueToVal(vals[position], stream->rtype->FieldType(0)); - else valval = ValueToRecordVal(vals, stream->rtype, &position); @@ -1377,6 +1375,8 @@ int Manager::PutTable(Stream* i, const Value* const *vals) else // no predicates or other stuff stream->tab->Assign(idxval, valval); + Unref(idxval); // not consumed by assign + return stream->num_idx_fields + stream->num_val_fields; } From 15689ce005d8c7c4428df62d3ae89d2c90f5609f Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 29 May 2012 14:04:36 -0500 Subject: [PATCH 325/651] Add more tests for previously-untested BIFs --- .../Baseline/bifs.bytestring_to_hexstr/out | 3 ++ .../Baseline/bifs.convert_for_pattern/out | 3 ++ testing/btest/Baseline/bifs.file_mode/out | 10 ++++++ testing/btest/Baseline/bifs.is_port/out | 9 +++++ testing/btest/Baseline/bifs.merge_pattern/out | 2 ++ .../btest/Baseline/bifs.parse_dotted_addr/out | 2 ++ testing/btest/Baseline/bifs.parse_ftp/out | 5 +++ .../Baseline/bifs.raw_bytes_to_v4_addr/out | 2 ++ .../btest/Baseline/bifs.string_to_pattern/out | 6 ++++ testing/btest/bifs/bytestring_to_hexstr.bro | 10 ++++++ testing/btest/bifs/convert_for_pattern.bro | 10 ++++++ testing/btest/bifs/file_mode.bro | 36 +++++++++++++++++++ testing/btest/bifs/is_port.bro | 22 ++++++++++++ testing/btest/bifs/merge_pattern.bro | 17 +++++++++ testing/btest/bifs/parse_dotted_addr.bro | 9 +++++ testing/btest/bifs/parse_ftp.bro | 15 ++++++++ testing/btest/bifs/raw_bytes_to_v4_addr.bro | 9 +++++ testing/btest/bifs/string_to_pattern.bro | 14 ++++++++ 18 files changed, 184 insertions(+) create mode 100644 testing/btest/Baseline/bifs.bytestring_to_hexstr/out create mode 100644 testing/btest/Baseline/bifs.convert_for_pattern/out create mode 100644 testing/btest/Baseline/bifs.file_mode/out create mode 100644 testing/btest/Baseline/bifs.is_port/out create mode 100644 testing/btest/Baseline/bifs.merge_pattern/out create mode 100644 testing/btest/Baseline/bifs.parse_dotted_addr/out create mode 100644 testing/btest/Baseline/bifs.parse_ftp/out create mode 100644 testing/btest/Baseline/bifs.raw_bytes_to_v4_addr/out create mode 100644 testing/btest/Baseline/bifs.string_to_pattern/out create mode 100644 testing/btest/bifs/bytestring_to_hexstr.bro create mode 100644 testing/btest/bifs/convert_for_pattern.bro create mode 100644 testing/btest/bifs/file_mode.bro create mode 100644 testing/btest/bifs/is_port.bro create mode 100644 testing/btest/bifs/merge_pattern.bro create mode 100644 testing/btest/bifs/parse_dotted_addr.bro create mode 100644 testing/btest/bifs/parse_ftp.bro create mode 100644 testing/btest/bifs/raw_bytes_to_v4_addr.bro create mode 100644 testing/btest/bifs/string_to_pattern.bro diff --git a/testing/btest/Baseline/bifs.bytestring_to_hexstr/out b/testing/btest/Baseline/bifs.bytestring_to_hexstr/out new file mode 100644 index 0000000000..241fa43ec3 --- /dev/null +++ b/testing/btest/Baseline/bifs.bytestring_to_hexstr/out @@ -0,0 +1,3 @@ +3034 + +00 diff 
--git a/testing/btest/Baseline/bifs.convert_for_pattern/out b/testing/btest/Baseline/bifs.convert_for_pattern/out new file mode 100644 index 0000000000..0de79c0927 --- /dev/null +++ b/testing/btest/Baseline/bifs.convert_for_pattern/out @@ -0,0 +1,3 @@ +foo + +b\[a\-z\]\+ diff --git a/testing/btest/Baseline/bifs.file_mode/out b/testing/btest/Baseline/bifs.file_mode/out new file mode 100644 index 0000000000..0c7b672b5b --- /dev/null +++ b/testing/btest/Baseline/bifs.file_mode/out @@ -0,0 +1,10 @@ +rw-r--r-- +rwxrwxrwx +rwxrwxrwt +rwxr-x--T +rwsr-xr-x +r-S------ +rwxr-sr-x +r--r-S--- +--xr-xrwx +--------- diff --git a/testing/btest/Baseline/bifs.is_port/out b/testing/btest/Baseline/bifs.is_port/out new file mode 100644 index 0000000000..0a7c80fc6e --- /dev/null +++ b/testing/btest/Baseline/bifs.is_port/out @@ -0,0 +1,9 @@ +T +F +F +F +T +F +F +F +T diff --git a/testing/btest/Baseline/bifs.merge_pattern/out b/testing/btest/Baseline/bifs.merge_pattern/out new file mode 100644 index 0000000000..fe8ebc3c01 --- /dev/null +++ b/testing/btest/Baseline/bifs.merge_pattern/out @@ -0,0 +1,2 @@ +match +match diff --git a/testing/btest/Baseline/bifs.parse_dotted_addr/out b/testing/btest/Baseline/bifs.parse_dotted_addr/out new file mode 100644 index 0000000000..1a09fd45a5 --- /dev/null +++ b/testing/btest/Baseline/bifs.parse_dotted_addr/out @@ -0,0 +1,2 @@ +192.168.0.2 +1234::1 diff --git a/testing/btest/Baseline/bifs.parse_ftp/out b/testing/btest/Baseline/bifs.parse_ftp/out new file mode 100644 index 0000000000..c080d56bdf --- /dev/null +++ b/testing/btest/Baseline/bifs.parse_ftp/out @@ -0,0 +1,5 @@ +[h=192.168.0.2, p=257/tcp, valid=T] +[h=192.168.0.2, p=257/tcp, valid=T] +[h=fe80::12, p=1234/tcp, valid=T] +[h=192.168.0.2, p=257/tcp, valid=T] +[h=::, p=1234/tcp, valid=T] diff --git a/testing/btest/Baseline/bifs.raw_bytes_to_v4_addr/out b/testing/btest/Baseline/bifs.raw_bytes_to_v4_addr/out new file mode 100644 index 0000000000..e0424e0e07 --- /dev/null +++ b/testing/btest/Baseline/bifs.raw_bytes_to_v4_addr/out @@ -0,0 +1,2 @@ +65.66.67.68 +0.0.0.0 diff --git a/testing/btest/Baseline/bifs.string_to_pattern/out b/testing/btest/Baseline/bifs.string_to_pattern/out new file mode 100644 index 0000000000..2492fbade2 --- /dev/null +++ b/testing/btest/Baseline/bifs.string_to_pattern/out @@ -0,0 +1,6 @@ +/^?(foo)$?/ +/^?()$?/ +/^?(b[a-z]+)$?/ +/^?(foo)$?/ +/^?()$?/ +/^?(b\[a\-z\]\+)$?/ diff --git a/testing/btest/bifs/bytestring_to_hexstr.bro b/testing/btest/bifs/bytestring_to_hexstr.bro new file mode 100644 index 0000000000..976a4ccf71 --- /dev/null +++ b/testing/btest/bifs/bytestring_to_hexstr.bro @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + print bytestring_to_hexstr("04"); + print bytestring_to_hexstr(""); + print bytestring_to_hexstr("\0"); + } diff --git a/testing/btest/bifs/convert_for_pattern.bro b/testing/btest/bifs/convert_for_pattern.bro new file mode 100644 index 0000000000..11533cd49b --- /dev/null +++ b/testing/btest/bifs/convert_for_pattern.bro @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + print convert_for_pattern("foo"); + print convert_for_pattern(""); + print convert_for_pattern("b[a-z]+"); + } diff --git a/testing/btest/bifs/file_mode.bro b/testing/btest/bifs/file_mode.bro new file mode 100644 index 0000000000..c63a2fa188 --- /dev/null +++ b/testing/btest/bifs/file_mode.bro @@ -0,0 +1,36 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + 
{ + local a = 420; # octal: 0644 + print file_mode(a); + + a = 511; # octal: 0777 + print file_mode(a); + + a = 1023; # octal: 01777 + print file_mode(a); + + a = 1000; # octal: 01750 + print file_mode(a); + + a = 2541; # octal: 04755 + print file_mode(a); + + a = 2304; # octal: 04400 + print file_mode(a); + + a = 1517; # octal: 02755 + print file_mode(a); + + a = 1312; # octal: 02440 + print file_mode(a); + + a = 111; # octal: 0157 + print file_mode(a); + + a = 0; + print file_mode(a); + } diff --git a/testing/btest/bifs/is_port.bro b/testing/btest/bifs/is_port.bro new file mode 100644 index 0000000000..fe2c3f7c35 --- /dev/null +++ b/testing/btest/bifs/is_port.bro @@ -0,0 +1,22 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = 123/tcp; + local b = 123/udp; + local c = 123/icmp; + + print is_tcp_port(a); + print is_tcp_port(b); + print is_tcp_port(c); + + print is_udp_port(a); + print is_udp_port(b); + print is_udp_port(c); + + print is_icmp_port(a); + print is_icmp_port(b); + print is_icmp_port(c); + } diff --git a/testing/btest/bifs/merge_pattern.bro b/testing/btest/bifs/merge_pattern.bro new file mode 100644 index 0000000000..b447f9a15b --- /dev/null +++ b/testing/btest/bifs/merge_pattern.bro @@ -0,0 +1,17 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = /foo/; + local b = /b[a-z]+/; + local c = merge_pattern(a, b); + + if ( "bar" == c ) + print "match"; + + if ( "foo" == c ) + print "match"; + + } diff --git a/testing/btest/bifs/parse_dotted_addr.bro b/testing/btest/bifs/parse_dotted_addr.bro new file mode 100644 index 0000000000..6fdba26452 --- /dev/null +++ b/testing/btest/bifs/parse_dotted_addr.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + print parse_dotted_addr("192.168.0.2"); + print parse_dotted_addr("1234::1"); + } diff --git a/testing/btest/bifs/parse_ftp.bro b/testing/btest/bifs/parse_ftp.bro new file mode 100644 index 0000000000..ffdc941b4b --- /dev/null +++ b/testing/btest/bifs/parse_ftp.bro @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + print parse_ftp_port("192,168,0,2,1,1"); + + print parse_eftp_port("|1|192.168.0.2|257|"); + print parse_eftp_port("|2|fe80::12|1234|"); + + print parse_ftp_pasv("227 Entering Passive Mode (192,168,0,2,1,1)"); + + print parse_ftp_epsv("229 Entering Extended Passive Mode (|||1234|)"); + } diff --git a/testing/btest/bifs/raw_bytes_to_v4_addr.bro b/testing/btest/bifs/raw_bytes_to_v4_addr.bro new file mode 100644 index 0000000000..754580a5b0 --- /dev/null +++ b/testing/btest/bifs/raw_bytes_to_v4_addr.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + print raw_bytes_to_v4_addr("ABCD"); + print raw_bytes_to_v4_addr("ABC"); + } diff --git a/testing/btest/bifs/string_to_pattern.bro b/testing/btest/bifs/string_to_pattern.bro new file mode 100644 index 0000000000..5164c4576f --- /dev/null +++ b/testing/btest/bifs/string_to_pattern.bro @@ -0,0 +1,14 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + print string_to_pattern("foo", F); + print string_to_pattern("", F); + print string_to_pattern("b[a-z]+", F); + + print string_to_pattern("foo", T); + print string_to_pattern("", T); + print string_to_pattern("b[a-z]+", T); + } From 0c5afc59f79099fa1874cbe96e3bb74b7df693ad Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 29 
May 2012 14:51:45 -0500 Subject: [PATCH 326/651] Improve script debugger backtrace and print commands. Stack trace context descriptions are no longer limited to 1024 chars and better error messages are relayed when the arguments to print commands fail to parse (e.g. an "unknown identifier" was given). --- src/Debug.cc | 18 +++++++++++++++--- src/DebugCmds.cc | 6 +++--- src/parse.y | 7 +++++-- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/src/Debug.cc b/src/Debug.cc index ea9c52f77e..a1e2000bea 100644 --- a/src/Debug.cc +++ b/src/Debug.cc @@ -721,7 +721,6 @@ static char* get_prompt(bool reset_counter = false) string get_context_description(const Stmt* stmt, const Frame* frame) { - char buf[1024]; ODesc d; const BroFunc* func = frame->GetFunction(); @@ -739,10 +738,14 @@ string get_context_description(const Stmt* stmt, const Frame* frame) loc.last_line = 0; } - safe_snprintf(buf, sizeof(buf), "In %s at %s:%d", + size_t buf_size = strlen(d.Description()) + strlen(loc.filename) + 1024; + char* buf = new char[buf_size]; + safe_snprintf(buf, buf_size, "In %s at %s:%d", d.Description(), loc.filename, loc.last_line); - return string(buf); + string retval(buf); + delete [] buf; + return retval; } int dbg_handle_debug_input() @@ -924,6 +927,8 @@ bool post_execute_stmt(Stmt* stmt, Frame* f, Val* result, stmt_flow_type* flow) // Evaluates the given expression in the context of the currently selected // frame. Returns the resulting value, or nil if none (or there was an error). Expr* g_curr_debug_expr = 0; +const char* g_curr_debug_error = 0; +bool in_debug = false; // ### fix this hardwired access to external variables etc. struct yy_buffer_state; @@ -969,6 +974,10 @@ Val* dbg_eval_expr(const char* expr) Val* result = 0; if ( yyparse() ) { + if ( g_curr_debug_error ) + debug_msg("Parsing expression '%s' failed: %s\n", expr, g_curr_debug_error); + else + debug_msg("Parsing expression '%s' failed\n", expr); if ( g_curr_debug_expr ) { delete g_curr_debug_expr; @@ -983,6 +992,9 @@ Val* dbg_eval_expr(const char* expr) delete g_curr_debug_expr; g_curr_debug_expr = 0; + delete [] g_curr_debug_error; + g_curr_debug_error = 0; + in_debug = false; return result; } diff --git a/src/DebugCmds.cc b/src/DebugCmds.cc index 1d3b9dd220..bfb4d6ecc8 100644 --- a/src/DebugCmds.cc +++ b/src/DebugCmds.cc @@ -553,7 +553,8 @@ int dbg_cmd_print(DebugCmd cmd, const vector& args) for ( int i = 0; i < int(args.size()); ++i ) { expr += args[i]; - expr += " "; + if ( i < int(args.size()) - 1 ) + expr += " "; } Val* val = dbg_eval_expr(expr.c_str()); @@ -566,8 +567,7 @@ int dbg_cmd_print(DebugCmd cmd, const vector& args) } else { - // ### Print something? - // debug_msg("\n"); + debug_msg("\n"); } return 1; diff --git a/src/parse.y b/src/parse.y index f78003f08b..6875f07668 100644 --- a/src/parse.y +++ b/src/parse.y @@ -112,13 +112,14 @@ bool is_export = false; // true if in an export {} block * (obviously not reentrant). 
*/ extern Expr* g_curr_debug_expr; +extern bool in_debug; +extern const char* g_curr_debug_error; #define YYLTYPE yyltype Expr* bro_this = 0; int in_init = 0; int in_record = 0; -bool in_debug = false; bool resolving_global_ID = false; bool defining_global_ID = false; @@ -249,7 +250,6 @@ bro: TOK_DEBUG { in_debug = true; } expr { g_curr_debug_expr = $3; - in_debug = false; } ; @@ -1685,6 +1685,9 @@ int yyerror(const char msg[]) strcat(msgbuf, "\nDocumentation mode is enabled: " "remember to check syntax of ## style comments\n"); + if ( in_debug ) + g_curr_debug_error = copy_string(msg); + reporter->Error("%s", msgbuf); return 0; From 0aecca979e830d0ee8f6524c4dee3fe83cfc3c4c Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 29 May 2012 17:29:11 -0500 Subject: [PATCH 327/651] Remove unnecessary assert in ICMP analyzer (addresses #822). The ICMP/ICMPv6 analyzers function correctly when full packets have not been captured, but everything up to and including the ICMP header is there (e.g. the functions that inspect ICMP error message context correctly check the caplen to see if more info can be extracted). The "Should have been caught earlier already." comment may have referred to NetSessions::CheckHeaderTrunc, which works as intended to catch cases where the ICMP header is not there in full, but then the assert was still not correctly formulated for that... Also changed the ICMP checksum calculation to not occur when the full packet has not been captured, which seems consistent with what the UDP analysis does. --- src/ICMP.cc | 4 +--- testing/btest/Baseline/core.truncation/output | 8 ++++++++ testing/btest/Traces/trunc/icmp-header-trunc.pcap | Bin 0 -> 136 bytes .../btest/Traces/trunc/icmp-payload-trunc.pcap | Bin 0 -> 408 bytes testing/btest/core/truncation.test | 13 +++++++++++++ 5 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 testing/btest/Traces/trunc/icmp-header-trunc.pcap create mode 100644 testing/btest/Traces/trunc/icmp-payload-trunc.pcap diff --git a/src/ICMP.cc b/src/ICMP.cc index 05a6b67dff..b06c6440e1 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -49,9 +49,7 @@ void ICMP_Analyzer::DeliverPacket(int len, const u_char* data, const struct icmp* icmpp = (const struct icmp*) data; - assert(caplen >= len); // Should have been caught earlier already. - - if ( ! ignore_checksums ) + if ( ! 
ignore_checksums && caplen >= len ) { int chksum = 0; diff --git a/testing/btest/Baseline/core.truncation/output b/testing/btest/Baseline/core.truncation/output index f3d64b8b28..95d9073648 100644 --- a/testing/btest/Baseline/core.truncation/output +++ b/testing/btest/Baseline/core.truncation/output @@ -22,3 +22,11 @@ #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334094648.590126 - - - - - truncated_IP - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1338328954.078361 - - - - - internally_truncated_header - F bro diff --git a/testing/btest/Traces/trunc/icmp-header-trunc.pcap b/testing/btest/Traces/trunc/icmp-header-trunc.pcap new file mode 100644 index 0000000000000000000000000000000000000000..5765cf288605f05f9344d27334bb26258b467ebf GIT binary patch literal 136 zcmca|c+)~A1{MYw`2U}Qff2~5azE-XX~f8&0c0nEBn57FTzU^;Ffed1xH2$=L>^&a zaA4HF#Rb%GfI*v!gW=}w%=;jH^IMVhL9~E%L-g&M>%j1X@zQ^g9*|xJhKE3X04E?J Al>h($ literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/trunc/icmp-payload-trunc.pcap b/testing/btest/Traces/trunc/icmp-payload-trunc.pcap new file mode 100644 index 0000000000000000000000000000000000000000..13607dd50cd5344a6d6706b56d8070474a5e3cee GIT binary patch literal 408 zcmca|c+)~A1{MYw`2U}Qff2}AYkt%}p^J+l0LV@PNebNVxbz;xU|`^2aAjZ!d3%6? z!GST{kqfB7tM;KL2ZKuQ&CLu9{zuK%GH|uqb8vET^YHTV3kV7ci-?MeOGrvd%gD;f zD<~={tEhs^-!v1+e6Yz7TOj7o*vi0Q!MMc-WH!hS1_m7x%-;YspE(7|d<^&J&jq<( zj1gq^0S0X@4hGxzk0I{gz`&J!3%~n8=C^M{G9SbJITsulelRMW1(^+Tzbgsm0{}2B BQ#$|v literal 0 HcmV?d00001 diff --git a/testing/btest/core/truncation.test b/testing/btest/core/truncation.test index ee8bdd5bf9..3406879183 100644 --- a/testing/btest/core/truncation.test +++ b/testing/btest/core/truncation.test @@ -6,4 +6,17 @@ # @TEST-EXEC: cat weird.log >> output # @TEST-EXEC: bro -r $TRACES/trunc/ip6-ext-trunc.pcap # @TEST-EXEC: cat weird.log >> output + +# If an ICMP packet's payload is truncated due to too small snaplen, +# the checksum calculation is bypassed (and Bro doesn't crash, of course). + +# @TEST-EXEC: rm -f weird.log +# @TEST-EXEC: bro -r $TRACES/trunc/icmp-payload-trunc.pcap +# @TEST-EXEC: test ! -e weird.log + +# If an ICMP packet has the ICMP header truncated due to too small snaplen, +# an internally_truncated_header weird gets generated. + +# @TEST-EXEC: bro -r $TRACES/trunc/icmp-header-trunc.pcap +# @TEST-EXEC: cat weird.log >> output # @TEST-EXEC: btest-diff output From 65b50ab2da9537428355059a5c640298dd8acefa Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 30 May 2012 10:16:05 -0700 Subject: [PATCH 328/651] another small memory leak in ascii reader: on re-read istream instance was re-created but not freed before. 
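
Background: std::ifstream::close() only releases the underlying file handle; the heap-allocated stream object itself is freed only by delete. Since Ascii::DoUpdate() builds a fresh stream with "file = new ifstream(...)" on every re-read, overwriting the raw pointer without deleting the previous instance leaked one ifstream per update, which is what the two added lines below take care of. A minimal, self-contained sketch of the corrected re-open pattern follows; reopen() and the file name are illustrative, not the reader's actual code.

#include <fstream>
#include <string>

// Close, free and re-create a heap-allocated input stream.
std::ifstream* reopen(std::ifstream* file, const std::string& path)
    {
    if ( file )
        {
        file->close();      // releases the OS handle ...
        delete file;        // ... but only delete frees the stream object
        file = 0;
        }

    return new std::ifstream(path.c_str());
    }

int main()
    {
    std::ifstream* f = new std::ifstream("input.log");
    f = reopen(f, "input.log");     // without the delete inside reopen(),
                                    // the first ifstream would leak here
    delete f;
    return 0;
    }

Holding the stream by value or in a std::unique_ptr would rule this class of leak out entirely; the sketch keeps the raw-pointer style only to mirror the reader code being patched.
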
--- src/input/readers/Ascii.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 157ea90916..275b3a9e67 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -413,6 +413,8 @@ bool Ascii::DoUpdate() break; } file->close(); + delete(file); + file = 0; } file = new ifstream(fname.c_str()); From 6869e1aadc9f9f16a1a8f1b938f53c921a5131bc Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 30 May 2012 16:30:50 -0500 Subject: [PATCH 329/651] Add more tests for previously-untested BIFs --- testing/btest/Baseline/bifs.create_file/out | 15 +++++ .../btest/Baseline/bifs.create_file/testfile | 2 + .../btest/Baseline/bifs.create_file/testfile2 | 1 + testing/btest/Baseline/bifs.find_entropy/out | 2 + testing/btest/Baseline/bifs.fmt_ftp_port/out | 2 + .../bifs.get_port_transport_proto/out | 3 + testing/btest/Baseline/bifs.global_ids/out | 1 + testing/btest/Baseline/bifs.is_ascii/out | 2 + testing/btest/Baseline/bifs.rotate_file/out | 3 + .../Baseline/bifs.rotate_file_by_name/out | 3 + testing/btest/Baseline/bifs.same_object/out | 3 + .../btest/Baseline/bifs.uuid_to_string/out | 2 + testing/btest/bifs/create_file.bro | 65 +++++++++++++++++++ testing/btest/bifs/find_entropy.bro | 13 ++++ testing/btest/bifs/fmt_ftp_port.bro | 13 ++++ .../btest/bifs/get_port_transport_proto.bro | 13 ++++ testing/btest/bifs/global_ids.bro | 16 +++++ testing/btest/bifs/is_ascii.bro | 12 ++++ testing/btest/bifs/rotate_file.bro | 15 +++++ testing/btest/bifs/rotate_file_by_name.bro | 16 +++++ testing/btest/bifs/same_object.bro | 16 +++++ testing/btest/bifs/uuid_to_string.bro | 10 +++ 22 files changed, 228 insertions(+) create mode 100644 testing/btest/Baseline/bifs.create_file/out create mode 100644 testing/btest/Baseline/bifs.create_file/testfile create mode 100644 testing/btest/Baseline/bifs.create_file/testfile2 create mode 100644 testing/btest/Baseline/bifs.find_entropy/out create mode 100644 testing/btest/Baseline/bifs.fmt_ftp_port/out create mode 100644 testing/btest/Baseline/bifs.get_port_transport_proto/out create mode 100644 testing/btest/Baseline/bifs.global_ids/out create mode 100644 testing/btest/Baseline/bifs.is_ascii/out create mode 100644 testing/btest/Baseline/bifs.rotate_file/out create mode 100644 testing/btest/Baseline/bifs.rotate_file_by_name/out create mode 100644 testing/btest/Baseline/bifs.same_object/out create mode 100644 testing/btest/Baseline/bifs.uuid_to_string/out create mode 100644 testing/btest/bifs/create_file.bro create mode 100644 testing/btest/bifs/find_entropy.bro create mode 100644 testing/btest/bifs/fmt_ftp_port.bro create mode 100644 testing/btest/bifs/get_port_transport_proto.bro create mode 100644 testing/btest/bifs/global_ids.bro create mode 100644 testing/btest/bifs/is_ascii.bro create mode 100644 testing/btest/bifs/rotate_file.bro create mode 100644 testing/btest/bifs/rotate_file_by_name.bro create mode 100644 testing/btest/bifs/same_object.bro create mode 100644 testing/btest/bifs/uuid_to_string.bro diff --git a/testing/btest/Baseline/bifs.create_file/out b/testing/btest/Baseline/bifs.create_file/out new file mode 100644 index 0000000000..330268ec59 --- /dev/null +++ b/testing/btest/Baseline/bifs.create_file/out @@ -0,0 +1,15 @@ +T +testfile +F +15.0 +T +F +28.0 +-1.0 +15.0 +0.0 +T +15.0 +T +testdir/testfile4 +F diff --git a/testing/btest/Baseline/bifs.create_file/testfile b/testing/btest/Baseline/bifs.create_file/testfile new file mode 100644 index 0000000000..a29421755d --- /dev/null +++ 
b/testing/btest/Baseline/bifs.create_file/testfile @@ -0,0 +1,2 @@ +This is a test +another test diff --git a/testing/btest/Baseline/bifs.create_file/testfile2 b/testing/btest/Baseline/bifs.create_file/testfile2 new file mode 100644 index 0000000000..eee417f1b9 --- /dev/null +++ b/testing/btest/Baseline/bifs.create_file/testfile2 @@ -0,0 +1 @@ +new text diff --git a/testing/btest/Baseline/bifs.find_entropy/out b/testing/btest/Baseline/bifs.find_entropy/out new file mode 100644 index 0000000000..08a09de4e4 --- /dev/null +++ b/testing/btest/Baseline/bifs.find_entropy/out @@ -0,0 +1,2 @@ +[entropy=4.715374, chi_square=591.981818, mean=75.472727, monte_carlo_pi=4.0, serial_correlation=-0.11027] +[entropy=2.083189, chi_square=3906.018182, mean=69.054545, monte_carlo_pi=4.0, serial_correlation=0.849402] diff --git a/testing/btest/Baseline/bifs.fmt_ftp_port/out b/testing/btest/Baseline/bifs.fmt_ftp_port/out new file mode 100644 index 0000000000..124878dd48 --- /dev/null +++ b/testing/btest/Baseline/bifs.fmt_ftp_port/out @@ -0,0 +1,2 @@ +192,168,0,2,1,1 + diff --git a/testing/btest/Baseline/bifs.get_port_transport_proto/out b/testing/btest/Baseline/bifs.get_port_transport_proto/out new file mode 100644 index 0000000000..dceddbc0f3 --- /dev/null +++ b/testing/btest/Baseline/bifs.get_port_transport_proto/out @@ -0,0 +1,3 @@ +tcp +udp +icmp diff --git a/testing/btest/Baseline/bifs.global_ids/out b/testing/btest/Baseline/bifs.global_ids/out new file mode 100644 index 0000000000..415b9ac63d --- /dev/null +++ b/testing/btest/Baseline/bifs.global_ids/out @@ -0,0 +1 @@ +func diff --git a/testing/btest/Baseline/bifs.is_ascii/out b/testing/btest/Baseline/bifs.is_ascii/out new file mode 100644 index 0000000000..82d2bc093e --- /dev/null +++ b/testing/btest/Baseline/bifs.is_ascii/out @@ -0,0 +1,2 @@ +F +T diff --git a/testing/btest/Baseline/bifs.rotate_file/out b/testing/btest/Baseline/bifs.rotate_file/out new file mode 100644 index 0000000000..1e833bbae4 --- /dev/null +++ b/testing/btest/Baseline/bifs.rotate_file/out @@ -0,0 +1,3 @@ +file rotated +15.0 +0.0 diff --git a/testing/btest/Baseline/bifs.rotate_file_by_name/out b/testing/btest/Baseline/bifs.rotate_file_by_name/out new file mode 100644 index 0000000000..1e833bbae4 --- /dev/null +++ b/testing/btest/Baseline/bifs.rotate_file_by_name/out @@ -0,0 +1,3 @@ +file rotated +15.0 +0.0 diff --git a/testing/btest/Baseline/bifs.same_object/out b/testing/btest/Baseline/bifs.same_object/out new file mode 100644 index 0000000000..3ea3c39b0d --- /dev/null +++ b/testing/btest/Baseline/bifs.same_object/out @@ -0,0 +1,3 @@ +T +F +F diff --git a/testing/btest/Baseline/bifs.uuid_to_string/out b/testing/btest/Baseline/bifs.uuid_to_string/out new file mode 100644 index 0000000000..8ea4f86dae --- /dev/null +++ b/testing/btest/Baseline/bifs.uuid_to_string/out @@ -0,0 +1,2 @@ +626180fe-6463-6665-6730-313233343536 + diff --git a/testing/btest/bifs/create_file.bro b/testing/btest/bifs/create_file.bro new file mode 100644 index 0000000000..8f3d6cfdcd --- /dev/null +++ b/testing/btest/bifs/create_file.bro @@ -0,0 +1,65 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff testfile +# @TEST-EXEC: btest-diff testfile2 +# @TEST-EXEC: test -f testdir/testfile4 + +event bro_init() + { + # Test that creating a file works as expected + local a = open("testfile"); + print active_file(a); + print get_file_name(a); + write_file(a, "This is a test\n"); + close(a); + + print active_file(a); + print file_size("testfile"); + + # Test that "open_for_append" 
doesn't overwrite an existing file + a = open_for_append("testfile"); + print active_file(a); + write_file(a, "another test\n"); + close(a); + + print active_file(a); + print file_size("testfile"); + + # This should fail + print file_size("doesnotexist"); + + # Test that "open" overwrites existing file + a = open("testfile2"); + write_file(a, "this will be overwritten\n"); + close(a); + a = open("testfile2"); + write_file(a, "new text\n"); + close(a); + + # Test that set_buf and flush_all work correctly + a = open("testfile3"); + set_buf(a, F); + write_file(a, "This is a test\n"); + print file_size("testfile3"); + close(a); + a = open("testfile3"); + set_buf(a, T); + write_file(a, "This is a test\n"); + print file_size("testfile3"); + print flush_all(); + print file_size("testfile3"); + close(a); + + # Create a new directory + print mkdir("testdir"); + + # Create a file in the new directory + a = open("testdir/testfile4"); + print get_file_name(a); + write_file(a, "This is a test\n"); + close(a); + + # This should fail + print mkdir("/thisdoesnotexist/dir"); + } diff --git a/testing/btest/bifs/find_entropy.bro b/testing/btest/bifs/find_entropy.bro new file mode 100644 index 0000000000..24f1c0ed84 --- /dev/null +++ b/testing/btest/bifs/find_entropy.bro @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "dh3Hie02uh^s#Sdf9L3frd243h$d78r2G4cM6*Q05d(7rh46f!0|4-f"; + local b = "0011000aaabbbbcccc000011111000000000aaaabbbbcccc0000000"; + + print find_entropy(a); + + print find_entropy(b); + } diff --git a/testing/btest/bifs/fmt_ftp_port.bro b/testing/btest/bifs/fmt_ftp_port.bro new file mode 100644 index 0000000000..09ec5369e2 --- /dev/null +++ b/testing/btest/bifs/fmt_ftp_port.bro @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = 192.168.0.2; + local b = 257/tcp; + print fmt_ftp_port(a, b); + + a = [fe80::1234]; + print fmt_ftp_port(a, b); + } diff --git a/testing/btest/bifs/get_port_transport_proto.bro b/testing/btest/bifs/get_port_transport_proto.bro new file mode 100644 index 0000000000..c9b5e626ec --- /dev/null +++ b/testing/btest/bifs/get_port_transport_proto.bro @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = 123/tcp; + local b = 123/udp; + local c = 123/icmp; + print get_port_transport_proto(a); + print get_port_transport_proto(b); + print get_port_transport_proto(c); + } diff --git a/testing/btest/bifs/global_ids.bro b/testing/btest/bifs/global_ids.bro new file mode 100644 index 0000000000..65f8944ed4 --- /dev/null +++ b/testing/btest/bifs/global_ids.bro @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = global_ids(); + for ( i in a ) + { + # the table is quite large, so just print one item we expect + if ( i == "bro_init" ) + print a[i]$type_name; + + } + + } diff --git a/testing/btest/bifs/is_ascii.bro b/testing/btest/bifs/is_ascii.bro new file mode 100644 index 0000000000..4d1daf96b4 --- /dev/null +++ b/testing/btest/bifs/is_ascii.bro @@ -0,0 +1,12 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "this is a test\xfe"; + local b = "this is a test\x7f"; + + print is_ascii(a); + print is_ascii(b); + } diff --git a/testing/btest/bifs/rotate_file.bro b/testing/btest/bifs/rotate_file.bro new file mode 100644 index 0000000000..7132b0aaa8 --- /dev/null +++ 
b/testing/btest/bifs/rotate_file.bro @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = open("testfile"); + write_file(a, "this is a test\n"); + + local b = rotate_file(a); + if ( b$new_name != "testfile" ) + print "file rotated"; + print file_size(b$new_name); + print file_size("testfile"); + } diff --git a/testing/btest/bifs/rotate_file_by_name.bro b/testing/btest/bifs/rotate_file_by_name.bro new file mode 100644 index 0000000000..952b09aff3 --- /dev/null +++ b/testing/btest/bifs/rotate_file_by_name.bro @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = open("testfile"); + write_file(a, "this is a test\n"); + close(a); + + local b = rotate_file_by_name("testfile"); + if ( b$new_name != "testfile" ) + print "file rotated"; + print file_size(b$new_name); + print file_size("testfile"); + } diff --git a/testing/btest/bifs/same_object.bro b/testing/btest/bifs/same_object.bro new file mode 100644 index 0000000000..eee8b1621d --- /dev/null +++ b/testing/btest/bifs/same_object.bro @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "This is a test"; + local b: string; + local c = "This is a test"; + b = a; + print same_object(a, b); + print same_object(a, c); + + local d = vector(1, 2, 3); + print same_object(a, d); + } diff --git a/testing/btest/bifs/uuid_to_string.bro b/testing/btest/bifs/uuid_to_string.bro new file mode 100644 index 0000000000..a64e81d783 --- /dev/null +++ b/testing/btest/bifs/uuid_to_string.bro @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "\xfe\x80abcdefg0123456"; + print uuid_to_string(a); + print uuid_to_string(""); + } From 17155a103ddbcfeb05a272e7e44717d4baa56dd6 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 30 May 2012 16:50:43 -0500 Subject: [PATCH 330/651] Fix the join_string_vec BIF and add more tests --- src/strings.bif | 2 +- testing/btest/Baseline/bifs.join_string/out | 5 +++++ testing/btest/bifs/join_string.bro | 11 +++++++++-- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/strings.bif b/src/strings.bif index 27c11b4013..5b04ec41cb 100644 --- a/src/strings.bif +++ b/src/strings.bif @@ -175,7 +175,7 @@ function join_string_vec%(vec: string_vec, sep: string%): string if ( i > 0 ) d.Add(sep->CheckString(), 0); - v->Lookup(i+1)->Describe(&d); + v->Lookup(i)->Describe(&d); } BroString* s = new BroString(1, d.TakeBytes(), d.Len()); diff --git a/testing/btest/Baseline/bifs.join_string/out b/testing/btest/Baseline/bifs.join_string/out index 830c2dace5..f1640a57ee 100644 --- a/testing/btest/Baseline/bifs.join_string/out +++ b/testing/btest/Baseline/bifs.join_string/out @@ -1 +1,6 @@ this * is * a * test +thisisatest +mytest +this__is__another__test +thisisanothertest +Test diff --git a/testing/btest/bifs/join_string.bro b/testing/btest/bifs/join_string.bro index df5f83449b..16222d6303 100644 --- a/testing/btest/bifs/join_string.bro +++ b/testing/btest/bifs/join_string.bro @@ -7,8 +7,15 @@ event bro_init() local a: string_array = { [1] = "this", [2] = "is", [3] = "a", [4] = "test" }; - local b: string_vec = vector( "this", "is", "another", "test" ); + local b: string_array = { [1] = "mytest" }; + local c: string_vec = vector( "this", "is", "another", "test" ); + local d: string_vec = vector( "Test" ); print join_string_array(" * ", a); - print join_string_vec(b, "__"); + print 
join_string_array("", a); + print join_string_array("x", b); + + print join_string_vec(c, "__"); + print join_string_vec(c, ""); + print join_string_vec(d, "-"); } From fc907c0090fbceed5dd64385c38d09bb88502acf Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 30 May 2012 16:38:08 -0700 Subject: [PATCH 331/651] A set of input framework refactoring, cleanup, and polishing. --- scripts/base/frameworks/input/main.bro | 3 - src/input/Manager.cc | 22 +++++- src/input/ReaderBackend.cc | 7 +- src/input/ReaderBackend.h | 92 +++++++++++++++++++------- src/input/ReaderFrontend.cc | 6 +- src/input/ReaderFrontend.h | 4 +- src/input/readers/Ascii.cc | 61 +++++++---------- src/input/readers/Ascii.h | 14 ++-- src/input/readers/Benchmark.cc | 37 +++-------- src/input/readers/Benchmark.h | 9 +-- src/input/readers/Raw.cc | 47 +++++-------- src/input/readers/Raw.h | 15 ++--- src/threading/Manager.cc | 11 +-- src/threading/Manager.h | 2 - 14 files changed, 162 insertions(+), 168 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 7a372dc120..f5df72473f 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -117,9 +117,6 @@ export { module Input; -#global streams: table[string] of Filter; -# ^ change to set containing the names - function add_table(description: Input::TableDescription) : bool { return __create_table_stream(description); diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 9bf885072b..bc79a2390b 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -74,7 +74,7 @@ public: string source; bool removed; - int mode; + ReaderMode mode; StreamType stream_type; // to distinguish between event and table streams @@ -299,7 +299,25 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) Unref(sourceval); EnumVal* mode = description->LookupWithDefault(rtype->FieldOffset("mode"))->AsEnumVal(); - info->mode = mode->InternalInt(); + + switch ( mode->InternalInt() ) + { + case 0: + info->mode = MODE_MANUAL; + break; + + case 1: + info->mode = MODE_REREAD; + break; + + case 2: + info->mode = MODE_STREAM; + break; + + default: + reporter->InternalError("unknown reader mode"); + } + Unref(mode); info->reader = reader_obj; diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 328e0bc535..43cbf8dfc1 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -176,15 +176,16 @@ void ReaderBackend::SendEntry(Value* *vals) SendOut(new SendEntryMessage(frontend, vals)); } -bool ReaderBackend::Init(string arg_source, int mode, const int arg_num_fields, +bool ReaderBackend::Init(string arg_source, ReaderMode arg_mode, const int arg_num_fields, const threading::Field* const* arg_fields) { source = arg_source; - SetName("InputReader/"+source); - + mode = arg_mode; num_fields = arg_num_fields; fields = arg_fields; + SetName("InputReader/"+source); + // disable if DoInit returns error. int success = DoInit(arg_source, mode, arg_num_fields, arg_fields); diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index ae8437b08c..8b5e7d674b 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -4,11 +4,32 @@ #define INPUT_READERBACKEND_H #include "BroString.h" -#include "../threading/SerialTypes.h" + +#include "threading/SerialTypes.h" #include "threading/MsgThread.h" namespace input { +/** + * The modes a reader can be in. + */ +enum ReaderMode { + /** + * TODO Bernhard. + */ + MODE_MANUAL, + + /** + * TODO Bernhard. 
+ */ + MODE_REREAD, + + /** + * TODO Bernhard. + */ + MODE_STREAM +}; + class ReaderFrontend; /** @@ -40,24 +61,20 @@ public: /** * One-time initialization of the reader to define the input source. * - * @param arg_source A string left to the interpretation of the + * @param source A string left to the interpretation of the * reader implementation; it corresponds to the value configured on * the script-level for the input stream. * - * @param fields An array of size \a num_fields with the input - * fields. The method takes ownership of the array. + * @param mode The opening mode for the input source. * - * @param mode The opening mode for the input source as one of the - * Input::Mode script constants. - * - * @param arg_num_fields Number of fields contained in \a fields. + * @param num_fields Number of fields contained in \a fields. * * @param fields The types and names of the fields to be retrieved * from the input source. * * @return False if an error occured. */ - bool Init(string arg_source, int mode, int arg_num_fields, const threading::Field* const* fields); + bool Init(string source, ReaderMode mode, int num_fields, const threading::Field* const* fields); /** * Finishes reading from this input stream in a regular fashion. Must @@ -98,8 +115,15 @@ protected: * prevents the reader from further operation; it will then be * disabled and eventually deleted. When returning false, an * implementation should also call Error() to indicate what happened. + * + * Arguments are the same as Init(). + * + * Note that derived classes don't need to store the values passed in + * here if other methods need them to; the \a ReaderBackend class + * provides accessor methods to get them later, and they are passed + * in here only for convinience. */ - virtual bool DoInit(string arg_sources, int mode, int arg_num_fields, const threading::Field* const* fields) = 0; + virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields) = 0; /** * Reader-specific method implementing input finalization at @@ -129,10 +153,25 @@ protected: virtual bool DoUpdate() = 0; /** - * Returns the input source as passed into the constructor. + * Returns the input source as passed into Init()/. */ const string Source() const { return source; } + /** + * Returns the reader mode as passed into Init(). + */ + const ReaderMode Mode() const { return mode; } + + /** + * Returns the number of log fields as passed into Init(). + */ + unsigned int NumFields() const { return num_fields; } + + /** + * Returns the log fields as passed into Init(). + */ + const threading::Field* const * Fields() const { return fields; } + /** * Method allowing a reader to send a specified Bro event. Vals must * match the values expected by the bro event. @@ -145,8 +184,8 @@ protected: */ void SendEvent(const string& name, const int num_vals, threading::Value* *vals); - // Content-sending-functions (simple mode). Including table-specific - // stuff that simply is not used if we have no table. + // Content-sending-functions (simple mode). Include table-specific + // functionality that simply is not used if we have no table. /** * Method allowing a reader to send a list of values read from a @@ -155,9 +194,10 @@ protected: * If the stream is a table stream, the values are inserted into the * table; if it is an event stream, the event is raised. * - * @param val list of threading::Values expected by the stream + * @param val Array of threading::Values expected by the stream. 
The + * array must have exactly NumEntries() elements. */ - void Put(threading::Value* *val); + void Put(threading::Value** val); /** * Method allowing a reader to delete a specific value from a Bro @@ -166,9 +206,10 @@ protected: * If the receiving stream is an event stream, only a removed event * is raised. * - * @param val list of threading::Values expected by the stream + * @param val Array of threading::Values expected by the stream. The + * array must have exactly NumEntries() elements. */ - void Delete(threading::Value* *val); + void Delete(threading::Value** val); /** * Method allowing a reader to clear a Bro table. @@ -187,9 +228,10 @@ protected: * If the stream is a table stream, the values are inserted into the * table; if it is an event stream, the event is raised. * - * @param val list of threading::Values expected by the stream + * @param val Array of threading::Values expected by the stream. The + * array must have exactly NumEntries() elements. */ - void SendEntry(threading::Value* *vals); + void SendEntry(threading::Value** vals); /** * Method telling the manager, that the current list of entries sent @@ -210,14 +252,16 @@ protected: virtual bool DoHeartbeat(double network_time, double current_time); /** - * Utility function for Readers - convert a string into a TransportProto + * Convert a string into a TransportProto. This is just a utility + * function for Readers. * * @param proto the transport protocol */ TransportProto StringToProto(const string &proto); /** - * Utility function for Readers - convert a string into a Value::addr_t + * Convert a string into a Value::addr_t. This is just a utility + * function for Readers. * * @param addr containing an ipv4 or ipv6 address */ @@ -229,11 +273,11 @@ private: ReaderFrontend* frontend; string source; - - bool disabled; - + ReaderMode mode; unsigned int num_fields; const threading::Field* const * fields; // raw mapping + + bool disabled; }; } diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index 75bb7fec50..d85a227577 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -12,7 +12,7 @@ namespace input { class InitMessage : public threading::InputMessage { public: - InitMessage(ReaderBackend* backend, const string source, const int mode, + InitMessage(ReaderBackend* backend, const string source, ReaderMode mode, const int num_fields, const threading::Field* const* fields) : threading::InputMessage("Init", backend), source(source), mode(mode), num_fields(num_fields), fields(fields) { } @@ -24,7 +24,7 @@ public: private: const string source; - const int mode; + const ReaderMode mode; const int num_fields; const threading::Field* const* fields; }; @@ -64,7 +64,7 @@ ReaderFrontend::~ReaderFrontend() { } -void ReaderFrontend::Init(string arg_source, int mode, const int num_fields, +void ReaderFrontend::Init(string arg_source, ReaderMode mode, const int num_fields, const threading::Field* const* fields) { if ( disabled ) diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index c18e22a064..0de4e7c3dc 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -6,6 +6,8 @@ #include "../threading/MsgThread.h" #include "../threading/SerialTypes.h" +#include "ReaderBackend.h" + namespace input { class Manager; @@ -50,7 +52,7 @@ public: * * This method must only be called from the main thread. 
*/ - void Init(string arg_source, int mode, const int arg_num_fields, const threading::Field* const* fields); + void Init(string arg_source, ReaderMode mode, const int arg_num_fields, const threading::Field* const* fields); /** * Force an update of the current input source. Actual action depends diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 157ea90916..b5f81c8732 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -8,10 +8,6 @@ #include "../../threading/SerialTypes.h" -#define MANUAL 0 -#define REREAD 1 -#define STREAM 2 - #include #include #include @@ -87,25 +83,14 @@ void Ascii::DoClose() } } -bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) +bool Ascii::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields) { - fname = path; - mode = arg_mode; mtime = 0; - num_fields = arg_num_fields; - fields = arg_fields; - - if ( (mode != MANUAL) && (mode != REREAD) && (mode != STREAM) ) - { - Error(Fmt("Unsupported read mode %d for source %s", mode, path.c_str())); - return false; - } - file = new ifstream(path.c_str()); if ( ! file->is_open() ) { - Error(Fmt("Init: cannot open %s", fname.c_str())); + Error(Fmt("Init: cannot open %s", path.c_str())); delete(file); file = 0; return false; @@ -113,7 +98,7 @@ bool Ascii::DoInit(string path, int arg_mode, int arg_num_fields, const Field* c if ( ReadHeader(false) == false ) { - Error(Fmt("Init: cannot open %s; headers are incorrect", fname.c_str())); + Error(Fmt("Init: cannot open %s; headers are incorrect", path.c_str())); file->close(); delete(file); file = 0; @@ -162,9 +147,9 @@ bool Ascii::ReadHeader(bool useCached) //printf("Updating fields from description %s\n", line.c_str()); columnMap.clear(); - for ( unsigned int i = 0; i < num_fields; i++ ) + for ( unsigned int i = 0; i < NumFields(); i++ ) { - const Field* field = fields[i]; + const Field* field = Fields()[i]; map::iterator fit = ifields.find(field->name); if ( fit == ifields.end() ) @@ -179,7 +164,7 @@ bool Ascii::ReadHeader(bool useCached) } Error(Fmt("Did not find requested field %s in input data file %s.", - field->name.c_str(), fname.c_str())); + field->name.c_str(), Source().c_str())); return false; } @@ -377,14 +362,14 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) // read the entire file and send appropriate thingies back to InputMgr bool Ascii::DoUpdate() { - switch ( mode ) { - case REREAD: + switch ( Mode() ) { + case MODE_REREAD: { // check if the file has changed struct stat sb; - if ( stat(fname.c_str(), &sb) == -1 ) + if ( stat(Source().c_str(), &sb) == -1 ) { - Error(Fmt("Could not get stat for %s", fname.c_str())); + Error(Fmt("Could not get stat for %s", Source().c_str())); return false; } @@ -397,14 +382,14 @@ bool Ascii::DoUpdate() // fallthrough } - case MANUAL: - case STREAM: + case MODE_MANUAL: + case MODE_STREAM: { // dirty, fix me. (well, apparently after trying seeking, etc // - this is not that bad) if ( file && file->is_open() ) { - if ( mode == STREAM ) + if ( Mode() == MODE_STREAM ) { file->clear(); // remove end of file evil bits if ( !ReadHeader(true) ) @@ -415,10 +400,10 @@ bool Ascii::DoUpdate() file->close(); } - file = new ifstream(fname.c_str()); + file = new ifstream(Source().c_str()); if ( !file->is_open() ) { - Error(Fmt("cannot open %s", fname.c_str())); + Error(Fmt("cannot open %s", Source().c_str())); return false; } @@ -455,7 +440,7 @@ bool Ascii::DoUpdate() pos--; // for easy comparisons of max element. 
- Value** fields = new Value*[num_fields]; + Value** fields = new Value*[NumFields()]; int fpos = 0; for ( vector::iterator fit = columnMap.begin(); @@ -502,15 +487,15 @@ bool Ascii::DoUpdate() } //printf("fpos: %d, second.num_fields: %d\n", fpos, (*it).second.num_fields); - assert ( (unsigned int) fpos == num_fields ); + assert ( (unsigned int) fpos == NumFields() ); - if ( mode == STREAM ) + if ( Mode() == MODE_STREAM ) Put(fields); else SendEntry(fields); } - if ( mode != STREAM ) + if ( Mode () != MODE_STREAM ) EndCurrentSend(); return true; @@ -520,13 +505,13 @@ bool Ascii::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); - switch ( mode ) { - case MANUAL: + switch ( Mode() ) { + case MODE_MANUAL: // yay, we do nothing :) break; - case REREAD: - case STREAM: + case MODE_REREAD: + case MODE_STREAM: Update(); // call update and not DoUpdate, because update // checks disabled. break; diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index e5540c5467..a15acc29ee 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -10,7 +10,7 @@ namespace input { namespace reader { -// Description for input field mapping +// Description for input field mapping. struct FieldMapping { string name; TypeTag type; @@ -27,6 +27,9 @@ struct FieldMapping { FieldMapping subType(); }; +/** + * Reader for structured ASCII files. + */ class Ascii : public ReaderBackend { public: Ascii(ReaderFrontend* frontend); @@ -35,23 +38,18 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Ascii(frontend); } protected: - virtual bool DoInit(string path, int mode, int arg_num_fields, const threading::Field* const* fields); + virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); virtual bool DoUpdate(); + virtual bool DoHeartbeat(double network_time, double current_time); private: - virtual bool DoHeartbeat(double network_time, double current_time); bool ReadHeader(bool useCached); bool GetLine(string& str); threading::Value* EntryToVal(string s, FieldMapping type); - unsigned int num_fields; - const threading::Field* const *fields; // raw mapping - ifstream* file; - string fname; - int mode; time_t mtime; // map columns in the file to columns to send back to the manager diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index c6cc1649eb..5644f26cb3 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -5,10 +5,6 @@ #include "../../threading/SerialTypes.h" -#define MANUAL 0 -#define REREAD 1 -#define STREAM 2 - #include #include #include @@ -19,8 +15,6 @@ using namespace input::reader; using threading::Value; using threading::Field; - - Benchmark::Benchmark(ReaderFrontend *frontend) : ReaderBackend(frontend) { multiplication_factor = double(BifConst::InputBenchmark::factor); @@ -42,23 +36,13 @@ void Benchmark::DoClose() { } -bool Benchmark::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) +bool Benchmark::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields) { - mode = arg_mode; - - num_fields = arg_num_fields; - fields = arg_fields; num_lines = atoi(path.c_str()); if ( autospread != 0.0 ) autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); - if ( (mode != MANUAL) && (mode != REREAD) && (mode != STREAM) ) - { - Error(Fmt("Unsupported read mode %d for source %s", mode, 
path.c_str())); - return false; - } - heartbeatstarttime = CurrTime(); DoUpdate(); @@ -95,11 +79,11 @@ bool Benchmark::DoUpdate() int linestosend = num_lines * heart_beat_interval; for ( int i = 0; i < linestosend; i++ ) { - Value** field = new Value*[num_fields]; - for (unsigned int j = 0; j < num_fields; j++ ) - field[j] = EntryToVal(fields[j]->type, fields[j]->subtype); + Value** field = new Value*[NumFields()]; + for (unsigned int j = 0; j < NumFields(); j++ ) + field[j] = EntryToVal(Fields()[j]->type, Fields()[j]->subtype); - if ( mode == STREAM ) + if ( Mode() == MODE_STREAM ) // do not do tracking, spread out elements over the second that we have... Put(field); else @@ -125,7 +109,7 @@ bool Benchmark::DoUpdate() } - if ( mode != STREAM ) + if ( Mode() != MODE_STREAM ) EndCurrentSend(); return true; @@ -243,13 +227,13 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) num_lines += add; heartbeatstarttime = CurrTime(); - switch ( mode ) { - case MANUAL: + switch ( Mode() ) { + case MODE_MANUAL: // yay, we do nothing :) break; - case REREAD: - case STREAM: + case MODE_REREAD: + case MODE_STREAM: if ( multiplication_factor != 1 || add != 0 ) { // we have to document at what time we changed the factor to what value. @@ -270,6 +254,7 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) SendEvent("HeartbeatDone", 0, 0); break; + default: assert(false); } diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index ec14dc6567..2bb23ee17a 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -18,21 +18,16 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Benchmark(frontend); } protected: - virtual bool DoInit(string path, int mode, int arg_num_fields, const threading::Field* const* fields); + virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); virtual bool DoUpdate(); - -private: virtual bool DoHeartbeat(double network_time, double current_time); +private: double CurrTime(); string RandomString(const int len); threading::Value* EntryToVal(TypeTag Type, TypeTag subtype); - unsigned int num_fields; - const threading::Field* const * fields; // raw mapping - - int mode; int num_lines; double multiplication_factor; int spread; diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index 6538da070b..c0da4969aa 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -9,10 +9,6 @@ #include "../../threading/SerialTypes.h" #include "../fdstream.h" -#define MANUAL 0 -#define REREAD 1 -#define STREAM 2 - #include #include #include @@ -48,7 +44,7 @@ void Raw::DoClose() } } -bool Raw::Open() +bool Raw::OpenInput() { if ( execute ) { @@ -72,13 +68,13 @@ bool Raw::Open() // This is defined in input/fdstream.h in = new boost::fdistream(fileno(file)); - if ( execute && mode == STREAM ) + if ( execute && Mode() == MODE_STREAM ) fcntl(fileno(file), F_SETFL, O_NONBLOCK); return true; } -bool Raw::Close() +bool Raw::CloseInput() { if ( file == NULL ) { @@ -103,25 +99,21 @@ bool Raw::Close() return true; } -bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* const* arg_fields) +bool Raw::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields) { fname = path; - mode = arg_mode; mtime = 0; execute = false; firstrun = true; bool result; - num_fields = arg_num_fields; - fields = arg_fields; - if ( path.length() == 0 ) { Error("No source path 
provided"); return false; } - if ( arg_num_fields != 1 ) + if ( num_fields != 1 ) { Error("Filter for raw reader contains more than one field. " "Filters for the raw reader may only contain exactly one string field. " @@ -142,7 +134,7 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con execute = true; fname = path.substr(0, fname.length() - 1); - if ( (mode != MANUAL) && (mode != STREAM) ) { + if ( (mode != MODE_MANUAL) && (mode != MODE_STREAM) ) { Error(Fmt("Unsupported read mode %d for source %s in execution mode", mode, fname.c_str())); return false; @@ -152,13 +144,6 @@ bool Raw::DoInit(string path, int arg_mode, int arg_num_fields, const Field* con } else { execute = false; - if ( (mode != MANUAL) && (mode != REREAD) && (mode != STREAM) ) - { - Error(Fmt("Unsupported read mode %d for source %s", - mode, fname.c_str())); - return false; - } - result = Open(); } @@ -198,8 +183,8 @@ bool Raw::DoUpdate() else { - switch ( mode ) { - case REREAD: + switch ( Mode() ) { + case MODE_REREAD: { // check if the file has changed struct stat sb; @@ -219,9 +204,9 @@ bool Raw::DoUpdate() // fallthrough } - case MANUAL: - case STREAM: - if ( mode == STREAM && file != NULL && in != NULL ) + case MODE_MANUAL: + case MODE_STREAM: + if ( Mode() == MODE_STREAM && file != NULL && in != NULL ) { //fpurge(file); in->clear(); // remove end of file evil bits @@ -242,7 +227,7 @@ bool Raw::DoUpdate() string line; while ( GetLine(line) ) { - assert (num_fields == 1); + assert (NumFields() == 1); Value** fields = new Value*[1]; @@ -265,13 +250,13 @@ bool Raw::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); - switch ( mode ) { - case MANUAL: + switch ( Mode() ) { + case MODE_MANUAL: // yay, we do nothing :) break; - case REREAD: - case STREAM: + case MODE_REREAD: + case MODE_STREAM: Update(); // call update and not DoUpdate, because update // checks disabled. break; diff --git a/src/input/readers/Raw.h b/src/input/readers/Raw.h index 3fa09309b0..76c9125544 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/Raw.h @@ -22,24 +22,19 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Raw(frontend); } protected: - virtual bool DoInit(string path, int mode, int arg_num_fields, const threading::Field* const* fields); + virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); virtual bool DoUpdate(); - -private: virtual bool DoHeartbeat(double network_time, double current_time); - bool Open(); - bool Close(); +private: + bool OpenInput(); + bool CloseInput(); bool GetLine(string& str); - unsigned int num_fields; - const threading::Field* const * fields; // raw mapping - + string fname; // Sources with a potential " |" removed. 
istream* in; FILE* file; - string fname; - int mode; bool execute; bool firstrun; time_t mtime; diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 267d793e06..6071e70271 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -12,9 +12,6 @@ Manager::Manager() next_beat = 0; terminating = false; idle = true; - - heart_beat_interval = double(BifConst::Threading::heart_beat_interval); - DBG_LOG(DBG_THREADING, "Heart beat interval set to %f", heart_beat_interval); } Manager::~Manager() @@ -61,12 +58,6 @@ void Manager::KillThreads() void Manager::AddThread(BasicThread* thread) { - if ( heart_beat_interval == 0 ) { - // Sometimes initialization does not seem to work from constructor. - heart_beat_interval = double(BifConst::Threading::heart_beat_interval); - DBG_LOG(DBG_THREADING, "Heart beat interval set to %f", heart_beat_interval); - } - DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name().c_str()); all_threads.push_back(thread); idle = false; @@ -107,7 +98,7 @@ void Manager::Process() if ( network_time && (network_time > next_beat || ! next_beat) ) { do_beat = true; - next_beat = ::network_time + heart_beat_interval; + next_beat = ::network_time + BifConst::Threading::heart_beat_interval; } did_process = false; diff --git a/src/threading/Manager.h b/src/threading/Manager.h index 14c5893214..1afd115da0 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -126,8 +126,6 @@ protected: virtual const char* Tag() { return "threading::Manager"; } private: - int heart_beat_interval; - typedef std::list all_thread_list; all_thread_list all_threads; From 14ea7801767bf10c859adcb6539b0425e2286aab Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 30 May 2012 16:40:49 -0700 Subject: [PATCH 332/651] And now it even compiles after my earlier changes. --- src/input/readers/Raw.cc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index c0da4969aa..f6e61a906e 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -39,9 +39,7 @@ Raw::~Raw() void Raw::DoClose() { if ( file != 0 ) - { - Close(); - } + CloseInput(); } bool Raw::OpenInput() @@ -140,11 +138,13 @@ bool Raw::DoInit(string path, ReaderMode mode, int num_fields, const Field* cons return false; } - result = Open(); + result = OpenInput(); - } else { + } + else + { execute = false; - result = Open(); + result = OpenInput(); } if ( result == false ) @@ -213,8 +213,8 @@ bool Raw::DoUpdate() break; } - Close(); - if ( ! Open() ) + CloseInput(); + if ( ! OpenInput() ) return false; break; From f34ebb7b60e3131b5f93cf84d627c84701e93168 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 30 May 2012 19:12:43 -0700 Subject: [PATCH 333/651] Updating submodule(s). [nomail] --- CHANGES | 4 ++++ VERSION | 2 +- aux/btest | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index cb9c0177fc..b68469fc5d 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,8 @@ +2.0-571 | 2012-05-30 19:12:43 -0700 + + * Updating submodule(s). 
+ 2.0-570 | 2012-05-30 19:08:18 -0700 * A new input framework enables scripts to read in external data diff --git a/VERSION b/VERSION index dea7c2bf67..b3387dc11b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-570 +2.0-571 diff --git a/aux/btest b/aux/btest index 3ee8d4b323..4697bf4c80 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 3ee8d4b3232d74ed7bd475819193ad3a4055e2f5 +Subproject commit 4697bf4c8046a3ab7d5e00e926c5db883cb44664 From be0316ee29979532a4f0fa6df1b97e10613006bd Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 30 May 2012 19:26:43 -0700 Subject: [PATCH 334/651] Fixes for running tests in parallel. --- testing/btest/istate/bro-ipv6-socket.bro | 2 +- testing/btest/istate/broccoli-ipv6-socket.bro | 2 +- testing/btest/istate/broccoli-ssl.bro | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/btest/istate/bro-ipv6-socket.bro b/testing/btest/istate/bro-ipv6-socket.bro index ae77a42c54..b339bf4487 100644 --- a/testing/btest/istate/bro-ipv6-socket.bro +++ b/testing/btest/istate/bro-ipv6-socket.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-REQUIRES: ifconfig | grep -q -E "inet6 ::1|inet6 addr: ::1" # diff --git a/testing/btest/istate/broccoli-ipv6-socket.bro b/testing/btest/istate/broccoli-ipv6-socket.bro index e36ac9e9f7..21067c1b23 100644 --- a/testing/btest/istate/broccoli-ipv6-socket.bro +++ b/testing/btest/istate/broccoli-ipv6-socket.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # @TEST-REQUIRES: ifconfig | grep -q -E "inet6 ::1|inet6 addr: ::1" diff --git a/testing/btest/istate/broccoli-ssl.bro b/testing/btest/istate/broccoli-ssl.bro index 61401c483a..4465cd1bb3 100644 --- a/testing/btest/istate/broccoli-ssl.bro +++ b/testing/btest/istate/broccoli-ssl.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # From 60875adfc5fd4eeab44511d606cef99ecdcff918 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 31 May 2012 11:31:01 -0500 Subject: [PATCH 335/651] Fix format specifier on RemoteSerializer::Connect. This caused 32-bit systems to show a warning at compile-time, and fail when connecting to peers. 
--- src/RemoteSerializer.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index cc5e8c5ff9..838bafb0d6 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -700,7 +700,8 @@ RemoteSerializer::PeerID RemoteSerializer::Connect(const IPAddr& ip, const size_t BUFSIZE = 1024; char* data = new char[BUFSIZE]; - snprintf(data, BUFSIZE, "%"PRIu64",%s,%s,%"PRIu16",%"PRIu32",%d", p->id, + snprintf(data, BUFSIZE, + "%"PRI_PTR_COMPAT_UINT",%s,%s,%"PRIu16",%"PRIu32",%d", p->id, ip.AsString().c_str(), zone_id.c_str(), port, uint32(retry), use_ssl); From 5f16a29aea24f54b9c10ff4f0f0cf143a682d184 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 31 May 2012 12:49:59 -0500 Subject: [PATCH 336/651] Improve tests for sort, order, and system_env BIFs --- testing/btest/Baseline/bifs.order/out | 2 ++ testing/btest/Baseline/bifs.sort/out | 4 ++-- testing/btest/Baseline/bifs.system_env/out | 1 - testing/btest/Baseline/bifs.system_env/testfile | 1 + testing/btest/bifs/order.bro | 13 ++++++------- testing/btest/bifs/sort.bro | 13 ++++++------- testing/btest/bifs/system_env.bro | 6 ++++-- 7 files changed, 21 insertions(+), 19 deletions(-) create mode 100644 testing/btest/Baseline/bifs.order/out delete mode 100644 testing/btest/Baseline/bifs.system_env/out create mode 100644 testing/btest/Baseline/bifs.system_env/testfile diff --git a/testing/btest/Baseline/bifs.order/out b/testing/btest/Baseline/bifs.order/out new file mode 100644 index 0000000000..65e5adb492 --- /dev/null +++ b/testing/btest/Baseline/bifs.order/out @@ -0,0 +1,2 @@ +[1, 3, 0, 2] +[5.0 hrs, 1.0 sec, 7.0 mins] diff --git a/testing/btest/Baseline/bifs.sort/out b/testing/btest/Baseline/bifs.sort/out index 21d24208fb..de4baf1254 100644 --- a/testing/btest/Baseline/bifs.sort/out +++ b/testing/btest/Baseline/bifs.sort/out @@ -1,2 +1,2 @@ -[5, 3, 8] -[3, 5, 8] +[2, 3, 5, 8] +[5.0 hrs, 1.0 sec, 7.0 mins] diff --git a/testing/btest/Baseline/bifs.system_env/out b/testing/btest/Baseline/bifs.system_env/out deleted file mode 100644 index 31e0fce560..0000000000 --- a/testing/btest/Baseline/bifs.system_env/out +++ /dev/null @@ -1 +0,0 @@ -helloworld diff --git a/testing/btest/Baseline/bifs.system_env/testfile b/testing/btest/Baseline/bifs.system_env/testfile new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/testing/btest/Baseline/bifs.system_env/testfile @@ -0,0 +1 @@ + diff --git a/testing/btest/bifs/order.bro b/testing/btest/bifs/order.bro index 176e733cfe..a39a64e6a3 100644 --- a/testing/btest/bifs/order.bro +++ b/testing/btest/bifs/order.bro @@ -2,17 +2,16 @@ # @TEST-EXEC: bro %INPUT >out # @TEST-EXEC: btest-diff out -function myfunc(a: count, b: count): bool +function myfunc(aa: interval, bb: interval): bool { - return a < b; + return aa < bb; } event bro_init() { - local a = vector( 5, 3, 8 ); - - print order(a, myfunc); - - print a; + local a = vector( 5, 2, 8, 3 ); + print order(a); + local b = vector( 5hr, 1sec, 7min ); + print order(b, myfunc); } diff --git a/testing/btest/bifs/sort.bro b/testing/btest/bifs/sort.bro index 2d6d82129f..162e4a4504 100644 --- a/testing/btest/bifs/sort.bro +++ b/testing/btest/bifs/sort.bro @@ -2,17 +2,16 @@ # @TEST-EXEC: bro %INPUT >out # @TEST-EXEC: btest-diff out -function myfunc(a: count, b: count): bool +function myfunc(aa: interval, bb: interval): bool { - return a < b; + return aa < bb; } event bro_init() { - local a = vector( 5, 3, 8 ); - - print sort(a, myfunc); - - print a; + local a = vector( 5, 2, 8, 3 ); 
+ print sort(a); + local b = vector( 5hr, 1sec, 7min ); + print sort(b, myfunc); } diff --git a/testing/btest/bifs/system_env.bro b/testing/btest/bifs/system_env.bro index d8e54a8709..26e40b883f 100644 --- a/testing/btest/bifs/system_env.bro +++ b/testing/btest/bifs/system_env.bro @@ -1,6 +1,6 @@ # # @TEST-EXEC: bro %INPUT -# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff testfile event bro_init() { @@ -11,10 +11,12 @@ event bro_init() if ( |myvar| != 0 ) exit(1); - local a = system_env("echo $TESTBRO > out", vars); + # check if command runs with the env. variable defined + local a = system_env("echo $TESTBRO > testfile", vars); if ( a != 0 ) exit(1); + # make sure the env. variable is still not set myvar = getenv("TESTBRO"); if ( |myvar| != 0 ) exit(1); From 2c62b98b5bbf2621abfd85cfdc02c3b5a2ef193f Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 31 May 2012 15:19:11 -0500 Subject: [PATCH 337/651] Improve tests for to_port and type_name BIFs --- testing/btest/Baseline/bifs.to_port/out | 1 + testing/btest/Baseline/bifs.type_name/out | 6 +++ testing/btest/bifs/to_port.bro | 1 + testing/btest/bifs/type_name.bro | 48 ++++++++++++++--------- 4 files changed, 37 insertions(+), 19 deletions(-) diff --git a/testing/btest/Baseline/bifs.to_port/out b/testing/btest/Baseline/bifs.to_port/out index fb8a536abb..79796d605e 100644 --- a/testing/btest/Baseline/bifs.to_port/out +++ b/testing/btest/Baseline/bifs.to_port/out @@ -1,6 +1,7 @@ 123/tcp 123/udp 123/icmp +0/unknown 256/tcp 256/udp 256/icmp diff --git a/testing/btest/Baseline/bifs.type_name/out b/testing/btest/Baseline/bifs.type_name/out index 610ee304fd..2c5cb408f9 100644 --- a/testing/btest/Baseline/bifs.type_name/out +++ b/testing/btest/Baseline/bifs.type_name/out @@ -1,5 +1,6 @@ string count +int double bool time @@ -18,3 +19,8 @@ set[string] table[count] of string table[string] of count record { c:count; s:string; } +function(aa:int; bb:int;) : bool +function() : any +function() : void +file of string +event() diff --git a/testing/btest/bifs/to_port.bro b/testing/btest/bifs/to_port.bro index 39a0cbed6b..382bf5d333 100644 --- a/testing/btest/bifs/to_port.bro +++ b/testing/btest/bifs/to_port.bro @@ -7,6 +7,7 @@ event bro_init() print to_port("123/tcp"); print to_port("123/udp"); print to_port("123/icmp"); + print to_port("not a port"); local a: transport_proto = tcp; local b: transport_proto = udp; diff --git a/testing/btest/bifs/type_name.bro b/testing/btest/bifs/type_name.bro index a8c51ef69d..531962e3f5 100644 --- a/testing/btest/bifs/type_name.bro +++ b/testing/btest/bifs/type_name.bro @@ -13,24 +13,29 @@ event bro_init() { local a = "foo"; local b = 3; - local c = 3.14; - local d = T; - local e = current_time(); - local f = 5hr; - local g = /^foo|bar/; - local h = Blue; - local i = 123/tcp; - local j = 192.168.0.2; - local k = [fe80::1]; - local l = 192.168.0.0/16; - local m = [fe80:1234::]/32; - local n = vector( 1, 2, 3); - local o = vector( "bro", "test"); - local p = set( 1, 2, 3); - local q = set( "this", "test"); - local r: table[count] of string = { [1] = "test", [2] = "bro" }; - local s: table[string] of count = { ["a"] = 5, ["b"] = 3 }; - local t: myrecord = [ $c = 2, $s = "another test" ]; + local c = -3; + local d = 3.14; + local e = T; + local f = current_time(); + local g = 5hr; + local h = /^foo|bar/; + local i = Blue; + local j = 123/tcp; + local k = 192.168.0.2; + local l = [fe80::1]; + local m = 192.168.0.0/16; + local n = [fe80:1234::]/32; + local o = vector( 1, 2, 3); + local p: vector of string = vector( 
"bro", "test" ); + local q = set( 1, 2, 3); + local r = set( "this", "test"); + local s: table[count] of string = { [1] = "test", [2] = "bro" }; + local t: table[string] of count = { ["a"] = 5, ["b"] = 3 }; + local u: myrecord = [ $c = 2, $s = "another test" ]; + local v = function(aa: int, bb: int): bool { return aa < bb; }; + local w = function(): any { }; + local x = function() { }; + local y = open("deleteme"); print type_name(a); print type_name(b); @@ -52,5 +57,10 @@ event bro_init() print type_name(r); print type_name(s); print type_name(t); - + print type_name(u); + print type_name(v); + print type_name(w); + print type_name(x); + print type_name(y); + print type_name(bro_init); } From eeb1609768063478f85064c9be55b1f46ede7bf7 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 31 May 2012 15:32:28 -0500 Subject: [PATCH 338/651] Change Input::update_finished lookup to happen at init time. Also going through the internal_handler() function will set the event as "used" (i.e. it's marked as being raised somewhere) and fixes the core.check-unused-event-handlers test failure (addresses #823). --- src/input/Manager.cc | 7 ++----- src/input/Manager.h | 2 ++ 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index bc79a2390b..f35071081b 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -194,6 +194,7 @@ Manager::TableStream::~TableStream() Manager::Manager() { + update_finished = internal_handler("Input::update_finished"); } Manager::~Manager() @@ -1199,11 +1200,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) #endif // Send event that the current update is indeed finished. - EventHandler* handler = event_registry->Lookup("Input::update_finished"); - if ( handler == 0 ) - reporter->InternalError("Input::update_finished not found!"); - - SendEvent(handler, 2, new StringVal(i->name.c_str()), new StringVal(i->source.c_str())); + SendEvent(update_finished, 2, new StringVal(i->name.c_str()), new StringVal(i->source.c_str())); } void Manager::Put(ReaderFrontend* reader, Value* *vals) diff --git a/src/input/Manager.h b/src/input/Manager.h index 269e562e23..400918366e 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -184,6 +184,8 @@ private: enum StreamType { TABLE_STREAM, EVENT_STREAM }; map readers; + + EventHandlerPtr update_finished; }; From 85e29a9471c297bae07046428db7f38aa792ddf8 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 31 May 2012 14:26:25 -0700 Subject: [PATCH 339/651] Documentation --- doc/input.rst | 437 +++++++++++++++++++++++++++----------- src/input/ReaderBackend.h | 13 +- 2 files changed, 327 insertions(+), 123 deletions(-) diff --git a/doc/input.rst b/doc/input.rst index d9fe8aa6b8..2038ccb22f 100644 --- a/doc/input.rst +++ b/doc/input.rst @@ -1,92 +1,345 @@ -===================== -Loading Data into Bro -===================== +============================================== +Loading Data into Bro with the Input Framework +============================================== -.. rst-class:: opening - - Bro comes with a flexible input interface that allows to read - previously stored data. Data is either read into bro tables or - sent to scripts using events. - This document describes how the input framework can be used. +Bro now features a flexible input frameworks that allows users +to import data into Bro. Data is either read into Bro tables or +converted to events which can then be handled by scripts. 
+
+The input framework has been merged into the git master and we
+will give a short summary of how to use it.
+The input framework is automatically compiled and installed
+together with Bro. The interface to it is exposed via the
+scripting layer.
+This document gives the most common examples. For more complex
+scenarios it is worthwhile to take a look at the unit tests in
+``testing/btest/scripts/base/frameworks/input/``.
+
 .. contents::
 
-Terminology
-===========
+Reading Data into Tables
+========================
 
-Bro's input framework is built around three main abstracts, that are
-very similar to the abstracts used in the logging framework:
+Probably the most interesting use-case of the input framework is to
+read data into a Bro table.
 
-    Input Streams
-        An input stream corresponds to a single input source
-        (usually a textfile). It defined the information necessary
-        to find the source (e.g. the filename), the reader that it used
-        to get data from it (see below).
-        It also defines exactly what data is read from the input source.
-        There are two different kind of streams, event streams and table
-        streams.
-        By default, event streams generate an event for each line read
-        from the input source.
-        Table streams on the other hand read the input source in a bro
-        table for easy later access.
+By default, the input framework reads the data in the same format
+as it is written by the logging framework in Bro - a tab-separated
+ASCII file.
 
-    Readers
-        A reader defines the input format for the specific input stream.
-        At the moment, Bro comes with two types of reader. The default reader is READER_ASCII,
-        which can read the tab seperated ASCII logfiles that were generated by the
-        logging framework.
-        READER_RAW can files containing records separated by a character(like e.g. newline) and send
-        one event per line.
+We will show the ways to read files into Bro with a simple example.
+For this example we assume that we want to import data from a blacklist
+that contains server IP addresses as well as the timestamp and the reason
+for the block.
+An example input file could look like this:
 
-Event Streams
-=============
+::
 
-For examples, please look at the unit tests in
-``testing/btest/scripts/base/frameworks/input/``.
+    #fields ip  timestamp   reason
+    192.168.17.1    1333252748  Malware host
+    192.168.27.2    1330235733  Botnet server
+    192.168.250.3   1333145108  Virus detected
 
-Event Streams are streams that generate an event for each line in of the input source.
+To read a file into a Bro table, two record types have to be defined.
+One contains the types and names of the columns that should constitute the
+table keys and the second contains the types and names of the columns that
+should constitute the table values.
 
-For example, a simple stream retrieving the fields ``i`` and ``b`` from an inputSource
-could be defined as follows:
+In our case, we want to be able to look up IPs. Hence, our key record
+only contains the server IP. All other elements should be stored as
+the table content.
+
+The two records are defined as:
 
 .. code:: bro
 
-    type Val: record {
-        i: int;
-        b: bool;
+    type Idx: record {
+        ip: addr;
     };
 
-    event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool) {
-        # work with event data
+    type Val: record {
+        timestamp: time;
+        reason: string;
+    };
+
+Note that the record definition has to contain the same names as the fields
+line in the log file.
+
+The log file is read into the table with a simple call to the ``add_table`` function:
+
+.. code:: bro
+
+    global blacklist: table[addr] of Val = table();
+
+    Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist]);
+    Input::remove("blacklist");
+
+With these three lines we first create an empty table that should contain the
+blacklist data and then instruct the input framework to open an input stream
+named ``blacklist`` to read the data into the table. The third line removes the
+input stream again, because we do not need it any more after the data has been
+read.
+
+Because some data files can - potentially - be rather big, the input framework
+works asynchronously. A new thread is created for each new input stream.
+This thread opens the input data file, converts the data into a Bro format and
+sends it back to the main Bro thread.
+
+Because of this, the data is not immediately accessible. Depending on the
+size of the data source it might take from a few milliseconds up to a few seconds
+until all data is present in the table.
+
+Subsequent calls to an input source are queued until the previous action has been
+completed. Because of this, it is, for example, possible to call ``add_table`` and
+``remove`` in two consecutive lines: the ``remove`` action will remain queued until
+the first read has been completed.
+
+Once the input framework finishes reading from a data source, it fires the ``update_finished``
+event. Once this event has been received, all data from the input file is available
+in the table.
+
+.. code:: bro
+
+    event Input::update_finished(name: string, source: string) {
+        # now all data is in the table
+        print blacklist;
+    }
+
+The table can also already be used while the data is still being read - it just might
+not contain all lines in the input file when the event has not yet fired. After it has
+been populated it can be used like any other Bro table and blacklist entries can easily
+be tested:
+
+.. code:: bro
+
+    if ( 192.168.18.12 in blacklist )
+        # take action
+
+
+Re-reading and streaming data
+-----------------------------
+
+For many data sources, like for many blacklists, the source data is continually
+changing. For these cases, the Bro input framework supports several ways to
+deal with changing data files.
+
+The first, very basic method is an explicit refresh of an input stream. When an input
+stream is open, the function ``force_update`` can be called. This will trigger
+a complete refresh of the table; any changed elements from the file will be updated.
+After the update is finished, the ``update_finished`` event will be raised.
+
+In our example the call would look like:
+
+.. code:: bro
+
+    Input::force_update("blacklist");
+
+The input framework also supports two automatic refresh modes. The first mode
+continually checks if a file has been changed. If the file has been changed, it
+is re-read and the data in the Bro table is updated to reflect the current state.
+Each time a change has been detected and all the new data has been read into the
+table, the ``update_finished`` event is raised.
+
+The second mode is a streaming mode. This mode assumes that the source data file
+is an append-only file to which new data is continually appended. Bro continually
+checks for new data at the end of the file and will add the new data to the table.
+If newer lines in the file have the same index as previous lines, they will overwrite
+the values in the output table.
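+
+As a minimal sketch (reusing the ``Idx``, ``Val`` and ``blacklist`` definitions from
+above), such a streaming read can be requested by passing ``$mode=Input::STREAM`` to
+the same ``add_table`` call:
+
+.. code:: bro
+
+    Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::STREAM]);
+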
+Because of the nature of streaming reads (data is continually added to the table),
+the ``update_finished`` event is never raised when using streaming reads.
+
+The reading mode can be selected by setting the ``mode`` option of the add_table call.
+Valid values are ``MANUAL`` (the default), ``REREAD`` and ``STREAM``.
+
+Hence, when adding ``$mode=Input::REREAD`` to the previous example, the blacklist
+table will always reflect the state of the blacklist input file.
+
+.. code:: bro
+
+    Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD]);
+
+Receiving change events
+-----------------------
+
+When re-reading files, it might be interesting to know exactly which lines in the source
+files have changed.
+
+For this reason, the input framework can raise an event each time a data item is added to,
+removed from or changed in a table.
+
+The event definition looks like this:
+
+.. code:: bro
+
+    event entry(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) {
+        # act on values
+    }
+
+The event has to be specified in ``$ev`` in the ``add_table`` call:
+
+.. code:: bro
+
+    Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD, $ev=entry]);
+
+The ``description`` field of the event contains the arguments that were originally supplied to the add_table call.
+Hence, the name of the stream can, for example, be accessed with ``description$name``. ``tpe`` is an enum containing
+the type of the change that occurred.
+
+It will contain ``Input::EVENT_NEW`` when a line that was not previously present
+in the table has been added. In this case ``left`` contains the index of the added table entry and ``right`` contains
+the values of the added entry.
+
+If a table entry that was already present is altered during the re-reading or streaming read of a file, ``tpe`` will contain
+``Input::EVENT_CHANGED``. In this case ``left`` contains the index of the changed table entry and ``right`` contains the
+values of the entry before the change. The reason for this is that the table has already been updated when the event is
+raised; the current value can be obtained by looking it up in the table. Hence it is possible to compare
+the old and the new values.
+
+``tpe`` contains ``Input::EVENT_REMOVED`` when a table element is removed because it was no longer present during a re-read.
+In this case ``left`` contains the index and ``right`` the values of the removed element.
+
+
+Filtering data during import
+----------------------------
+
+The input framework also allows a user to filter the data during the import. To this end, predicate functions are used. A predicate
+function is called before a new element is added to, changed in, or removed from a table. The predicate can either accept or veto
+the change by returning true for an accepted change and false for a rejected change. Furthermore, it can alter the data
+before it is written to the table.
+
+The following example filter will reject adding entries to the table if they were generated more than a month ago. It
+will accept all changes and all removals of values that are already present in the table.
+
+.. 
code:: bro + + Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD, + $pred(typ: Input::Event, left: Idx, right: Val) = { + if ( typ != Input::EVENT_NEW ) { + return T; + } + return ( ( current_time() - right$timestamp ) < (30 day) ); + }]); + +To change elements while they are being imported, the predicate function can manipulate ``left`` and ``right``. Note +that predicate functions are called before the change is committed to the table. Hence, when a table element is changed ( ``tpe`` +is ``INPUT::EVENT_CHANGED`` ), ``left`` and ``right`` contain the new values, but the destination (``blacklist`` in our example) +still contains the old values. This allows predicate functions to examine the changes between the old and the new version before +deciding if they should be allowed. + +Different readers +----------------- + +The input framework supports different kinds of readers for different kinds of source data files. At the moment, the default +reader reads ASCII files formatted in the Bro log-file-format (tab-separated values). At the moment, Bro comes with two +other readers. The ``RAW`` reader reads a file that is split by a specified record separator (usually newline). The contents +are returned line-by-line as strings; it can, for example, be used to read configuration files and the like and is probably +only useful in the event mode and not for reading data to tables. + +Another included reader is the ``BENCHMARK`` reader, which is being used to optimize the speed of the input framework. It +can generate arbitrary amounts of semi-random data in all Bro data types supported by the input framework. + +In the future, the input framework will get support for new data sources like, for example, different databases. + +Add_table options +----------------- + +This section lists all possible options that can be used for the add_table function and gives +a short explanation of their use. Most of the options already have been discussed in the +previous sections. + +The possible fields that can be set for an table stream are: - event bro_init { - Input::add_event([$source="input.log", $name="input", $fields=Val, $ev=line]); - } - -The fields that can be set for an event stream are: - - ``want_record`` - Boolean value, that defines if the event wants to receive the fields inside of - a single record value, or individually (default). - ``source`` A mandatory string identifying the source of the data. For the ASCII reader this is the filename. - ``reader`` + ``name`` + A mandatory name for the filter that can later be used + to manipulate it further. + + ``idx`` + Record type that defines the index of the table + + ``val`` + Record type that defines the values of the table + + ``reader`` The reader used for this stream. Default is ``READER_ASCII``. ``mode`` The mode in which the stream is opened. Possible values are ``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``. ``MANUAL`` means, that the files is not updated after it has been read. Changes to the file will not - be reflected in the data bro knows. + be reflected in the data Bro knows. ``REREAD`` means that the whole file is read again each time a change is found. This should be used for files that are mapped to a table where individual lines can change. ``STREAM`` means that the data from the file is streamed. Events / table entries will be generated as new data is added to the file. 
+ + ``destination`` + The destination table + + ``ev`` + Optional event that is raised, when values are added to, changed in or deleted from the table. + Events are passed an Input::Event description as the first argument, the index record as the second argument + and the values as the third argument. + + ``pred`` + Optional predicate, that can prevent entries from being added to the table and events from being sent. + + ``want_record`` + Boolean value, that defines if the event wants to receive the fields inside of + a single record value, or individually (default). + This can be used, if ``val`` is a record containing only one type. In this case, + if ``want_record`` is set to false, the table will contain elements of the type + contained in ``val``. + +Reading data to events +====================== + +The second supported mode of the input framework is reading data to Bro events instead +of reading them to a table using event streams. + +Event streams work very similarly to table streams that were already discussed in much +detail. To read the blacklist of the previous example into an event stream, the following +Bro code could be used: + +Event Streams are streams that generate an event for each line in of the input source. + +For example, a simple stream retrieving the fields ``i`` and ``b`` from an input Source +could be defined as follows: + +.. code:: bro + + type Val: record { + ip: addr; + timestamp: time; + reason: string; + }; + + event blacklistentry(description: Input::EventDescription, tpe: Input::Event, ip: addr, timestamp: time, reason: string) { + # work with event data + } + + event bro_init() { + Input::add_event([$source="blacklist.file", $name="blacklist", $fields=Val, $ev=blacklistentry]); + } + + +The main difference in the declaration of the event stream is, that an event stream needs no +separate index and value declarations -- instead, all source data types are provided in a single +record definition. + +Apart from this, event streams work exactly the same as table streams and support most of the options +that are also supported for table streams. + +The options that can be set for when creating an event stream with ``add_event`` are: + + ``source`` + A mandatory string identifying the source of the data. + For the ASCII reader this is the filename. ``name`` A mandatory name for the stream that can later be used @@ -102,82 +355,26 @@ The fields that can be set for an event stream are: followed by the data, either inside of a record (if ``want_record is set``) or as individual fields. The Input::Event structure can contain information, if the received line is ``NEW``, has - been ``CHANGED`` or ``DELETED``. Singe the ascii reader cannot track this information + been ``CHANGED`` or ``DELETED``. Singe the ASCII reader cannot track this information for event filters, the value is always ``NEW`` at the moment. - - - -Table Streams -============= - -Table streams are the second, more complex type of input streams. - -Table streams store the information they read from an input source in a bro table. For example, -when reading a file that contains ip addresses and connection attemt information one could use -an approach similar to this: - -.. 
code:: bro - - type Idx: record { - a: addr; - }; - - type Val: record { - tries: count; - }; - - global conn_attempts: table[addr] of count = table(); - - event bro_init { - Input::add_table([$source="input.txt", $name="input", $idx=Idx, $val=Val, $destination=conn_attempts]); - } - -The table conn_attempts will then contain the information about connection attemps. - -The possible fields that can be set for an table stream are: - - ``want_record`` - Boolean value, that defines if the event wants to receive the fields inside of - a single record value, or individually (default). - - ``source`` - A mandatory string identifying the source of the data. - For the ASCII reader this is the filename. - - ``reader`` - The reader used for this stream. Default is ``READER_ASCII``. - + ``mode`` The mode in which the stream is opened. Possible values are ``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``. ``MANUAL`` means, that the files is not updated after it has been read. Changes to the file will not - be reflected in the data bro knows. + be reflected in the data Bro knows. ``REREAD`` means that the whole file is read again each time a change is found. This should be used for files that are mapped to a table where individual lines can change. ``STREAM`` means that the data from the file is streamed. Events / table entries will be generated as new data is added to the file. - ``name`` - A mandatory name for the filter that can later be used - to manipulate it further. - - ``idx`` - Record type that defines the index of the table - - ``val`` - Record type that defines the values of the table + ``reader`` + The reader used for this stream. Default is ``READER_ASCII``. ``want_record`` - Defines if the values of the table should be stored as a record (default), - or as a simple value. Has to be set if Val contains more than one element. + Boolean value, that defines if the event wants to receive the fields inside of + a single record value, or individually (default). If this is set to true, the + event will receive a single record of the type provided in ``fields``. - ``destination`` - The destination table - ``ev`` - Optional event that is raised, when values are added to, changed in or deleted from the table. - Events are passed an Input::Event description as the first argument, the index record as the second argument - and the values as the third argument. - ``pred`` - Optional predicate, that can prevent entries from being added to the table and events from being sent. diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index 8b5e7d674b..877e0bfbf1 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -15,17 +15,24 @@ namespace input { */ enum ReaderMode { /** - * TODO Bernhard. + * Manual refresh reader mode. The reader will read the file once, + * and send all read data back to the manager. After that, no automatic + * refresh should happen. Manual refreshes can be triggered from the + * scripting layer using force_update. */ MODE_MANUAL, /** - * TODO Bernhard. + * Automatic rereading mode. The reader should monitor the + * data source for changes continually. When the data source changes, + * either the whole file has to be resent using the SendEntry/EndCurrentSend functions. */ MODE_REREAD, /** - * TODO Bernhard. + * Streaming reading mode. The reader should monitor the data source + * for new appended data. When new data is appended is has to be sent + * using the Put api functions. 
*/ MODE_STREAM }; From c5ae0715008b92bd6600f92dfa2f30ef39c4a482 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 31 May 2012 14:27:50 -0700 Subject: [PATCH 340/651] Updating submodule(s). [nomail] --- aux/broccoli | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broccoli b/aux/broccoli index 07866915a1..4e17842743 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 07866915a1450ddd25b888917f494b4824b0cc3f +Subproject commit 4e17842743fef8df6abf0588c7ca86c6937a2b6d From 3b2ac75e913b9db75c2308c4f00f1b784df43091 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 1 Jun 2012 12:36:30 -0500 Subject: [PATCH 341/651] Deprecate the parse_dotted_addr BIF (use to_addr instead) --- src/bro.bif | 27 +++++++------------ .../btest/Baseline/bifs.parse_dotted_addr/out | 2 -- testing/btest/bifs/parse_dotted_addr.bro | 9 ------- 3 files changed, 10 insertions(+), 28 deletions(-) delete mode 100644 testing/btest/Baseline/bifs.parse_dotted_addr/out delete mode 100644 testing/btest/bifs/parse_dotted_addr.bro diff --git a/src/bro.bif b/src/bro.bif index e1521adee8..6a79a1ac9d 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -2670,7 +2670,7 @@ function to_port%(s: string%): port ## ## Returns: The IP address corresponding to *s*. ## -## .. bro:see:: addr_to_ptr_name parse_dotted_addr +## .. bro:see:: addr_to_ptr_name to_addr function ptr_name_to_addr%(s: string%): addr %{ if ( s->Len() != 72 ) @@ -2734,27 +2734,12 @@ function ptr_name_to_addr%(s: string%): addr ## ## Returns: The reverse pointer representation of *a*. ## -## .. bro:see:: ptr_name_to_addr parse_dotted_addr +## .. bro:see:: ptr_name_to_addr to_addr function addr_to_ptr_name%(a: addr%): string %{ return new StringVal(a->AsAddr().PtrName().c_str()); %} -# Transforms n0.n1.n2.n3 -> addr. - -## Converts a decimal dotted IP address in a :bro:type:`string` to an -## :bro:type:`addr` type. -## -## s: The IP address in the form ``n0.n1.n2.n3``. -## -## Returns: The IP address as type :bro:type:`addr`. -## -## .. bro:see:: addr_to_ptr_name parse_dotted_addr -function parse_dotted_addr%(s: string%): addr - %{ - IPAddr a(s->CheckString()); - return new AddrVal(a); - %} %%{ static Val* parse_port(const char* line) @@ -5659,6 +5644,14 @@ function match_signatures%(c: connection, pattern_type: int, s: string, # # =========================================================================== +## Deprecated. Will be removed. 
+function parse_dotted_addr%(s: string%): addr + %{ + IPAddr a(s->CheckString()); + return new AddrVal(a); + %} + + %%{ #include "Anon.h" %%} diff --git a/testing/btest/Baseline/bifs.parse_dotted_addr/out b/testing/btest/Baseline/bifs.parse_dotted_addr/out deleted file mode 100644 index 1a09fd45a5..0000000000 --- a/testing/btest/Baseline/bifs.parse_dotted_addr/out +++ /dev/null @@ -1,2 +0,0 @@ -192.168.0.2 -1234::1 diff --git a/testing/btest/bifs/parse_dotted_addr.bro b/testing/btest/bifs/parse_dotted_addr.bro deleted file mode 100644 index 6fdba26452..0000000000 --- a/testing/btest/bifs/parse_dotted_addr.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print parse_dotted_addr("192.168.0.2"); - print parse_dotted_addr("1234::1"); - } From 19e3f2ee56d9987ee9996a1ab1936d97aee5b731 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 1 Jun 2012 13:12:55 -0500 Subject: [PATCH 342/651] Fix documentation for system_env BIF --- src/bro.bif | 9 +++++---- testing/btest/Baseline/bifs.system_env/testfile | 2 +- testing/btest/bifs/system_env.bro | 6 +++--- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/bro.bif b/src/bro.bif index 6a79a1ac9d..0b880de379 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -466,17 +466,18 @@ function system%(str: string%): int ## ## str: The command to execute. ## -## env: A :bro:type:`set` or :bro:type:`table` with the environment variables -## in the form of key-value pairs (where the value is optional). +## env: A :bro:type:`table` with the environment variables in the form +## of key-value pairs. Each specified environment variable name +## will be automatically prepended with ``BRO_ARG_``. ## ## Returns: The return value from the OS ``system`` function. ## ## .. bro:see:: system str_shell_escape piped_exec -function system_env%(str: string, env: any%): int +function system_env%(str: string, env: table_string_of_string%): int %{ if ( env->Type()->Tag() != TYPE_TABLE ) { - builtin_error("system_env() requires a table/set argument"); + builtin_error("system_env() requires a table argument"); return new Val(-1, TYPE_INT); } diff --git a/testing/btest/Baseline/bifs.system_env/testfile b/testing/btest/Baseline/bifs.system_env/testfile index 8b13789179..31e0fce560 100644 --- a/testing/btest/Baseline/bifs.system_env/testfile +++ b/testing/btest/Baseline/bifs.system_env/testfile @@ -1 +1 @@ - +helloworld diff --git a/testing/btest/bifs/system_env.bro b/testing/btest/bifs/system_env.bro index 26e40b883f..23928e9b10 100644 --- a/testing/btest/bifs/system_env.bro +++ b/testing/btest/bifs/system_env.bro @@ -7,17 +7,17 @@ event bro_init() local vars: table[string] of string = { ["TESTBRO"] = "helloworld" }; # make sure the env. variable is not set - local myvar = getenv("TESTBRO"); + local myvar = getenv("BRO_ARG_TESTBRO"); if ( |myvar| != 0 ) exit(1); # check if command runs with the env. variable defined - local a = system_env("echo $TESTBRO > testfile", vars); + local a = system_env("echo $BRO_ARG_TESTBRO > testfile", vars); if ( a != 0 ) exit(1); # make sure the env. variable is still not set - myvar = getenv("TESTBRO"); + myvar = getenv("BRO_ARG_TESTBRO"); if ( |myvar| != 0 ) exit(1); } From dd4dd0ca6ea0adf94a9cbc87de6e322c34365508 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 1 Jun 2012 14:10:23 -0500 Subject: [PATCH 343/651] Add @load-sigs directive for loading signature files (addresses #551). 
--- doc/signatures.rst | 19 ++++++++++------- scripts/base/frameworks/dpd/main.bro | 3 +-- scripts/base/init-bare.bro | 4 +++- scripts/base/protocols/http/file-ident.bro | 3 ++- .../policy/protocols/http/detect-webapps.bro | 3 ++- scripts/site/local.bro | 2 +- src/Net.cc | 1 + src/Net.h | 1 + src/main.cc | 4 ++++ src/scan.l | 16 ++++++++++++++ testing/btest/Baseline/core.load-sigs/output | 3 +++ testing/btest/core/load-sigs.bro | 21 +++++++++++++++++++ 12 files changed, 67 insertions(+), 13 deletions(-) create mode 100644 testing/btest/Baseline/core.load-sigs/output create mode 100644 testing/btest/core/load-sigs.bro diff --git a/doc/signatures.rst b/doc/signatures.rst index 7a1b164dbb..f65215eceb 100644 --- a/doc/signatures.rst +++ b/doc/signatures.rst @@ -51,13 +51,18 @@ This script contains a default event handler that raises :bro:enum:`Signatures::Sensitive_Signature` :doc:`Notices ` (as well as others; see the beginning of the script). -As signatures are independent of Bro's policy scripts, they are put -into their own file(s). There are two ways to specify which files -contain signatures: By using the ``-s`` flag when you invoke Bro, or -by extending the Bro variable :bro:id:`signature_files` using the ``+=`` -operator. If a signature file is given without a path, it is searched -along the normal ``BROPATH``. The default extension of the file name -is ``.sig``, and Bro appends that automatically when necessary. +As signatures are independent of Bro's policy scripts, they are put into +their own file(s). There are three ways to specify which files contain +signatures: By using the ``-s`` flag when you invoke Bro, or by +extending the Bro variable :bro:id:`signature_files` using the ``+=`` +operator, or by using the ``@load-sigs`` directive inside a Bro script. +If a signature file is given without a full path, it is searched for +along the normal ``BROPATH``. Additionally, the ``@load-sigs`` +directive can be used to load signature files in a path relative to the +Bro script in which it's placed, e.g. ``@load-sigs ./mysigs.sig`` will +expect that signature file in the same directory as the Bro script. The +default extension of the file name is ``.sig``, and Bro appends that +automatically when necessary. Signature language ================== diff --git a/scripts/base/frameworks/dpd/main.bro b/scripts/base/frameworks/dpd/main.bro index 9eb0b467f8..a5349b6cfb 100644 --- a/scripts/base/frameworks/dpd/main.bro +++ b/scripts/base/frameworks/dpd/main.bro @@ -3,8 +3,7 @@ module DPD; -## Add the DPD signatures to the signature framework. -redef signature_files += "base/frameworks/dpd/dpd.sig"; +@load-sigs ./dpd.sig export { ## Add the DPD logging stream identifier. diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index da2b742725..c35acd525d 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -615,7 +615,9 @@ function add_signature_file(sold: string, snew: string): string } ## Signature files to read. Use ``redef signature_files += "foo.sig"`` to -## extend. Signature files will be searched relative to ``BROPATH``. +## extend. Signature files added this way will be searched relative to +## ``BROPATH``. Using the ``@load-sigs`` directive instead is preferred +## since that can search paths relative to the current script. global signature_files = "" &add_func = add_signature_file; ## ``p0f`` fingerprint file to use. Will be searched relative to ``BROPATH``. 
diff --git a/scripts/base/protocols/http/file-ident.bro b/scripts/base/protocols/http/file-ident.bro index f2cb9d19ac..b493f02bf0 100644 --- a/scripts/base/protocols/http/file-ident.bro +++ b/scripts/base/protocols/http/file-ident.bro @@ -6,7 +6,8 @@ @load ./utils # Add the magic number signatures to the core signature set. -redef signature_files += "base/protocols/http/file-ident.sig"; +@load-sigs ./file-ident.sig + # Ignore the signatures used to match files redef Signatures::ignored_ids += /^matchfile-/; diff --git a/scripts/policy/protocols/http/detect-webapps.bro b/scripts/policy/protocols/http/detect-webapps.bro index 796da5c29a..fb805bfd33 100644 --- a/scripts/policy/protocols/http/detect-webapps.bro +++ b/scripts/policy/protocols/http/detect-webapps.bro @@ -4,9 +4,10 @@ @load base/frameworks/software @load base/protocols/http +@load-sigs ./detect-webapps.sig + module HTTP; -redef signature_files += "protocols/http/detect-webapps.sig"; # Ignore the signatures used to match webapps redef Signatures::ignored_ids += /^webapp-/; diff --git a/scripts/site/local.bro b/scripts/site/local.bro index 9681f7a75c..db1a786839 100644 --- a/scripts/site/local.bro +++ b/scripts/site/local.bro @@ -25,7 +25,7 @@ redef Software::vulnerable_versions += { @load frameworks/software/version-changes # This adds signatures to detect cleartext forward and reverse windows shells. -redef signature_files += "frameworks/signatures/detect-windows-shells.sig"; +@load-sigs frameworks/signatures/detect-windows-shells # Uncomment the following line to begin receiving (by default hourly) emails # containing all of your notices. diff --git a/src/Net.cc b/src/Net.cc index 5bfae2275b..328998b011 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -69,6 +69,7 @@ PktSrc* current_pktsrc = 0; IOSource* current_iosrc; std::list files_scanned; +std::vector sig_files; RETSIGTYPE watchdog(int /* signo */) { diff --git a/src/Net.h b/src/Net.h index 9e68cc025b..5b959d1688 100644 --- a/src/Net.h +++ b/src/Net.h @@ -111,5 +111,6 @@ struct ScannedFile { }; extern std::list files_scanned; +extern std::vector sig_files; #endif diff --git a/src/main.cc b/src/main.cc index 9e9c867714..b1d0a4d723 100644 --- a/src/main.cc +++ b/src/main.cc @@ -838,6 +838,10 @@ int main(int argc, char** argv) if ( *s ) rule_files.append(s); + // Append signature files defined in @load-sigs + for ( size_t i = 0; i < sig_files.size(); ++i ) + rule_files.append(copy_string(sig_files[i].c_str())); + if ( rule_files.length() > 0 ) { rule_matcher = new RuleMatcher(RE_level); diff --git a/src/scan.l b/src/scan.l index 30d521c6bd..645ce659cd 100644 --- a/src/scan.l +++ b/src/scan.l @@ -358,6 +358,22 @@ when return TOK_WHEN; (void) load_files(new_file); } +@load-sigs{WS}{FILE} { + const char* new_sig_file = skip_whitespace(yytext + 10); + const char* full_filename = 0; + FILE* f = search_for_file(new_sig_file, "sig", &full_filename, false, 0); + + if ( f ) + { + sig_files.push_back(full_filename); + fclose(f); + delete [] full_filename; + } + else + reporter->Error("failed to find file associated with @load-sigs %s", + new_sig_file); + } + @unload{WS}{FILE} { // Skip "@unload". 
const char* new_file = skip_whitespace(yytext + 7); diff --git a/testing/btest/Baseline/core.load-sigs/output b/testing/btest/Baseline/core.load-sigs/output new file mode 100644 index 0000000000..2a22b47ad4 --- /dev/null +++ b/testing/btest/Baseline/core.load-sigs/output @@ -0,0 +1,3 @@ +[orig_h=141.142.220.118, orig_p=35642/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +works +GET /images/wikimedia-button.png HTTP/1.1^M^JHost: meta.wikimedia.org^M^JUser-Agent: Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Geck... diff --git a/testing/btest/core/load-sigs.bro b/testing/btest/core/load-sigs.bro new file mode 100644 index 0000000000..3e08338f2c --- /dev/null +++ b/testing/btest/core/load-sigs.bro @@ -0,0 +1,21 @@ +# A test of signature loading using @load-sigs. + +# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +@load-sigs ./subdir/mysigs.sig + +event signature_match(state: signature_state, msg: string, data: string) + { + print state$conn$id; + print msg; + print data; + } + +@TEST-START-FILE subdir/mysigs.sig +signature my-sig { +ip-proto == tcp +payload /GET \/images/ +event "works" +} +@TEST-END-FILE From 2eddaffc9020ffd8e692aaf7099ae9afb97cb699 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 1 Jun 2012 14:24:42 -0500 Subject: [PATCH 344/651] Fix documentation of sort BIF and add more tests --- src/bro.bif | 9 ++-- testing/btest/Baseline/bifs.sort/out | 16 ++++++- testing/btest/bifs/sort.bro | 65 +++++++++++++++++++++++++--- 3 files changed, 80 insertions(+), 10 deletions(-) diff --git a/src/bro.bif b/src/bro.bif index 0b880de379..1ecfbb0e10 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -1423,12 +1423,15 @@ bool indirect_int_sort_function(int a, int b) ## Sorts a vector in place. The second argument is a comparison function that ## takes two arguments: if the vector type is ``vector of T``, then the -## comparison function must be ``function(a: T, b: T): bool``, which returns -## ``a < b`` for some type-specific notion of the less-than operator. +## comparison function must be ``function(a: T, b: T): int``, which returns +## a value less than zero if ``a < b`` for some type-specific notion of the +## less-than operator. The comparison function is optional if the type +## is an integral type (int, count, etc.). ## ## v: The vector instance to sort. ## -## Returns: The original vector. +## Returns: The vector, sorted from minimum to maximum value. If the vector +## could not be sorted, then the original vector is returned instead. ## ## .. 
bro:see:: order function sort%(v: any, ...%) : any diff --git a/testing/btest/Baseline/bifs.sort/out b/testing/btest/Baseline/bifs.sort/out index de4baf1254..fed75265b9 100644 --- a/testing/btest/Baseline/bifs.sort/out +++ b/testing/btest/Baseline/bifs.sort/out @@ -1,2 +1,16 @@ [2, 3, 5, 8] -[5.0 hrs, 1.0 sec, 7.0 mins] +[2, 3, 5, 8] +[-7.0 mins, 1.0 sec, 5.0 hrs, 2.0 days] +[-7.0 mins, 1.0 sec, 5.0 hrs, 2.0 days] +[F, F, T, T] +[F, F, T, T] +[57/tcp, 123/tcp, 7/udp, 500/udp, 12/icmp] +[57/tcp, 123/tcp, 7/udp, 500/udp, 12/icmp] +[3.03, 3.01, 3.02, 3.015] +[3.03, 3.01, 3.02, 3.015] +[192.168.123.200, 10.0.0.157, 192.168.0.3] +[192.168.123.200, 10.0.0.157, 192.168.0.3] +[10.0.0.157, 192.168.0.3, 192.168.123.200] +[10.0.0.157, 192.168.0.3, 192.168.123.200] +[3.01, 3.015, 3.02, 3.03] +[3.01, 3.015, 3.02, 3.03] diff --git a/testing/btest/bifs/sort.bro b/testing/btest/bifs/sort.bro index 162e4a4504..14aa286021 100644 --- a/testing/btest/bifs/sort.bro +++ b/testing/btest/bifs/sort.bro @@ -2,16 +2,69 @@ # @TEST-EXEC: bro %INPUT >out # @TEST-EXEC: btest-diff out -function myfunc(aa: interval, bb: interval): bool +function myfunc1(a: addr, b: addr): int { - return aa < bb; + local x = addr_to_counts(a); + local y = addr_to_counts(b); + if (x[0] < y[0]) + return -1; + else + return 1; + } + +function myfunc2(a: double, b: double): int + { + if (a < b) + return -1; + else + return 1; } event bro_init() { - local a = vector( 5, 2, 8, 3 ); - print sort(a); + # Tests without supplying a comparison function - local b = vector( 5hr, 1sec, 7min ); - print sort(b, myfunc); + local a1 = vector( 5, 2, 8, 3 ); + local b1 = sort(a1); + print a1; + print b1; + + local a2: vector of interval = vector( 5hr, 2days, 1sec, -7min ); + local b2 = sort(a2); + print a2; + print b2; + + local a3: vector of bool = vector( T, F, F, T ); + local b3 = sort(a3); + print a3; + print b3; + + local a4: vector of port = vector( 12/icmp, 123/tcp, 500/udp, 7/udp, 57/tcp ); + local b4 = sort(a4); + print a4; + print b4; + + # this one is expected to fail (i.e., "sort" doesn't sort the vector) + local a5: vector of double = vector( 3.03, 3.01, 3.02, 3.015 ); + local b5 = sort(a5); + print a5; + print b5; + + # this one is expected to fail (i.e., "sort" doesn't sort the vector) + local a6: vector of addr = vector( 192.168.123.200, 10.0.0.157, 192.168.0.3 ); + local b6 = sort(a6); + print a6; + print b6; + + # Tests with a comparison function + + local c1: vector of addr = vector( 192.168.123.200, 10.0.0.157, 192.168.0.3 ); + local d1 = sort(c1, myfunc1); + print c1; + print d1; + + local c2: vector of double = vector( 3.03, 3.01, 3.02, 3.015 ); + local d2 = sort(c2, myfunc2); + print c2; + print d2; } From 2b0db0187442bddbb7b5ee0262e2d27142103aa1 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 1 Jun 2012 15:50:49 -0500 Subject: [PATCH 345/651] Improve test cases for "order" BIF Also fixed some comments and error messages in the "order" BIF. --- src/bro.bif | 9 +++--- testing/btest/Baseline/bifs.order/out | 8 ++++- testing/btest/bifs/order.bro | 45 +++++++++++++++++++++++---- 3 files changed, 50 insertions(+), 12 deletions(-) diff --git a/src/bro.bif b/src/bro.bif index 1ecfbb0e10..3df4430746 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -1526,13 +1526,13 @@ function order%(v: any, ...%) : index_vec } if ( ! comp && ! 
IsIntegral(elt_type->Tag()) ) - builtin_error("comparison function required for sort() with non-integral types"); + builtin_error("comparison function required for order() with non-integral types"); vector& vv = *v->AsVector(); int n = vv.size(); // Set up initial mapping of indices directly to corresponding - // elements. We stay zero-based until after the sorting. + // elements. vector ind_vv(n); index_map = new Val*[n]; int i; @@ -1548,7 +1548,7 @@ function order%(v: any, ...%) : index_vec if ( comp_type->YieldType()->Tag() != TYPE_INT || ! comp_type->ArgTypes()->AllMatch(elt_type, 0) ) { - builtin_error("invalid comparison function in call to sort()"); + builtin_error("invalid comparison function in call to order()"); return v; } @@ -1562,8 +1562,7 @@ function order%(v: any, ...%) : index_vec delete [] index_map; index_map = 0; - // Now spin through ind_vv to read out the rearrangement, - // adjusting indices as we do so. + // Now spin through ind_vv to read out the rearrangement. for ( i = 0; i < n; ++i ) { int ind = ind_vv[i]; diff --git a/testing/btest/Baseline/bifs.order/out b/testing/btest/Baseline/bifs.order/out index 65e5adb492..e77fbd310c 100644 --- a/testing/btest/Baseline/bifs.order/out +++ b/testing/btest/Baseline/bifs.order/out @@ -1,2 +1,8 @@ +[5, 2, 8, 3] [1, 3, 0, 2] -[5.0 hrs, 1.0 sec, 7.0 mins] +[5.0 hrs, 2.0 days, 1.0 sec, -7.0 mins] +[3, 2, 0, 1] +[192.168.123.200, 10.0.0.157, 192.168.0.3] +[1, 2, 0] +[3.03, 3.01, 3.02, 3.015] +[1, 3, 2, 0] diff --git a/testing/btest/bifs/order.bro b/testing/btest/bifs/order.bro index a39a64e6a3..5f3260ee3f 100644 --- a/testing/btest/bifs/order.bro +++ b/testing/btest/bifs/order.bro @@ -2,16 +2,49 @@ # @TEST-EXEC: bro %INPUT >out # @TEST-EXEC: btest-diff out -function myfunc(aa: interval, bb: interval): bool +function myfunc1(a: addr, b: addr): int { - return aa < bb; + local x = addr_to_counts(a); + local y = addr_to_counts(b); + if (x[0] < y[0]) + return -1; + else + return 1; + } + +function myfunc2(a: double, b: double): int + { + if (a < b) + return -1; + else + return 1; } event bro_init() { - local a = vector( 5, 2, 8, 3 ); - print order(a); + # TODO: these results don't make sense + + # Tests without supplying a comparison function - local b = vector( 5hr, 1sec, 7min ); - print order(b, myfunc); + local a1 = vector( 5, 2, 8, 3 ); + local b1 = order(a1); + print a1; + print b1; + + local a2: vector of interval = vector( 5hr, 2days, 1sec, -7min ); + local b2 = order(a2); + print a2; + print b2; + + # Tests with a comparison function + + local c1: vector of addr = vector( 192.168.123.200, 10.0.0.157, 192.168.0.3 ); + local d1 = order(c1, myfunc1); + print c1; + print d1; + + local c2: vector of double = vector( 3.03, 3.01, 3.02, 3.015 ); + local d2 = order(c2, myfunc2); + print c2; + print d2; } From 6292083b743b9dc34086abb3e2e0991c2d292acd Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 1 Jun 2012 16:23:40 -0500 Subject: [PATCH 346/651] Improve tests of the type_name BIF --- testing/btest/Baseline/bifs.type_name/out | 4 ++-- testing/btest/bifs/type_name.bro | 17 +++++++++++------ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/testing/btest/Baseline/bifs.type_name/out b/testing/btest/Baseline/bifs.type_name/out index 2c5cb408f9..901dca227d 100644 --- a/testing/btest/Baseline/bifs.type_name/out +++ b/testing/btest/Baseline/bifs.type_name/out @@ -15,9 +15,9 @@ subnet vector vector set[count] -set[string] +set[port,string] table[count] of string -table[string] of count +table[string] of table[addr,port] of 
string record { c:count; s:string; } function(aa:int; bb:int;) : bool function() : any diff --git a/testing/btest/bifs/type_name.bro b/testing/btest/bifs/type_name.bro index 531962e3f5..162d4825f5 100644 --- a/testing/btest/bifs/type_name.bro +++ b/testing/btest/bifs/type_name.bro @@ -26,11 +26,16 @@ event bro_init() local m = 192.168.0.0/16; local n = [fe80:1234::]/32; local o = vector( 1, 2, 3); - local p: vector of string = vector( "bro", "test" ); + local p: vector of table[count] of string = vector( + table( [1] = "test", [2] = "bro" ), + table( [1] = "another", [2] = "test" ) ); local q = set( 1, 2, 3); - local r = set( "this", "test"); + local r: set[port, string] = set( [21/tcp, "ftp"], [23/tcp, "telnet"] ); local s: table[count] of string = { [1] = "test", [2] = "bro" }; - local t: table[string] of count = { ["a"] = 5, ["b"] = 3 }; + local t: table[string] of table[addr, port] of string = { + ["a"] = table( [192.168.0.2, 21/tcp] = "ftp", + [192.168.0.3, 80/tcp] = "http" ), + ["b"] = table( [192.168.0.2, 22/tcp] = "ssh" ) }; local u: myrecord = [ $c = 2, $s = "another test" ]; local v = function(aa: int, bb: int): bool { return aa < bb; }; local w = function(): any { }; @@ -51,8 +56,8 @@ event bro_init() print type_name(l); print type_name(m); print type_name(n); - print type_name(o); - print type_name(p); + print type_name(o); # TODO: result is just "vector" + print type_name(p); # TODO: result is just "vector" print type_name(q); print type_name(r); print type_name(s); @@ -61,6 +66,6 @@ event bro_init() print type_name(v); print type_name(w); print type_name(x); - print type_name(y); + print type_name(y); # TODO: result is "file of string" print type_name(bro_init); } From 6e5eb187dd722429c191ebb394d5816e59dca34b Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 1 Jun 2012 18:11:46 -0500 Subject: [PATCH 347/651] Improve "fmt" BIF documentation comment Also update test cases for fmt. 
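The change that follows tightens the ``fmt`` doc comment: ``%A`` escapes only NUL bytes, while ``%s`` escapes every byte value below 32 or above 126. As a rough, standalone illustration of that range rule — not Bro's formatter, which mixes caret notation (``^G``) with hex escapes as the updated test comments point out — here is a small C++ sketch; the helper name escape_nonprintable, the uniform \xNN rendering, and the sample bytes are illustrative choices only:

#include <cstdio>
#include <string>

// Illustration only: the revised %s doc comment says bytes < 32 or > 126 get
// escaped. Here every such byte is rendered as \xNN; Bro's own output format
// differs in detail (caret vs. hex notation).
static std::string escape_nonprintable(const std::string& in)
	{
	std::string out;
	for ( size_t i = 0; i < in.size(); ++i )
		{
		unsigned char c = in[i];
		if ( c < 32 || c > 126 )
			{
			char buf[8];
			snprintf(buf, sizeof(buf), "\\x%02x", (unsigned) c);
			out += buf;
			}
		else
			out += (char) c;
		}
	return out;
	}

int main()
	{
	// Mirrors the spirit of the new test cases: NUL and control bytes expand,
	// printable ASCII passes through unchanged.
	std::string s("A\x00\x07\x7f", 4);
	printf("%s\n", escape_nonprintable(s).c_str());   // prints A\x00\x07\x7f
	return 0;
	}
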
--- src/bro.bif | 5 ++-- testing/btest/Baseline/bifs.fmt/out | 11 +++++-- testing/btest/bifs/fmt.bro | 45 +++++++++++++++++++---------- 3 files changed, 42 insertions(+), 19 deletions(-) diff --git a/src/bro.bif b/src/bro.bif index 3df4430746..f154fd0f86 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -1652,7 +1652,7 @@ function cat_sep%(sep: string, def: string, ...%): string ## ## - ``.``: Precision of floating point specifiers ``[efg]`` (< 128) ## -## - ``A``: Escape NUL bytes, i.e., replace ``0`` with ``\0`` +## - ``A``: Escape only NUL bytes (each one replaced with ``\0``) in a string ## ## - ``[DTdxsefg]``: Format specifier ## @@ -1664,7 +1664,8 @@ function cat_sep%(sep: string, def: string, ...%): string ## - ``x``: Unsigned hexadecimal (using C-style ``%llx``); ## addresses/ports are converted to host-byte order ## -## - ``s``: Escaped string +## - ``s``: String (byte values less than 32 or greater than 126 +## will be escaped) ## ## - ``[efg]``: Double ## diff --git a/testing/btest/Baseline/bifs.fmt/out b/testing/btest/Baseline/bifs.fmt/out index 6422294a39..2a28bf333a 100644 --- a/testing/btest/Baseline/bifs.fmt/out +++ b/testing/btest/Baseline/bifs.fmt/out @@ -44,5 +44,12 @@ test 310.000 310 310 -this\0test -this\0test +2 +3 +4 +2 +2 +6 +2 +2 +6 diff --git a/testing/btest/bifs/fmt.bro b/testing/btest/bifs/fmt.bro index bb2740d127..8a30abd199 100644 --- a/testing/btest/bifs/fmt.bro +++ b/testing/btest/bifs/fmt.bro @@ -6,16 +6,13 @@ type color: enum { Red, Blue }; event bro_init() { - local a = "foo"; - local b = 3; - local c = T; - local d = Blue; - local e = vector( 1, 2, 3); - local f = set( 1, 2, 3); - local g: table[count] of string = { [1] = "test", [2] = "bro" }; - local h = "this\0test"; + local a = Blue; + local b = vector( 1, 2, 3); + local c = set( 1, 2, 3); + local d: table[count] of string = { [1] = "test", [2] = "bro" }; - #print fmt(c, b, a); # this should work, according to doc comments + # TODO: this should work, according to doc comments + #print fmt(T, 3, "foo"); # tests with only a format string (no additional args) print fmt("test"); @@ -57,10 +54,10 @@ event bro_init() print fmt("*%10s*", [fe80:1234::]/32); print fmt("*%10s*", 3hr); print fmt("*%10s*", /^foo|bar/); + print fmt("*%10s*", a); + print fmt("*%10s*", b); + print fmt("*%10s*", c); print fmt("*%10s*", d); - print fmt("*%10s*", e); - print fmt("*%10s*", f); - print fmt("*%10s*", g); # tests of various data types without field width print fmt("%e", 3.1e+2); @@ -71,8 +68,26 @@ event bro_init() print fmt("%.3g", 3.1e+2); print fmt("%.7g", 3.1e+2); - # these produce same result - print fmt("%As", h); - print fmt("%s", h); + # Tests comparing "%As" and "%s" (the string length is printed instead + # of the string itself because the print command does its own escaping) + local s0 = "\x00\x07"; + local s1 = fmt("%As", s0); # expands \x00 to "\0" + local s2 = fmt("%s", s0); # expands \x00 to "\0", and \x07 to "^G" + print |s0|; + print |s1|; + print |s2|; + s0 = "\x07\x1f"; + s1 = fmt("%As", s0); + s2 = fmt("%s", s0); # expands \x07 to "^G", and \x1f to "\x1f" + print |s0|; + print |s1|; + print |s2|; + + s0 = "\x7f\xff"; + s1 = fmt("%As", s0); + s2 = fmt("%s", s0); # expands \x7f to "^?", and \xff to "\xff" + print |s0|; + print |s1|; + print |s2|; } From 95f000738bc8f04559b0f3d8ba98ae369a9c640c Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Sun, 3 Jun 2012 12:40:09 -0400 Subject: [PATCH 348/651] ElasticSearch log writer implementation test 1 - writes out JSON to file instead of sending it over HTTP for 
now. --- src/logging/Manager.cc | 9 + src/logging/writers/ElasticSearch.cc | 242 +++++++++++++++++++++++++++ src/logging/writers/ElasticSearch.h | 67 ++++++++ 3 files changed, 318 insertions(+) create mode 100644 src/logging/writers/ElasticSearch.cc create mode 100644 src/logging/writers/ElasticSearch.h diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index baf832e6a9..d338ac97f8 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -17,6 +17,10 @@ #include "writers/Ascii.h" #include "writers/None.h" +#ifdef USE_ELASTICSEARCH +#include "writers/ElasticSearch.h" +#endif + #ifdef USE_DATASERIES #include "writers/DataSeries.h" #endif @@ -35,6 +39,11 @@ struct WriterDefinition { WriterDefinition log_writers[] = { { BifEnum::Log::WRITER_NONE, "None", 0, writer::None::Instantiate }, { BifEnum::Log::WRITER_ASCII, "Ascii", 0, writer::Ascii::Instantiate }, + +#ifdef USE_ELASTICSEARCH + { BifEnum::Log::WRITER_ASCII, "ElasticSearch", 0, writer::ElasticSearch::Instantiate }, +#endif + #ifdef USE_DATASERIES { BifEnum::Log::WRITER_DATASERIES, "DataSeries", 0, writer::DataSeries::Instantiate }, #endif diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc new file mode 100644 index 0000000000..eb83f26542 --- /dev/null +++ b/src/logging/writers/ElasticSearch.cc @@ -0,0 +1,242 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "config.h" + +#ifdef USE_ELASTICSEARCH + +#include +#include + +#include "util.h" + +#include "NetVar.h" +#include "threading/SerialTypes.h" + +#include "ElasticSearch.h" + +using namespace logging; +using namespace writer; +using threading::Value; +using threading::Field; + +#define MAX_EVENT_SIZE 1024 + +ElasticSearch::ElasticSearch(WriterFrontend* frontend) : WriterBackend(frontend) + { + cluster_name_len = BifConst::LogElasticSearch::cluster_name->Len(); + cluster_name = new char[cluster_name_len]; + memcpy(cluster_name, BifConst::LogElasticSearch::cluster_name->Bytes(), cluster_name_len); + + server_host_len = BifConst::LogElasticSearch::server_host->Len(); + server_host = new char[server_host_len]; + memcpy(server_host, BifConst::LogElasticSearch::server_host->Bytes(), server_host_len); + + index_name_len = BifConst::LogElasticSearch::index_name->Len(); + index_name = new char[index_name_len]; + memcpy(index_name, BifConst::LogElasticSearch::index_name->Bytes(), index_name_len); + + type_prefix_len = BifConst::LogElasticSearch::type_prefix->Len(); + type_prefix = new char[type_prefix_len]; + memcpy(type_prefix, BifConst::LogElasticSearch::type_prefix->Bytes(), type_prefix_len); + + server_port = BifConst::LogElasticSearch::server_port; + batch_size = BifConst::LogElasticSearch::batch_size; + + buffer = safe_malloc(MAX_EVENT_SIZE * batch_size); + current_offset = 0; + buffer[current_offset] = "\0"; + counter = 0; + } + +ElasticSearch::~ElasticSearch() + { + delete [] cluster_name; + delete [] server_host; + delete [] index_name; + delete [] type_prefix; + delete [] buffer; + } + +bool ElasticSearch::DoInit(string path, int num_fields, const Field* const * fields) + { + //TODO: Determine what, if anything, needs to be done here. 
+ return true; + } + +bool ElasticSearch::DoFlush() + { + //TODO: Send flush command to ElasticSearch + return true; + } + +bool ElasticSearch::DoFinish() + { + return WriterBackend::DoFinish(); + } + +char* ElasticSearch::FormatField(const char* field_name, const char* field_value) +{ + char* result = new char[MAX_EVENT_SIZE]; + strcpy(result, "\""); + strcpy(result, field_name); + strcpy(result, "\":\""); + strcpy(result, field_value); + strcpy(result, "\""); + return result; + +} + +bool ElasticSearch::BatchIndex() +{ + file = fopen("/tmp/batch.test", 'w'); + fwrite(buffer, current_offset, 1, file); + fclose(file); + file = 0; +} + +char* ElasticSearch::AddFieldToBuffer(Value* val, const Field* field) + { + if ( ! val->present ) + { + return ""; + } + + switch ( val->type ) { + + case TYPE_BOOL: + return FormatField(field->name, val->val.int_val ? "T" : "F"); + + case TYPE_INT: + return FormatField(field->name, val->val.int_val); + + case TYPE_COUNT: + case TYPE_COUNTER: + return FormatField(field->name, val->val.uint_val); + + case TYPE_PORT: + return FormatField(field->name, val->val.port_val.port); + + case TYPE_SUBNET: + return FormatField(field->name, Render(val->val.subnet_val)); + + case TYPE_ADDR: + return FormatField(field->name, Render(val->val.addr_val)); + + case TYPE_INTERVAL: + case TYPE_TIME: + case TYPE_DOUBLE: + return FormatField(field->name, val->val.double_val); + + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_FUNC: + { + int size = val->val.string_val->size(); + const char* data = val->val.string_val->data(); + + if ( ! size ) + return ""; + return FormatField(field->name, val->val.string_val->data()); + } + + case TYPE_TABLE: + { + if ( ! val->val.set_val.size ) + return ""; + + char* tmp = new char[MAX_EVENT_SIZE]; + strcpy(tmp, "{"); + for ( int j = 0; j < val->val.set_val.size; j++ ) + { + char* result = AddFieldToBuffer(val->val.set_val.vals[j], field); + bool resultSeen = false; + if ( result ){ + if ( resultSeen ) + strcpy(tmp, ","); + strcpy(tmp, result); + } + } + return FormatField(field->name, tmp); + } + + case TYPE_VECTOR: + { + if ( ! val->val.vector_val.size ) + return ""; + + char* tmp = new char[MAX_EVENT_SIZE]; + strcpy(tmp, "{"); + for ( int j = 0; j < val->val.vector_val.size; j++ ) + { + char* result = AddFieldToBuffer(val->val.vector_val.vals[j], field); + bool resultSeen = false; + if ( result ){ + if ( resultSeen ) + strcpy(tmp, ","); + strcpy(tmp, result); + } + } + return FormatField(field->name, tmp); + } + + default: + return ""; + } + + } + +bool ElasticSearch::DoWrite(int num_fields, const Field* const * fields, + Value** vals) + { + // Our action line looks like: + // {"index":"$index_name","type":"$type_prefix$path"}\n{ + + bool resultSeen = false; + + for ( int i = 0; i < num_fields; i++ ) + { + char* result = DoWriteOne(vals[i], fields[i]); + if ( result ) { + if ( ! 
resultSeen ) { + strcpy(buffer[current_offset], "{\"index\":\""); + strcat(buffer[current_offset], index_name); + strcat(buffer[current_offset], "\",\"type\":\""); + strcat(buffer[current_offset], type_prefix); + strcat(buffer[current_offset], Path()); + strcat(buffer[current_offset], "\"}\n{"); + current_offset = strlen(buffer); + resultSeen = true; + } + else { + strcat(buffer[current_offset], ","); + current_offset += 1; + } + strcat(buffer[current_offset], result); + current_offset += strlen(result); + } + } + + if ( resultSeen ) { + strcat(buffer[current_offset], "}\n"); + current_offset += 2; + counter += 1; + if ( counter >= batch_size ) + BatchIndex(); + } + return true; + } + +bool ElasticSearch::DoRotate(string rotated_path, double open, double close, bool terminating) + { + //TODO: Determine what, if anything, needs to be done here. + return true; + } + +bool ElasticSearch::DoSetBuf(bool enabled) + { + // Nothing to do. + return true; + } + +#endif diff --git a/src/logging/writers/ElasticSearch.h b/src/logging/writers/ElasticSearch.h new file mode 100644 index 0000000000..870290a6e0 --- /dev/null +++ b/src/logging/writers/ElasticSearch.h @@ -0,0 +1,67 @@ +// See the file "COPYING" in the main distribution directory for copyright. +// +// Log writer for writing to an ElasticSearch database + +#ifndef LOGGING_WRITER_ELASTICSEARCH_H +#define LOGGING_WRITER_ELASTICSEARCH_H + +#include "../WriterBackend.h" + +namespace logging { namespace writer { + +class ElasticSearch : public WriterBackend { +public: + ElasticSearch(WriterFrontend* frontend); + ~ElasticSearch(); + + static WriterBackend* Instantiate(WriterFrontend* frontend) + { return new ElasticSearch(frontend); } + static string LogExt(); + +protected: + // Overidden from WriterBackend. + + virtual bool DoInit(string path, int num_fields, + const threading::Field* const * fields); + + virtual bool DoWrite(int num_fields, const threading::Field* const* fields, + threading::Value** vals); + virtual bool DoSetBuf(bool enabled); + virtual bool DoRotate(string rotated_path, double open, + double close, bool terminating); + virtual bool DoFlush(); + virtual bool DoFinish(); + +private: + char* AddFieldToBuffer(threading::Value* val, const threading::Field* field); + char* FormatField(const char* field_name, const char* field_value); + bool BatchIndex(); + + char* buffer; + int current_offset; + int counter; + + // From scripts + char* cluster_name; + int cluster_name_len; + + char* server_host; + int server_host_len; + + uint64 server_port; + + char* index_name; + int index_name_len; + + char* type_prefix; + int type_prefix_len; + + uint64 batch_size; + +}; + +} +} + + +#endif From 7bee0b0d8e91d1b8c09934c671ebb7cd9607cdca Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Sun, 3 Jun 2012 22:07:20 -0400 Subject: [PATCH 349/651] Added sending messages to ElasticSearch over HTTP. 
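The next commit moves the writer from dumping JSON to a temporary file over to posting it with libcurl. For orientation, here is a minimal, self-contained sketch of the same easy-interface pattern that the writer's HTTPSetup/HTTPSend pair follows (create a handle, set a Content-Type header, POST a preassembled ``_bulk`` body). The endpoint uses the script-level defaults (127.0.0.1:9200); the sample payload, the discard_response callback name, and driving it from main() are illustrative assumptions rather than code taken from the patch:

#include <curl/curl.h>
#include <cstdio>
#include <string>

// Minimal sketch of posting a preassembled ElasticSearch bulk body with
// libcurl's easy interface. Endpoint and payload below are placeholders.
static size_t discard_response(char* ptr, size_t size, size_t nmemb, void* userdata)
	{
	return size * nmemb;   // tell libcurl the response bytes were consumed
	}

int main()
	{
	curl_global_init(CURL_GLOBAL_DEFAULT);

	CURL* handle = curl_easy_init();
	if ( ! handle )
		return 1;

	// Same shape as the writer's action/document line pairs, with dummy values.
	std::string body =
		"{\"index\":{\"_index\":\"bro-logs\",\"_type\":\"bro_conn\"}}\n"
		"{\"ts\":\"1338000000000\",\"id.orig_h\":\"10.0.0.1\"}\n";

	struct curl_slist* headers =
		curl_slist_append(NULL, "Content-Type: text/json; charset=utf-8");

	curl_easy_setopt(handle, CURLOPT_URL, "http://127.0.0.1:9200/_bulk");
	curl_easy_setopt(handle, CURLOPT_HTTPHEADER, headers);
	curl_easy_setopt(handle, CURLOPT_POST, 1L);
	curl_easy_setopt(handle, CURLOPT_POSTFIELDS, body.c_str());
	curl_easy_setopt(handle, CURLOPT_POSTFIELDSIZE, (long) body.size());
	curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, discard_response);

	CURLcode rc = curl_easy_perform(handle);
	if ( rc != CURLE_OK )
		fprintf(stderr, "bulk POST failed: %s\n", curl_easy_strerror(rc));

	curl_slist_free_all(headers);
	curl_easy_cleanup(handle);
	curl_global_cleanup();
	return rc == CURLE_OK ? 0 : 1;
	}
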
--- CMakeLists.txt | 10 + config.h.in | 3 + configure | 5 + scripts/base/frameworks/logging/__load__.bro | 1 + .../logging/writers/elasticsearch.bro | 25 ++ src/CMakeLists.txt | 1 + src/logging.bif | 11 + src/logging/Manager.cc | 6 +- src/logging/writers/ElasticSearch.cc | 317 +++++++++++------- src/logging/writers/ElasticSearch.h | 13 +- src/types.bif | 1 + 11 files changed, 266 insertions(+), 127 deletions(-) create mode 100644 scripts/base/frameworks/logging/writers/elasticsearch.bro diff --git a/CMakeLists.txt b/CMakeLists.txt index 28b702ab01..404cdfeeb5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -122,6 +122,14 @@ if (LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND) list(APPEND OPTLIBS ${LibXML2_LIBRARIES}) endif() +set(USE_LIBCURL false) +find_package(CURL) +if (CURL_FOUND) + set(USE_LIBCURL true) + include_directories(BEFORE ${CURL_INCLUDE_DIR}) + list(APPEND OPTLIBS ${CURL_LIBRARIES}) +endif() + if (ENABLE_PERFTOOLS_DEBUG) # Just a no op to prevent CMake from complaining about manually-specified # ENABLE_PERFTOOLS_DEBUG not being used if google perftools weren't found @@ -209,11 +217,13 @@ message( "\nBroccoli: ${INSTALL_BROCCOLI}" "\nBroctl: ${INSTALL_BROCTL}" "\nAux. Tools: ${INSTALL_AUX_TOOLS}" + "\nElasticSearch: ${INSTALL_ELASTICSEARCH}" "\n" "\nGeoIP: ${USE_GEOIP}" "\nGoogle perftools: ${USE_PERFTOOLS}" "\n debugging: ${USE_PERFTOOLS_DEBUG}" "\nDataSeries: ${USE_DATASERIES}" + "\nlibCURL: ${USE_LIBCURL}" "\n" "\n================================================================\n" ) diff --git a/config.h.in b/config.h.in index c2cb3ec1dc..66121cefbf 100644 --- a/config.h.in +++ b/config.h.in @@ -117,6 +117,9 @@ /* Use the DataSeries writer. */ #cmakedefine USE_DATASERIES +/* Build the ElasticSearch writer. */ +#cmakedefine INSTALL_ELASTICSEARCH + /* Version number of package */ #define VERSION "@VERSION@" diff --git a/configure b/configure index 3258d4abfc..7ea5613a6d 100755 --- a/configure +++ b/configure @@ -35,6 +35,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]... 
--disable-auxtools don't build or install auxiliary tools --disable-python don't try to build python bindings for broccoli --disable-ruby don't try to build ruby bindings for broccoli + --enable-elasticsearch build the elasticsearch writer Required Packages in Non-Standard Locations: --with-openssl=PATH path to OpenSSL install root @@ -98,6 +99,7 @@ append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc append_cache_entry ENABLE_DEBUG BOOL false append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false +append_cache_entry INSTALL_ELASTICSEARCH BOOL false append_cache_entry BinPAC_SKIP_INSTALL BOOL true append_cache_entry BUILD_SHARED_LIBS BOOL true append_cache_entry INSTALL_AUX_TOOLS BOOL true @@ -156,6 +158,9 @@ while [ $# -ne 0 ]; do --disable-auxtools) append_cache_entry INSTALL_AUX_TOOLS BOOL false ;; + --enable-elasticsearch) + append_cache_entry INSTALL_ELASTICSEARCH BOOL true + ;; --disable-python) append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true ;; diff --git a/scripts/base/frameworks/logging/__load__.bro b/scripts/base/frameworks/logging/__load__.bro index 17e03e2ef7..7dafc45397 100644 --- a/scripts/base/frameworks/logging/__load__.bro +++ b/scripts/base/frameworks/logging/__load__.bro @@ -2,3 +2,4 @@ @load ./postprocessors @load ./writers/ascii @load ./writers/dataseries +@load ./writers/elasticsearch \ No newline at end of file diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro new file mode 100644 index 0000000000..82dbcc43d4 --- /dev/null +++ b/scripts/base/frameworks/logging/writers/elasticsearch.bro @@ -0,0 +1,25 @@ +module LogElasticSearch; + +export { + ## Name of the ES cluster + const cluster_name = "elasticsearch" &redef; + + ## ES Server + const server_host = "127.0.0.1" &redef; + + ## ES Port + const server_port = 9200 &redef; + + ## Name of the ES index + const index_name = "bro-logs" &redef; + + ## The ES type prefix comes before the name of the related log. + ## e.g. prefix = "bro_" would create types of bro_dns, bro_software, etc. + const type_prefix = "" &redef; + + ## The batch size is the number of messages that will be queued up before + ## they are sent to be bulk indexed. + ## Note: this is mainly a memory usage parameter. + const batch_size = 10000 &redef; +} + diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6a68d1e7c5..fbbb01fd22 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -419,6 +419,7 @@ set(bro_SRCS logging/WriterFrontend.cc logging/writers/Ascii.cc logging/writers/DataSeries.cc + logging/writers/ElasticSearch.cc logging/writers/None.cc input/Manager.cc diff --git a/src/logging.bif b/src/logging.bif index efc6ed0b4b..308ea78b7a 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -81,3 +81,14 @@ const extent_size: count; const dump_schema: bool; const use_integer_for_time: bool; const num_threads: count; + +# Options for the ElasticSearch writer. 
+ +module LogElasticSearch; + +const cluster_name: string; +const server_host: string; +const server_port: count; +const index_name: string; +const type_prefix: string; +const batch_size: count; diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index d338ac97f8..ddfed0f70f 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -17,7 +17,7 @@ #include "writers/Ascii.h" #include "writers/None.h" -#ifdef USE_ELASTICSEARCH +#ifdef INSTALL_ELASTICSEARCH #include "writers/ElasticSearch.h" #endif @@ -40,8 +40,8 @@ WriterDefinition log_writers[] = { { BifEnum::Log::WRITER_NONE, "None", 0, writer::None::Instantiate }, { BifEnum::Log::WRITER_ASCII, "Ascii", 0, writer::Ascii::Instantiate }, -#ifdef USE_ELASTICSEARCH - { BifEnum::Log::WRITER_ASCII, "ElasticSearch", 0, writer::ElasticSearch::Instantiate }, +#ifdef INSTALL_ELASTICSEARCH + { BifEnum::Log::WRITER_ELASTICSEARCH, "ElasticSearch", 0, writer::ElasticSearch::Instantiate }, #endif #ifdef USE_DATASERIES diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index eb83f26542..61f3734f87 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -2,7 +2,7 @@ #include "config.h" -#ifdef USE_ELASTICSEARCH +#ifdef INSTALL_ELASTICSEARCH #include #include @@ -12,6 +12,9 @@ #include "NetVar.h" #include "threading/SerialTypes.h" +#include +#include + #include "ElasticSearch.h" using namespace logging; @@ -24,28 +27,35 @@ using threading::Field; ElasticSearch::ElasticSearch(WriterFrontend* frontend) : WriterBackend(frontend) { cluster_name_len = BifConst::LogElasticSearch::cluster_name->Len(); - cluster_name = new char[cluster_name_len]; + cluster_name = new char[cluster_name_len + 1]; memcpy(cluster_name, BifConst::LogElasticSearch::cluster_name->Bytes(), cluster_name_len); + cluster_name[cluster_name_len] = 0; server_host_len = BifConst::LogElasticSearch::server_host->Len(); - server_host = new char[server_host_len]; + server_host = new char[server_host_len + 1]; memcpy(server_host, BifConst::LogElasticSearch::server_host->Bytes(), server_host_len); + server_host[server_host_len] = 0; index_name_len = BifConst::LogElasticSearch::index_name->Len(); - index_name = new char[index_name_len]; + index_name = new char[index_name_len + 1]; memcpy(index_name, BifConst::LogElasticSearch::index_name->Bytes(), index_name_len); + index_name[index_name_len] = 0; type_prefix_len = BifConst::LogElasticSearch::type_prefix->Len(); - type_prefix = new char[type_prefix_len]; + type_prefix = new char[type_prefix_len + 1]; memcpy(type_prefix, BifConst::LogElasticSearch::type_prefix->Bytes(), type_prefix_len); + type_prefix[type_prefix_len] = 0; server_port = BifConst::LogElasticSearch::server_port; batch_size = BifConst::LogElasticSearch::batch_size; - buffer = safe_malloc(MAX_EVENT_SIZE * batch_size); + buffer = (char *)safe_malloc(MAX_EVENT_SIZE * batch_size); current_offset = 0; - buffer[current_offset] = "\0"; + buffer[current_offset] = 0; counter = 0; + + curl_handle = HTTPSetup(); + curl_result = new char[1024]; } ElasticSearch::~ElasticSearch() @@ -74,115 +84,129 @@ bool ElasticSearch::DoFinish() return WriterBackend::DoFinish(); } -char* ElasticSearch::FormatField(const char* field_name, const char* field_value) -{ - char* result = new char[MAX_EVENT_SIZE]; - strcpy(result, "\""); - strcpy(result, field_name); - strcpy(result, "\":\""); - strcpy(result, field_value); - strcpy(result, "\""); - return result; - -} - bool ElasticSearch::BatchIndex() { - file = 
fopen("/tmp/batch.test", 'w'); - fwrite(buffer, current_offset, 1, file); - fclose(file); - file = 0; + return HTTPSend(); +} + +char* ElasticSearch::FieldToString(Value* val, const Field* field) +{ + char* result = new char[MAX_EVENT_SIZE]; + + switch ( val->type ) { + + // ElasticSearch defines bools as: 0 == false, everything else == true. So we treat it as an int. + case TYPE_BOOL: + case TYPE_INT: + sprintf(result, "%d", (int) val->val.int_val); return result; + + case TYPE_COUNT: + case TYPE_COUNTER: + sprintf(result, "%d", (int) val->val.uint_val); return result; + + case TYPE_PORT: + sprintf(result, "%d", (int) val->val.port_val.port); return result; + + case TYPE_SUBNET: + sprintf(result, "\"%s\"", Render(val->val.subnet_val).c_str()); return result; + + case TYPE_ADDR: + sprintf(result, "\"%s\"", Render(val->val.addr_val).c_str()); return result; + + case TYPE_INTERVAL: + case TYPE_TIME: + sprintf(result, "\"%d\"", (int) (val->val.double_val * 1000)); return result; + case TYPE_DOUBLE: + sprintf(result, "\"%s\"", Render(val->val.double_val).c_str()); return result; + + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_FUNC: + { + int size = val->val.string_val->size(); + const char* data = val->val.string_val->data(); + + if ( ! size ) + return 0; + sprintf(result, "\"%s\"", data); return result; + } + + case TYPE_TABLE: + { + char* tmp = new char[MAX_EVENT_SIZE]; + int tmp_offset = 0; + strcpy(tmp, "{"); + tmp_offset = 1; + bool result_seen = false; + for ( int j = 0; j < val->val.set_val.size; j++ ) + { + char* sub_field = FieldToString(val->val.set_val.vals[j], field); + if ( sub_field ){ + + if ( result_seen ){ + strcpy(tmp + tmp_offset, ","); + tmp_offset += 1; + } + else + result_seen = true; + + sprintf(tmp + tmp_offset, "\"%s\":%s", field->name.c_str(), sub_field); + tmp_offset = strlen(tmp); + } + } + strcpy(tmp + tmp_offset, "}"); + tmp_offset += 1; + sprintf(result, "%s", tmp); + return result; + } + + case TYPE_VECTOR: + { + char* tmp = new char[MAX_EVENT_SIZE]; + int tmp_offset = 0; + strcpy(tmp, "{"); + tmp_offset = 1; + bool result_seen = false; + for ( int j = 0; j < val->val.vector_val.size; j++ ) + { + char* sub_field = FieldToString(val->val.vector_val.vals[j], field); + if ( sub_field ){ + + if ( result_seen ){ + strcpy(tmp + tmp_offset, ","); + tmp_offset += 1; + } + else + result_seen = true; + + sprintf(tmp + tmp_offset, "\"%s\":%s", field->name.c_str(), sub_field); + tmp_offset = strlen(tmp); + } + } + strcpy(tmp + tmp_offset, "}"); + tmp_offset += 1; + sprintf(result, "%s", tmp); + return result; + } + + default: + { + return (char *)"{}"; + } + + } + } char* ElasticSearch::AddFieldToBuffer(Value* val, const Field* field) { if ( ! val->present ) - { - return ""; - } - - switch ( val->type ) { - - case TYPE_BOOL: - return FormatField(field->name, val->val.int_val ? 
"T" : "F"); - - case TYPE_INT: - return FormatField(field->name, val->val.int_val); - - case TYPE_COUNT: - case TYPE_COUNTER: - return FormatField(field->name, val->val.uint_val); - - case TYPE_PORT: - return FormatField(field->name, val->val.port_val.port); - - case TYPE_SUBNET: - return FormatField(field->name, Render(val->val.subnet_val)); - - case TYPE_ADDR: - return FormatField(field->name, Render(val->val.addr_val)); - - case TYPE_INTERVAL: - case TYPE_TIME: - case TYPE_DOUBLE: - return FormatField(field->name, val->val.double_val); - - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_FUNC: - { - int size = val->val.string_val->size(); - const char* data = val->val.string_val->data(); - - if ( ! size ) - return ""; - return FormatField(field->name, val->val.string_val->data()); - } - - case TYPE_TABLE: - { - if ( ! val->val.set_val.size ) - return ""; - - char* tmp = new char[MAX_EVENT_SIZE]; - strcpy(tmp, "{"); - for ( int j = 0; j < val->val.set_val.size; j++ ) - { - char* result = AddFieldToBuffer(val->val.set_val.vals[j], field); - bool resultSeen = false; - if ( result ){ - if ( resultSeen ) - strcpy(tmp, ","); - strcpy(tmp, result); - } - } - return FormatField(field->name, tmp); - } - - case TYPE_VECTOR: - { - if ( ! val->val.vector_val.size ) - return ""; - - char* tmp = new char[MAX_EVENT_SIZE]; - strcpy(tmp, "{"); - for ( int j = 0; j < val->val.vector_val.size; j++ ) - { - char* result = AddFieldToBuffer(val->val.vector_val.vals[j], field); - bool resultSeen = false; - if ( result ){ - if ( resultSeen ) - strcpy(tmp, ","); - strcpy(tmp, result); - } - } - return FormatField(field->name, tmp); - } - - default: - return ""; - } + return 0; + + char* result = new char[MAX_EVENT_SIZE]; + sprintf(result, "\"%s\":%s", field->name.c_str(), FieldToString(val, field)); + return result; } @@ -190,39 +214,37 @@ bool ElasticSearch::DoWrite(int num_fields, const Field* const * fields, Value** vals) { // Our action line looks like: - // {"index":"$index_name","type":"$type_prefix$path"}\n{ + // {"index":{"_index":"$index_name","_type":"$type_prefix$path"}}\n{ bool resultSeen = false; for ( int i = 0; i < num_fields; i++ ) { - char* result = DoWriteOne(vals[i], fields[i]); + char* result = AddFieldToBuffer(vals[i], fields[i]); if ( result ) { if ( ! resultSeen ) { - strcpy(buffer[current_offset], "{\"index\":\""); - strcat(buffer[current_offset], index_name); - strcat(buffer[current_offset], "\",\"type\":\""); - strcat(buffer[current_offset], type_prefix); - strcat(buffer[current_offset], Path()); - strcat(buffer[current_offset], "\"}\n{"); - current_offset = strlen(buffer); + current_offset += sprintf(buffer + current_offset, "{\"index\":{\"_index\":\"%s\",\"_type\":\"%s%s\"}\n{", index_name, type_prefix, Path().c_str()); resultSeen = true; } else { - strcat(buffer[current_offset], ","); + strcat(buffer, ","); current_offset += 1; } - strcat(buffer[current_offset], result); + strcat(buffer, result); current_offset += strlen(result); } } if ( resultSeen ) { - strcat(buffer[current_offset], "}\n"); + strcat(buffer, "}\n"); current_offset += 2; counter += 1; - if ( counter >= batch_size ) + if ( counter >= batch_size ){ BatchIndex(); + current_offset = 0; + buffer[current_offset] = 0; + counter = 0; + } } return true; } @@ -239,4 +261,55 @@ bool ElasticSearch::DoSetBuf(bool enabled) return true; } +// HTTP Functions start here. + +CURL* ElasticSearch::HTTPSetup() +{ + char URL[2048]; + CURL* handle; + struct curl_slist *headers=NULL; + + handle = curl_easy_init(); + if ( ! 
handle ) + return handle; + + sprintf(URL, "http://%s:%d/_bulk", server_host, (int) server_port); + curl_easy_setopt(handle, CURLOPT_URL, URL); + + headers = curl_slist_append(NULL, "Content-Type: text/json; charset=utf-8"); + curl_easy_setopt(handle, CURLOPT_HTTPHEADER, headers); + + curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, &logging::writer::ElasticSearch::HTTPReceive); // This gets called with the result. + curl_easy_setopt(handle, CURLOPT_POST, 1); // All requests are POSTs + + // HTTP 1.1 likes to use chunked encoded transfers, which aren't good for speed. The best (only?) way to disable that is to + // just use HTTP 1.0 + curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); + return handle; + +} + +bool ElasticSearch::HTTPReceive(void* ptr, int size, int nmemb, void* userdata){ + //TODO: Do some verification on the result? + return true; +} + +bool ElasticSearch::HTTPSend(){ + CURLcode return_code; + + curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, curl_result); + curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, buffer); + curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE, current_offset); + + return_code = curl_easy_perform(curl_handle); + switch(return_code) { + case CURLE_COULDNT_CONNECT: + case CURLE_COULDNT_RESOLVE_HOST: + case CURLE_WRITE_ERROR: + return false; + default: + return true; + } +} + #endif diff --git a/src/logging/writers/ElasticSearch.h b/src/logging/writers/ElasticSearch.h index 870290a6e0..ad3729f6da 100644 --- a/src/logging/writers/ElasticSearch.h +++ b/src/logging/writers/ElasticSearch.h @@ -5,6 +5,7 @@ #ifndef LOGGING_WRITER_ELASTICSEARCH_H #define LOGGING_WRITER_ELASTICSEARCH_H +#include #include "../WriterBackend.h" namespace logging { namespace writer { @@ -34,12 +35,20 @@ protected: private: char* AddFieldToBuffer(threading::Value* val, const threading::Field* field); - char* FormatField(const char* field_name, const char* field_value); + char* FieldToString(threading::Value* val, const threading::Field* field); bool BatchIndex(); + CURL* HTTPSetup(); + bool HTTPReceive(void* ptr, int size, int nmemb, void* userdata); + bool HTTPSend(); + + // Buffers, etc. char* buffer; int current_offset; - int counter; + uint64 counter; + + CURL* curl_handle; + char* curl_result; // From scripts char* cluster_name; diff --git a/src/types.bif b/src/types.bif index 76bac3e0e2..9b387b2c52 100644 --- a/src/types.bif +++ b/src/types.bif @@ -163,6 +163,7 @@ enum Writer %{ WRITER_NONE, WRITER_ASCII, WRITER_DATASERIES, + WRITER_ELASTICSEARCH, %} enum ID %{ From c8e770a499b3b9457bcb9908f574c4a7b4a9ddae Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Sun, 3 Jun 2012 22:51:21 -0400 Subject: [PATCH 350/651] Some basic documentation in doc/logging-elasticsearch.rst --- doc/logging-elasticsearch.rst | 97 +++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 doc/logging-elasticsearch.rst diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst new file mode 100644 index 0000000000..a3fb759c85 --- /dev/null +++ b/doc/logging-elasticsearch.rst @@ -0,0 +1,97 @@ + +======================================== +Indexed Logging Output with ElasticSearch +======================================== + +.. rst-class:: opening + + Bro's default ASCII log format is not exactly the most efficient + way for storing and searching large volumes of data. ElasticSearch + is a new and exciting technology for dealing with tons of data. + ElasticSearch is a search engine built on top of Apache's Lucene + project. 
It scales very well, both for distributed indexing and + distributed searching. + +.. contents:: + +Installing ElasticSearch +------------------------ + +ElasticSearch requires a JRE to run. Please download the latest version +from: . Once extracted, start +ElasticSearch with:: + +# ./bin/elasticsearch + +Compiling Bro with ElasticSearch Support +---------------------------------------- + +First, ensure that you have libcurl installed. Secondly, set the +``--enable-elasticsearch`` option:: + + # ./configure --enable-elasticsearch + [...] + ====================| Bro Build Summary |===================== + [...] + ElasticSearch: true + [...] + libCURL: true + [...] + ================================================================ + +Activating ElasticSearch +------------------------ + +The direct way to use ElasticSearch is to switch *all* log files over to +ElasticSearch. To do that, just add ``redef +Log::default_writer=Log::WRITER_ELASTICSEARCH;`` to your ``local.bro``. +For testing, you can also just pass that on the command line:: + + bro -r trace.pcap Log::default_writer=Log::WRITER_ELASTICSEARCH + +With that, Bro will now write all its output into ElasticSearch. You can +inspect these using ElasticSearch's REST-ful interface. For more +information, see: . + +There is also a rudimentary web interface to ElasticSearch, available at: +. + +You can also switch only individual files over to ElasticSearch by adding +code like this to your ``local.bro``:: + +.. code:: bro + + event bro_init() + { + local f = Log::get_filter(Conn::LOG, "default"); # Get default filter for connection log. + f$writer = Log::WRITER_ELASTICSEARCH; # Change writer type. + Log::add_filter(Conn::LOG, f); # Replace filter with adapted version. + } + +Configuring ElasticSearch +------------------------- + +Bro's ElasticSearch writer comes with a few configuraiton options:: + +- cluster_name:: Currently unused. +- server_host:: Where to send the data. Default localhost. +- server_port:: What port to send the data to. Default 9200. +- index_name:: ElasticSearch indexes are like databases in a standard DB model. +This is the name of the index to which to send the data. Default bro-logs. +- type_prefix:: ElasticSearch types are like tables in a standard DB model. +This is a prefix that gets prepended to Bro log names. +Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. +Default: none. +- batch_size:: How many messages to buffer before sending to ElasticSearch. +This is mainly a memory optimization - changing this doesn't seem to affect +indexing performance that much. Default: 10,000. + +TODO +---- + +Lots. + +- Perform multicast discovery for server. +- Better error detection. +- Dynamic index names. +- Better defaults (don't index loaded-plugins, for instance). From d09fc15b2d376da9bfab3c5b45eff0049589f217 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Sun, 3 Jun 2012 22:54:19 -0400 Subject: [PATCH 351/651] Minor documentation formatting change --- doc/logging-elasticsearch.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst index a3fb759c85..6c490a8b23 100644 --- a/doc/logging-elasticsearch.rst +++ b/doc/logging-elasticsearch.rst @@ -59,7 +59,7 @@ There is also a rudimentary web interface to ElasticSearch, available at: You can also switch only individual files over to ElasticSearch by adding code like this to your ``local.bro``:: -.. code:: bro +.. 
code::bro event bro_init() { From b3216906fe53e807070d6ce8587a7a01f9fea4f1 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Sun, 3 Jun 2012 22:56:38 -0400 Subject: [PATCH 352/651] Minor documentation formatting change --- doc/logging-elasticsearch.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst index 6c490a8b23..7a16acb0f1 100644 --- a/doc/logging-elasticsearch.rst +++ b/doc/logging-elasticsearch.rst @@ -77,14 +77,14 @@ Bro's ElasticSearch writer comes with a few configuraiton options:: - server_host:: Where to send the data. Default localhost. - server_port:: What port to send the data to. Default 9200. - index_name:: ElasticSearch indexes are like databases in a standard DB model. -This is the name of the index to which to send the data. Default bro-logs. + This is the name of the index to which to send the data. Default bro-logs. - type_prefix:: ElasticSearch types are like tables in a standard DB model. -This is a prefix that gets prepended to Bro log names. -Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. -Default: none. + This is a prefix that gets prepended to Bro log names. + Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. + Default: none. - batch_size:: How many messages to buffer before sending to ElasticSearch. -This is mainly a memory optimization - changing this doesn't seem to affect -indexing performance that much. Default: 10,000. + This is mainly a memory optimization - changing this doesn't seem to affect + indexing performance that much. Default: 10,000. TODO ---- From 360d7e2eda7e2a4b14f43021e197f6b65b28328e Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Sun, 3 Jun 2012 22:59:29 -0400 Subject: [PATCH 353/651] Minor documentation formatting change --- doc/logging-elasticsearch.rst | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst index 7a16acb0f1..4229748854 100644 --- a/doc/logging-elasticsearch.rst +++ b/doc/logging-elasticsearch.rst @@ -73,18 +73,12 @@ Configuring ElasticSearch Bro's ElasticSearch writer comes with a few configuraiton options:: -- cluster_name:: Currently unused. -- server_host:: Where to send the data. Default localhost. -- server_port:: What port to send the data to. Default 9200. -- index_name:: ElasticSearch indexes are like databases in a standard DB model. - This is the name of the index to which to send the data. Default bro-logs. -- type_prefix:: ElasticSearch types are like tables in a standard DB model. - This is a prefix that gets prepended to Bro log names. - Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. - Default: none. -- batch_size:: How many messages to buffer before sending to ElasticSearch. - This is mainly a memory optimization - changing this doesn't seem to affect - indexing performance that much. Default: 10,000. +- cluster_name: Currently unused. +- server_host: Where to send the data. Default localhost. +- server_port: What port to send the data to. Default 9200. +- index_name: ElasticSearch indexes are like databases in a standard DB model. This is the name of the index to which to send the data. Default bro-logs. +- type_prefix: ElasticSearch types are like tables in a standard DB model. This is a prefix that gets prepended to Bro log names. Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. Default: none. 
+- batch_size: How many messages to buffer before sending to ElasticSearch. This is mainly a memory optimization - changing this doesn't seem to affect indexing performance that much. Default: 10,000. TODO ---- From 3d8b86c00a7d5cb4a4dd52ef08e6d06d42ee88a3 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Sun, 3 Jun 2012 23:00:10 -0400 Subject: [PATCH 354/651] Minor documentation formatting change --- doc/logging-elasticsearch.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst index 4229748854..3b630c4bff 100644 --- a/doc/logging-elasticsearch.rst +++ b/doc/logging-elasticsearch.rst @@ -76,7 +76,8 @@ Bro's ElasticSearch writer comes with a few configuraiton options:: - cluster_name: Currently unused. - server_host: Where to send the data. Default localhost. - server_port: What port to send the data to. Default 9200. -- index_name: ElasticSearch indexes are like databases in a standard DB model. This is the name of the index to which to send the data. Default bro-logs. +- index_name: ElasticSearch indexes are like databases in a standard DB model. + This is the name of the index to which to send the data. Default bro-logs. - type_prefix: ElasticSearch types are like tables in a standard DB model. This is a prefix that gets prepended to Bro log names. Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. Default: none. - batch_size: How many messages to buffer before sending to ElasticSearch. This is mainly a memory optimization - changing this doesn't seem to affect indexing performance that much. Default: 10,000. From bf852b51f5bac6ba6b0dd14e4f6ab5fb73b68195 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Sun, 3 Jun 2012 23:01:18 -0400 Subject: [PATCH 355/651] Minor documentation formatting change --- doc/logging-elasticsearch.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst index 3b630c4bff..f891212ccd 100644 --- a/doc/logging-elasticsearch.rst +++ b/doc/logging-elasticsearch.rst @@ -76,8 +76,10 @@ Bro's ElasticSearch writer comes with a few configuraiton options:: - cluster_name: Currently unused. - server_host: Where to send the data. Default localhost. - server_port: What port to send the data to. Default 9200. + - index_name: ElasticSearch indexes are like databases in a standard DB model. This is the name of the index to which to send the data. Default bro-logs. + - type_prefix: ElasticSearch types are like tables in a standard DB model. This is a prefix that gets prepended to Bro log names. Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. Default: none. - batch_size: How many messages to buffer before sending to ElasticSearch. This is mainly a memory optimization - changing this doesn't seem to affect indexing performance that much. Default: 10,000. 
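To make the index_name and type_prefix options above concrete: for every record, the writer's DoWrite emits a bulk-API "action" line built from exactly those two values plus the log path, followed by the JSON document itself. Below is a standalone C++ sketch of that composition, following the action-line comment in DoWrite; the helper name bulk_action_line, the 1024-byte buffer, and the sample "conn" path are illustrative, and the real writer appends into its batch buffer with sprintf rather than returning a string:

#include <cstdio>
#include <string>

// Builds the bulk-API action line emitted before each JSON document, i.e.
// {"index":{"_index":"<index_name>","_type":"<type_prefix><path>"}}
// Values here are placeholders; the writer takes them from the
// LogElasticSearch::index_name and ::type_prefix script-level constants.
static std::string bulk_action_line(const std::string& index_name,
                                    const std::string& type_prefix,
                                    const std::string& log_path)
	{
	char line[1024];
	snprintf(line, sizeof(line),
	         "{\"index\":{\"_index\":\"%s\",\"_type\":\"%s%s\"}}\n",
	         index_name.c_str(), type_prefix.c_str(), log_path.c_str());
	return line;
	}

int main()
	{
	// With the defaults from elasticsearch.bro plus type_prefix="bro_",
	// a "conn" log would be indexed under bro-logs/bro_conn.
	printf("%s", bulk_action_line("bro-logs", "bro_", "conn").c_str());
	return 0;
	}
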
From 5915a2d304fd4d90f799057bfeb7425ee29e95ef Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Sun, 3 Jun 2012 23:02:07 -0400 Subject: [PATCH 356/651] Minor documentation formatting change --- doc/logging-elasticsearch.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst index f891212ccd..b3cf062de4 100644 --- a/doc/logging-elasticsearch.rst +++ b/doc/logging-elasticsearch.rst @@ -71,16 +71,19 @@ code like this to your ``local.bro``:: Configuring ElasticSearch ------------------------- -Bro's ElasticSearch writer comes with a few configuraiton options:: +Bro's ElasticSearch writer comes with a few configuration options:: - cluster_name: Currently unused. + - server_host: Where to send the data. Default localhost. + - server_port: What port to send the data to. Default 9200. - index_name: ElasticSearch indexes are like databases in a standard DB model. This is the name of the index to which to send the data. Default bro-logs. - type_prefix: ElasticSearch types are like tables in a standard DB model. This is a prefix that gets prepended to Bro log names. Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. Default: none. + - batch_size: How many messages to buffer before sending to ElasticSearch. This is mainly a memory optimization - changing this doesn't seem to affect indexing performance that much. Default: 10,000. TODO From 9851591317c822a763795f63365bbfab16a65d83 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 4 Jun 2012 16:14:30 -0500 Subject: [PATCH 357/651] Tunnel support performance optimization. Looks better to allocate Encapsulation objects on-demand when tunnels are discovered rather than always have an automatic, empty one for every packet. --- src/Conn.cc | 37 +++++++++++++++++++++--- src/Conn.h | 15 +++------- src/Sessions.cc | 64 +++++++++++++++++++++++++++--------------- src/Sessions.h | 7 +++-- src/Teredo.cc | 19 ++++++------- src/Tunnels.h | 8 ++++++ src/ayiya-analyzer.pac | 25 +++++++---------- 7 files changed, 110 insertions(+), 65 deletions(-) diff --git a/src/Conn.cc b/src/Conn.cc index 53abcc26eb..ec62a1b944 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -113,7 +113,7 @@ unsigned int Connection::external_connections = 0; IMPLEMENT_SERIAL(Connection, SER_CONNECTION); Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, - uint32 flow, const Encapsulation& arg_encap) + uint32 flow, const Encapsulation* arg_encap) { sessions = s; key = k; @@ -161,7 +161,10 @@ Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, uid = 0; // Will set later. 
- encapsulation = arg_encap; + if ( arg_encap ) + encapsulation = new Encapsulation(arg_encap); + else + encapsulation = 0; if ( conn_timer_mgr ) { @@ -190,12 +193,38 @@ Connection::~Connection() delete key; delete root_analyzer; delete conn_timer_mgr; + delete encapsulation; --current_connections; if ( conn_timer_mgr ) --external_connections; } +void Connection::CheckEncapsulation(const Encapsulation* arg_encap) + { + if ( encapsulation && arg_encap ) + { + if ( *encapsulation != *arg_encap ) + { + Event(tunnel_changed, 0, arg_encap->GetVectorVal()); + delete encapsulation; + encapsulation = new Encapsulation(arg_encap); + } + } + else if ( encapsulation ) + { + Encapsulation empty; + Event(tunnel_changed, 0, empty.GetVectorVal()); + delete encapsulation; + encapsulation = new Encapsulation(arg_encap); + } + else if ( arg_encap ) + { + Event(tunnel_changed, 0, arg_encap->GetVectorVal()); + encapsulation = new Encapsulation(arg_encap); + } + } + void Connection::Done() { finished = 1; @@ -352,8 +381,8 @@ RecordVal* Connection::BuildConnVal() char tmp[20]; conn_val->Assign(9, new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62))); - if ( encapsulation.Depth() > 0 ) - conn_val->Assign(10, encapsulation.GetVectorVal()); + if ( encapsulation && encapsulation->Depth() > 0 ) + conn_val->Assign(10, encapsulation->GetVectorVal()); } if ( root_analyzer ) diff --git a/src/Conn.h b/src/Conn.h index b7911b84fb..f2efa2971d 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -52,17 +52,10 @@ class Analyzer; class Connection : public BroObj { public: Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, - uint32 flow, const Encapsulation& arg_encap); + uint32 flow, const Encapsulation* arg_encap); virtual ~Connection(); - void CheckEncapsulation(const Encapsulation& arg_encap) - { - if ( encapsulation != arg_encap ) - { - Event(tunnel_changed, 0, arg_encap.GetVectorVal()); - encapsulation = arg_encap; - } - } + void CheckEncapsulation(const Encapsulation* arg_encap); // Invoked when connection is about to be removed. Use Ref(this) // inside Done to keep the connection object around (though it'll @@ -254,7 +247,7 @@ public: uint64 GetUID() const { return uid; } - const Encapsulation& GetEncapsulation() const + const Encapsulation* GetEncapsulation() const { return encapsulation; } void CheckFlowLabel(bool is_orig, uint32 flow_label); @@ -294,7 +287,7 @@ protected: double inactivity_timeout; RecordVal* conn_val; LoginConn* login_conn; // either nil, or this - Encapsulation encapsulation; // tunnels + const Encapsulation* encapsulation; // tunnels int suppress_event; // suppress certain events to once per conn. unsigned int installed_status_timer:1; diff --git a/src/Sessions.cc b/src/Sessions.cc index 56df65d6af..5c825218d5 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -179,8 +179,6 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, if ( record_all_packets ) DumpPacket(hdr, pkt); - Encapsulation encapsulation; - if ( pkt_elem && pkt_elem->IPHdr() ) // Fast path for "normal" IP packets if an IP_Hdr is // already extracted when doing PacketSort. Otherwise @@ -188,7 +186,7 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, // difference here is that header extraction in // PacketSort does not generate Weird events. 
- DoNextPacket(t, hdr, pkt_elem->IPHdr(), pkt, hdr_size, encapsulation); + DoNextPacket(t, hdr, pkt_elem->IPHdr(), pkt, hdr_size, 0); else { @@ -213,7 +211,7 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, if ( ip->ip_v == 4 ) { IP_Hdr ip_hdr(ip, false); - DoNextPacket(t, hdr, &ip_hdr, pkt, hdr_size, encapsulation); + DoNextPacket(t, hdr, &ip_hdr, pkt, hdr_size, 0); } else if ( ip->ip_v == 6 ) @@ -225,7 +223,7 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, } IP_Hdr ip_hdr((const struct ip6_hdr*) (pkt + hdr_size), false, caplen); - DoNextPacket(t, hdr, &ip_hdr, pkt, hdr_size, encapsulation); + DoNextPacket(t, hdr, &ip_hdr, pkt, hdr_size, 0); } else if ( ARP_Analyzer::IsARP(pkt, hdr_size) ) @@ -347,7 +345,7 @@ int NetSessions::CheckConnectionTag(Connection* conn) void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, const IP_Hdr* ip_hdr, const u_char* const pkt, - int hdr_size, Encapsulation& encapsulation) + int hdr_size, const Encapsulation* encapsulation) { uint32 caplen = hdr->caplen - hdr_size; const struct ip* ip4 = ip_hdr->IP4_Hdr(); @@ -525,23 +523,19 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, case IPPROTO_IPV4: case IPPROTO_IPV6: { - if ( encapsulation.Depth() >= BifConst::Tunnel::max_depth ) + if ( encapsulation && + encapsulation->Depth() >= BifConst::Tunnel::max_depth ) { reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), "tunnel_depth"); Remove(f); return; } - IP_Hdr* inner_ip; - if ( proto == IPPROTO_IPV6 ) - inner_ip = new IP_Hdr((const struct ip6_hdr*) data, false, caplen); - else - inner_ip = new IP_Hdr((const struct ip*) data, false); - - struct pcap_pkthdr fake_hdr; - fake_hdr.caplen = fake_hdr.len = caplen; - fake_hdr.ts = hdr->ts; + Encapsulation* outer = new Encapsulation(encapsulation); + // Look up to see if we've already seen this IP tunnel, identified + // by the pair of IP addresses, so that we can always associate the + // same UID with it. 
IPPair tunnel_idx; if ( ip_hdr->SrcAddr() < ip_hdr->DstAddr() ) tunnel_idx = IPPair(ip_hdr->SrcAddr(), ip_hdr->DstAddr()); @@ -555,21 +549,22 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, EncapsulatingConn ec(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), BifEnum::Tunnel::IP); ip_tunnels[tunnel_idx] = ec; - encapsulation.Add(ec); + outer->Add(ec); } else - encapsulation.Add(it->second); + outer->Add(it->second); - DoNextPacket(t, &fake_hdr, inner_ip, data, 0, encapsulation); + DoNextInnerPacket(t, hdr, caplen, data, proto, outer); - delete inner_ip; + delete outer; Remove(f); return; } case IPPROTO_NONE: { - if ( encapsulation.LastType() == BifEnum::Tunnel::TEREDO ) + if ( encapsulation && + encapsulation->LastType() == BifEnum::Tunnel::TEREDO ) { // TODO: raise bubble packet event } @@ -680,6 +675,31 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } } +void NetSessions::DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, + int caplen, const u_char* pkt, int proto, + const Encapsulation* outer_encap) + { + IP_Hdr* inner_ip = 0; + + if ( proto == IPPROTO_IPV6 ) + inner_ip = new IP_Hdr((const struct ip6_hdr*) pkt, false, caplen); + else if ( proto == IPPROTO_IPV4 ) + inner_ip = new IP_Hdr((const struct ip*) pkt, false); + else + reporter->InternalError("Bad IP protocol version in DoNextInnerPacket"); + + struct pcap_pkthdr fake_hdr; + fake_hdr.caplen = fake_hdr.len = caplen; + if ( hdr ) + fake_hdr.ts = hdr->ts; + else + fake_hdr.ts.tv_sec = fake_hdr.ts.tv_usec = 0; + + DoNextPacket(t, &fake_hdr, inner_ip, pkt, 0, outer_encap); + + delete inner_ip; + } + bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, const struct pcap_pkthdr* h, const u_char* p) { @@ -1013,7 +1033,7 @@ void NetSessions::GetStats(SessionStats& s) const Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, const u_char* data, int proto, uint32 flow_label, - const Encapsulation& encapsulation) + const Encapsulation* encapsulation) { // FIXME: This should be cleaned up a bit, it's too protocol-specific. // But I'm not yet sure what the right abstraction for these things is. diff --git a/src/Sessions.h b/src/Sessions.h index e2bc0d704e..45c1e0750a 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -136,7 +136,10 @@ public: void DoNextPacket(double t, const struct pcap_pkthdr* hdr, const IP_Hdr* ip_hdr, const u_char* const pkt, - int hdr_size, Encapsulation& encapsulation); + int hdr_size, const Encapsulation* encapsulation); + + void DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, int caplen, + const u_char* pkt, int proto, const Encapsulation* outer_encap); unsigned int ConnectionMemoryUsage(); unsigned int ConnectionMemoryUsageConnVals(); @@ -150,7 +153,7 @@ protected: Connection* NewConn(HashKey* k, double t, const ConnID* id, const u_char* data, int proto, uint32 flow_lable, - const Encapsulation& encapsulation); + const Encapsulation* encapsulation); // Check whether the tag of the current packet is consistent with // the given connection. 
Returns: diff --git a/src/Teredo.cc b/src/Teredo.cc index 39ecef286f..08eb7d0d2b 100644 --- a/src/Teredo.cc +++ b/src/Teredo.cc @@ -92,7 +92,9 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, { Analyzer::DeliverPacket(len, data, orig, seq, ip, caplen); - if ( Conn()->GetEncapsulation().Depth() >= BifConst::Tunnel::max_depth ) + const Encapsulation* e = Conn()->GetEncapsulation(); + + if ( e && e->Depth() >= BifConst::Tunnel::max_depth ) { reporter->Weird(Conn(), "tunnel_depth"); return; @@ -107,20 +109,15 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, return; } - IP_Hdr inner_ip((const struct ip6_hdr*) te.InnerIP(), false, len); - ProtocolConfirmation(); // TODO: raise Teredo-specific events - struct pcap_pkthdr fake_hdr; - fake_hdr.caplen = fake_hdr.len = len; - fake_hdr.ts.tv_sec = fake_hdr.ts.tv_usec = 0; - - Encapsulation encap(Conn()->GetEncapsulation()); + Encapsulation* outer = new Encapsulation(e); EncapsulatingConn ec(Conn(), BifEnum::Tunnel::TEREDO); - encap.Add(ec); + outer->Add(ec); - sessions->DoNextPacket(network_time, &fake_hdr, &inner_ip, te.InnerIP(), 0, - encap); + sessions->DoNextInnerPacket(network_time, 0, len, te.InnerIP(), + IPPROTO_IPV6, outer); + delete outer; } diff --git a/src/Tunnels.h b/src/Tunnels.h index 0f9c4f4107..3365c8d0ca 100644 --- a/src/Tunnels.h +++ b/src/Tunnels.h @@ -78,6 +78,14 @@ public: conns = 0; } + Encapsulation(const Encapsulation* other) + { + if ( other && other->conns ) + conns = new vector(*(other->conns)); + else + conns = 0; + } + Encapsulation& operator=(const Encapsulation& other) { if ( this == &other ) return *this; diff --git a/src/ayiya-analyzer.pac b/src/ayiya-analyzer.pac index 1a91cb1229..2fb787a4e5 100644 --- a/src/ayiya-analyzer.pac +++ b/src/ayiya-analyzer.pac @@ -12,8 +12,9 @@ flow AYIYA_Flow function process_ayiya(pdu: PDU): bool %{ Connection *c = connection()->bro_analyzer()->Conn(); + const Encapsulation* e = c->GetEncapsulation(); - if ( c->GetEncapsulation().Depth() >= BifConst::Tunnel::max_depth ) + if ( e && e->Depth() >= BifConst::Tunnel::max_depth ) { reporter->Weird(c, "tunnel_depth"); return false; @@ -25,12 +26,8 @@ flow AYIYA_Flow return false; } - IP_Hdr* inner_ip; - if ( ${pdu.next_header} == IPPROTO_IPV6 ) - inner_ip = new IP_Hdr((const struct ip6_hdr*) ${pdu.packet}.data(), false, ${pdu.packet}.length()); - else if ( ${pdu.next_header} == IPPROTO_IPV4 ) - inner_ip = new IP_Hdr((const struct ip*) ${pdu.packet}.data(), false); - else + if ( ${pdu.next_header} != IPPROTO_IPV6 && + ${pdu.next_header} != IPPROTO_IPV4 ) { reporter->Weird(c, "ayiya_tunnel_non_ip"); return false; @@ -38,17 +35,15 @@ flow AYIYA_Flow connection()->bro_analyzer()->ProtocolConfirmation(); - struct pcap_pkthdr fake_hdr; - fake_hdr.caplen = fake_hdr.len = ${pdu.packet}.length(); - fake_hdr.ts.tv_sec = fake_hdr.ts.tv_usec = 0; - - Encapsulation encap(c->GetEncapsulation()); + Encapsulation* outer = new Encapsulation(e); EncapsulatingConn ec(c, BifEnum::Tunnel::AYIYA); - encap.Add(ec); + outer->Add(ec); - sessions->DoNextPacket(network_time(), &fake_hdr, inner_ip, ${pdu.packet}.data(), 0, encap); + sessions->DoNextInnerPacket(network_time(), 0, ${pdu.packet}.length(), + ${pdu.packet}.data(), ${pdu.next_header}, + outer); - delete inner_ip; + delete outer; return true; %} From ae85bd1b957d6493764d8e3c1d6bec2143428bfb Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 4 Jun 2012 16:57:46 -0500 Subject: [PATCH 358/651] Suppress Teredo weirds unless decapsulation was successful 
once before. --- src/Analyzer.h | 5 +++++ src/Teredo.cc | 34 +++++++++++++++++----------------- src/Teredo.h | 20 +++++++++++++++++--- 3 files changed, 39 insertions(+), 20 deletions(-) diff --git a/src/Analyzer.h b/src/Analyzer.h index 7797e215fe..ef596ac696 100644 --- a/src/Analyzer.h +++ b/src/Analyzer.h @@ -215,6 +215,11 @@ public: // analyzer, even if the method is called multiple times. virtual void ProtocolConfirmation(); + // Return whether the analyzer previously called ProtocolConfirmation() + // at least once before. + virtual bool ProtocolConfirmed() const + { return protocol_confirmed; } + // Report that we found a significant protocol violation which might // indicate that the analyzed data is in fact not the expected // protocol. The protocol_violation event is raised once per call to diff --git a/src/Teredo.cc b/src/Teredo.cc index 08eb7d0d2b..c97d4fb8af 100644 --- a/src/Teredo.cc +++ b/src/Teredo.cc @@ -13,7 +13,8 @@ bool TeredoEncapsulation::DoParse(const u_char* data, int& len, { if ( len < 2 ) { - reporter->Weird(conn, "truncated_Teredo"); + Weird("truncated_Teredo"); + return false; } uint16 tag = ntohs((*((const uint16*)data))); @@ -27,7 +28,7 @@ bool TeredoEncapsulation::DoParse(const u_char* data, int& len, if ( len < 8 ) { - reporter->Weird(conn, "truncated_Teredo_origin_indication"); + Weird("truncated_Teredo_origin_indication"); return false; } @@ -46,7 +47,7 @@ bool TeredoEncapsulation::DoParse(const u_char* data, int& len, if ( len < 4 ) { - reporter->Weird(conn, "truncated_Teredo_authentication"); + Weird("truncated_Teredo_authentication"); return false; } @@ -56,7 +57,7 @@ bool TeredoEncapsulation::DoParse(const u_char* data, int& len, if ( len < tot_len ) { - reporter->Weird(conn, "truncated_Teredo_authentication"); + Weird("truncated_Teredo_authentication"); return false; } @@ -70,13 +71,13 @@ bool TeredoEncapsulation::DoParse(const u_char* data, int& len, // IPv6 if ( len < 40 ) { - reporter->Weird(conn, "truncated_IPv6_in_Teredo"); + Weird("truncated_IPv6_in_Teredo"); return false; } if ( len - 40 != ntohs(((const struct ip6_hdr*)data)->ip6_plen) ) { - reporter->Weird(conn, "Teredo_payload_len_mismatch"); + Weird("Teredo_payload_len_mismatch"); return false; } @@ -92,25 +93,24 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, { Analyzer::DeliverPacket(len, data, orig, seq, ip, caplen); - const Encapsulation* e = Conn()->GetEncapsulation(); - - if ( e && e->Depth() >= BifConst::Tunnel::max_depth ) - { - reporter->Weird(Conn(), "tunnel_depth"); - return; - } - - TeredoEncapsulation te(Conn()); + TeredoEncapsulation te(this); if ( ! te.Parse(data, len) ) { - ProtocolViolation("Invalid Teredo encapsulation", (const char*)data, - len); + ProtocolViolation("Bad Teredo encapsulation", (const char*)data, len); return; } ProtocolConfirmation(); + const Encapsulation* e = Conn()->GetEncapsulation(); + + if ( e && e->Depth() >= BifConst::Tunnel::max_depth ) + { + Weird("tunnel_depth"); + return; + } + // TODO: raise Teredo-specific events Encapsulation* outer = new Encapsulation(e); diff --git a/src/Teredo.h b/src/Teredo.h index 0662099233..d5422cdef4 100644 --- a/src/Teredo.h +++ b/src/Teredo.h @@ -24,6 +24,17 @@ public: //TODO: specific option to turn off Teredo analysis? { return BifConst::Tunnel::max_depth > 0; } + /** + * Emits a weird only if the analyzer has previously been able to + * decapsulate a Teredo packet since otherwise the weirds could happen + * frequently enough to be less than helpful. 
+ */ + void Weird(const char* name) const + { + if ( ProtocolConfirmed() ) + reporter->Weird(Conn(), name); + } + protected: friend class AnalyzerTimer; void ExpireTimer(double t); @@ -31,8 +42,8 @@ protected: class TeredoEncapsulation { public: - TeredoEncapsulation(Connection* c) - : inner_ip(0), origin_indication(0), auth(0), conn(c) + TeredoEncapsulation(const Teredo_Analyzer* ta) + : inner_ip(0), origin_indication(0), auth(0), analyzer(ta) {} /** @@ -54,10 +65,13 @@ public: protected: bool DoParse(const u_char* data, int& len, bool found_orig, bool found_au); + void Weird(const char* name) const + { analyzer->Weird(name); } + const u_char* inner_ip; const u_char* origin_indication; const u_char* auth; - Connection* conn; + const Teredo_Analyzer* analyzer; }; #endif From 8540c4d0cdc240cda422d5db323eff2d722c2b03 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 5 Jun 2012 09:58:44 -0500 Subject: [PATCH 359/651] Add more sanity checks before recursing on encapsulated IP packets. i.e. the IP protocol version and payload length get checked for consistency before attempting further analysis. --- src/Sessions.cc | 31 ++++++++++++++++++++++++++----- src/Sessions.h | 25 +++++++++++++++++++++++-- src/Teredo.cc | 15 ++++++++++----- src/ayiya-analyzer.pac | 39 +++++++++++++++++++++++++++++++++------ 4 files changed, 92 insertions(+), 18 deletions(-) diff --git a/src/Sessions.cc b/src/Sessions.cc index 5c825218d5..d873b269fe 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -554,7 +554,13 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, else outer->Add(it->second); - DoNextInnerPacket(t, hdr, caplen, data, proto, outer); + int result = DoNextInnerPacket(t, hdr, caplen, data, proto, outer); + if ( result < 0 ) + reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), + "truncated_inner_IP"); + else if ( result > 0 ) + reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), + "inner_IP_payload_mismatch"); delete outer; Remove(f); @@ -675,19 +681,33 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } } -void NetSessions::DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, - int caplen, const u_char* pkt, int proto, - const Encapsulation* outer_encap) +int NetSessions::DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, + int caplen, const u_char* const pkt, int proto, + const Encapsulation* outer) { IP_Hdr* inner_ip = 0; if ( proto == IPPROTO_IPV6 ) + { + if ( caplen < (int)sizeof(struct ip6_hdr) ) + return -1; inner_ip = new IP_Hdr((const struct ip6_hdr*) pkt, false, caplen); + } else if ( proto == IPPROTO_IPV4 ) + { + if ( caplen < (int)sizeof(struct ip) ) + return -1; inner_ip = new IP_Hdr((const struct ip*) pkt, false); + } else reporter->InternalError("Bad IP protocol version in DoNextInnerPacket"); + if ( (uint32)caplen != inner_ip->TotalLen() ) + { + delete inner_ip; + return (uint32)caplen < inner_ip->TotalLen() ? 
-1 : 1; + } + struct pcap_pkthdr fake_hdr; fake_hdr.caplen = fake_hdr.len = caplen; if ( hdr ) @@ -695,9 +715,10 @@ void NetSessions::DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, else fake_hdr.ts.tv_sec = fake_hdr.ts.tv_usec = 0; - DoNextPacket(t, &fake_hdr, inner_ip, pkt, 0, outer_encap); + DoNextPacket(t, &fake_hdr, inner_ip, pkt, 0, outer); delete inner_ip; + return 0; } bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, diff --git a/src/Sessions.h b/src/Sessions.h index 45c1e0750a..c374dcb667 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -138,8 +138,29 @@ public: const IP_Hdr* ip_hdr, const u_char* const pkt, int hdr_size, const Encapsulation* encapsulation); - void DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, int caplen, - const u_char* pkt, int proto, const Encapsulation* outer_encap); + /** + * Wrapper that recurses on DoNextPacket for encapsulated IP packets, if + * they appear to be valid based on whether \a pkt is long enough to be an + * IP header and also that the payload length field of that header matches + * matches the actual length of \a pkt given by \a caplen. + * + * @param t Network time. + * @param hdr If the outer pcap header is available, this pointer can be set + * so that the fake pcap header passed to DoNextPacket will use + * the same timeval. The caplen and len fields of the fake pcap + * header are always set to \a caplen. + * @param caplen The length of \a pkt in bytes. + * @param pkt The inner IP packet data. + * @param proto Either IPPROTO_IPV6 or IPPROTO_IPV4 to indicate which IP + * protocol \a pkt corresponds to. + * @param outer_encap The encapsulation information for the inner IP packet. + * @return 0 If the inner IP packet was valid and passed to DoNextPacket, + * else -1 if the \a caplen was greater than the supposed IP + * packet's payload length field or 1 if \a caplen was less than + * the supposed IP packet's payload length. + */ + int DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, int caplen, + const u_char* const pkt, int proto, const Encapsulation* outer); unsigned int ConnectionMemoryUsage(); unsigned int ConnectionMemoryUsageConnVals(); diff --git a/src/Teredo.cc b/src/Teredo.cc index c97d4fb8af..c7131866f9 100644 --- a/src/Teredo.cc +++ b/src/Teredo.cc @@ -97,12 +97,10 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, if ( ! 
te.Parse(data, len) ) { - ProtocolViolation("Bad Teredo encapsulation", (const char*)data, len); + ProtocolViolation("Bad Teredo encapsulation", (const char*) data, len); return; } - ProtocolConfirmation(); - const Encapsulation* e = Conn()->GetEncapsulation(); if ( e && e->Depth() >= BifConst::Tunnel::max_depth ) @@ -117,7 +115,14 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, EncapsulatingConn ec(Conn(), BifEnum::Tunnel::TEREDO); outer->Add(ec); - sessions->DoNextInnerPacket(network_time, 0, len, te.InnerIP(), - IPPROTO_IPV6, outer); + int result = sessions->DoNextInnerPacket(network_time, 0, len, te.InnerIP(), + IPPROTO_IPV6, outer); + if ( result == 0 ) + ProtocolConfirmation(); + else if ( result < 0 ) + ProtocolViolation("Truncated Teredo", (const char*) data, len); + else + ProtocolViolation("Teredo payload length", (const char*) data, len); + delete outer; } diff --git a/src/ayiya-analyzer.pac b/src/ayiya-analyzer.pac index 2fb787a4e5..361d5f8c66 100644 --- a/src/ayiya-analyzer.pac +++ b/src/ayiya-analyzer.pac @@ -32,19 +32,46 @@ flow AYIYA_Flow reporter->Weird(c, "ayiya_tunnel_non_ip"); return false; } - - connection()->bro_analyzer()->ProtocolConfirmation(); + + if ( ${pdu.packet}.length() < (int)sizeof(struct ip) ) + { + connection()->bro_analyzer()->ProtocolViolation( + "Truncated AYIYA", (const char*) ${pdu.packet}.data(), + ${pdu.packet}.length()); + return false; + } + + const struct ip* ip = (const struct ip*) ${pdu.packet}.data(); + + if ( ( ${pdu.next_header} == IPPROTO_IPV6 && ip->ip_v != 6 ) || + ( ${pdu.next_header} == IPPROTO_IPV4 && ip->ip_v != 4) ) + { + connection()->bro_analyzer()->ProtocolViolation( + "AYIYA next header mismatch", (const char*)${pdu.packet}.data(), + ${pdu.packet}.length()); + return false; + } Encapsulation* outer = new Encapsulation(e); EncapsulatingConn ec(c, BifEnum::Tunnel::AYIYA); outer->Add(ec); - sessions->DoNextInnerPacket(network_time(), 0, ${pdu.packet}.length(), - ${pdu.packet}.data(), ${pdu.next_header}, - outer); + int result = sessions->DoNextInnerPacket(network_time(), 0, + ${pdu.packet}.length(), ${pdu.packet}.data(), + ${pdu.next_header}, outer); + if ( result == 0 ) + connection()->bro_analyzer()->ProtocolConfirmation(); + else if ( result < 0 ) + connection()->bro_analyzer()->ProtocolViolation( + "Truncated AYIYA", (const char*) ${pdu.packet}.data(), + ${pdu.packet}.length()); + else + connection()->bro_analyzer()->ProtocolViolation( + "AYIYA payload length", (const char*) ${pdu.packet}.data(), + ${pdu.packet}.length()); delete outer; - return true; + return (result == 0) ? true : false; %} }; From 976e8db1559f30d4733fa346515bc03247a20825 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 5 Jun 2012 10:17:41 -0500 Subject: [PATCH 360/651] Add independent options to toggle the different decapsulation methods --- scripts/base/init-bare.bro | 11 ++++++++++- src/AYIYA.h | 4 ++-- src/Sessions.cc | 7 +++++++ src/Teredo.h | 4 ++-- src/const.bif | 3 +++ 5 files changed, 24 insertions(+), 5 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 920f4a47c2..70905824f3 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -2650,8 +2650,17 @@ const ignore_keep_alive_rexmit = F &redef; module Tunnel; export { ## The maximum depth of a tunnel to decapsulate until giving up. - ## Setting this to zero will disable tunnel decapsulation. + ## Setting this to zero will disable all types of tunnel decapsulation. 
const max_depth: count = 2 &redef; + + ## Toggle whether to do IPv{4,6}-in-IPv{4,6} decapsulation. + const enable_ip = T &redef; + + ## Toggle whether to do IPv{4,6}-in-AYIYA decapsulation. + const enable_ayiya = T &redef; + + ## Toggle whether to do IPv6-in-Teredo decapsulation. + const enable_teredo = T &redef; } # end export module GLOBAL; diff --git a/src/AYIYA.h b/src/AYIYA.h index 2122cafee6..79b41553c7 100644 --- a/src/AYIYA.h +++ b/src/AYIYA.h @@ -16,8 +16,8 @@ public: { return new AYIYA_Analyzer(conn); } static bool Available() - // TODO: specific option to turn off AYIYA analysis - { return BifConst::Tunnel::max_depth > 0; } + { return BifConst::Tunnel::enable_ayiya && + BifConst::Tunnel::max_depth > 0; } protected: friend class AnalyzerTimer; diff --git a/src/Sessions.cc b/src/Sessions.cc index d873b269fe..9738f380d7 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -523,6 +523,13 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, case IPPROTO_IPV4: case IPPROTO_IPV6: { + if ( ! BifConst::Tunnel::enable_ip ) + { + reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), "IP_tunnel"); + Remove(f); + return; + } + if ( encapsulation && encapsulation->Depth() >= BifConst::Tunnel::max_depth ) { diff --git a/src/Teredo.h b/src/Teredo.h index d5422cdef4..554e97f29a 100644 --- a/src/Teredo.h +++ b/src/Teredo.h @@ -21,8 +21,8 @@ public: { return new Teredo_Analyzer(conn); } static bool Available() - //TODO: specific option to turn off Teredo analysis? - { return BifConst::Tunnel::max_depth > 0; } + { return BifConst::Tunnel::enable_teredo && + BifConst::Tunnel::max_depth > 0; } /** * Emits a weird only if the analyzer has previously been able to diff --git a/src/const.bif b/src/const.bif index 553e8b6d58..3e8fe4b53b 100644 --- a/src/const.bif +++ b/src/const.bif @@ -12,5 +12,8 @@ const NFS3::return_data_max: count; const NFS3::return_data_first_only: bool; const Tunnel::max_depth: count; +const Tunnel::enable_ip: bool; +const Tunnel::enable_ayiya: bool; +const Tunnel::enable_teredo: bool; const Threading::heartbeat_interval: interval; From 89cb103a2c07aede9969ee586225c4d7b0411a29 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Tue, 5 Jun 2012 11:25:10 -0400 Subject: [PATCH 361/651] Fixed a bug with the MIME analyzer not removing whitespace on wrapped headers. - No test due to lack of tracefile with wrapped header. --- src/MIME.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/MIME.cc b/src/MIME.cc index 4a7c0268b0..11f764266d 100644 --- a/src/MIME.cc +++ b/src/MIME.cc @@ -426,7 +426,8 @@ void MIME_Entity::ContHeader(int len, const char* data) return; } - current_header_line->append(len, data); + int ws = MIME_count_leading_lws(len, data); + current_header_line->append(len - ws, data + ws); } void MIME_Entity::FinishHeader() From b52436a53bfc849afbab7b1ae9b1015736df230f Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 5 Jun 2012 12:23:16 -0500 Subject: [PATCH 362/651] Refactor some of the NetSessions routines that recurse on IP packets. Separating the IP packet validation/parsing from the recursive call to DoNextPacket to make it easier for analyzers to get access to the inner IP_Hdr. 
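
The call sites updated below (Teredo.cc and ayiya-analyzer.pac) all end up following the same shape: parse and validate the inner packet first, then recurse on it. A minimal sketch of that shape, assuming it sits inside a tunnel analyzer's packet handler where `payload`/`caplen` point at the encapsulated IP packet; the variable names and the TEREDO tunnel type are illustrative only:

	// Sketch only: assumes analyzer context (Conn(), ProtocolViolation())
	// and the NetSessions/Encapsulation API from the hunks below.
	IP_Hdr* inner = 0;
	int rslt = sessions->ParseIPPacket(caplen, payload, IPPROTO_IPV6, inner);

	if ( rslt != 0 )
		{
		// rslt < 0: payload too short for the claimed IP header (truncated),
		// rslt > 0: payload longer than the header's total length field.
		ProtocolViolation("bad encapsulated IP packet",
		                  (const char*) payload, caplen);
		return;
		}

	// The parsed inner header is now available to the analyzer itself
	// (e.g. inner->NextProto()) before any recursion happens.
	Encapsulation* outer = new Encapsulation(Conn()->GetEncapsulation());
	EncapsulatingConn ec(Conn(), BifEnum::Tunnel::TEREDO);
	outer->Add(ec);

	sessions->DoNextInnerPacket(network_time, 0, inner, outer);

	delete inner;
	delete outer;

Splitting the validation out of DoNextInnerPacket also leaves the choice of how to report a bad inner packet with the caller: the IP-in-IP path below raises a Weird, while the Teredo and AYIYA analyzers raise a ProtocolViolation.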
--- src/Sessions.cc | 102 ++++++++++++++++++++++++----------------- src/Sessions.h | 33 ++++++++----- src/Teredo.cc | 24 ++++++---- src/ayiya-analyzer.pac | 22 +++++---- 4 files changed, 110 insertions(+), 71 deletions(-) diff --git a/src/Sessions.cc b/src/Sessions.cc index 9738f380d7..4e81ba1661 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -538,6 +538,23 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, return; } + // Check for a valid inner packet first. + IP_Hdr* inner = 0; + int result = ParseIPPacket(caplen, data, proto, inner); + + if ( result < 0 ) + reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), + "truncated_inner_IP"); + else if ( result > 0 ) + reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), + "inner_IP_payload_mismatch"); + + if ( result != 0 ) + { + Remove(f); + return; + } + Encapsulation* outer = new Encapsulation(encapsulation); // Look up to see if we've already seen this IP tunnel, identified @@ -561,14 +578,9 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, else outer->Add(it->second); - int result = DoNextInnerPacket(t, hdr, caplen, data, proto, outer); - if ( result < 0 ) - reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), - "truncated_inner_IP"); - else if ( result > 0 ) - reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), - "inner_IP_payload_mismatch"); + DoNextInnerPacket(t, hdr, inner, outer); + delete inner; delete outer; Remove(f); return; @@ -576,12 +588,11 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, case IPPROTO_NONE: { - if ( encapsulation && - encapsulation->LastType() == BifEnum::Tunnel::TEREDO ) - { - // TODO: raise bubble packet event - } - else + // If the packet is encapsulated in Teredo, then it was a bubble and + // the Teredo analyzer may have raised an event for that, else we're + // not sure the reason for the No Next header in the packet. + if ( ! ( encapsulation && + encapsulation->LastType() == BifEnum::Tunnel::TEREDO ) ) Weird("ipv6_no_next", hdr, pkt); Remove(f); @@ -688,43 +699,50 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } } -int NetSessions::DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, - int caplen, const u_char* const pkt, int proto, - const Encapsulation* outer) +void NetSessions::DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, + const IP_Hdr* inner, const Encapsulation* outer) { - IP_Hdr* inner_ip = 0; - - if ( proto == IPPROTO_IPV6 ) - { - if ( caplen < (int)sizeof(struct ip6_hdr) ) - return -1; - inner_ip = new IP_Hdr((const struct ip6_hdr*) pkt, false, caplen); - } - else if ( proto == IPPROTO_IPV4 ) - { - if ( caplen < (int)sizeof(struct ip) ) - return -1; - inner_ip = new IP_Hdr((const struct ip*) pkt, false); - } - else - reporter->InternalError("Bad IP protocol version in DoNextInnerPacket"); - - if ( (uint32)caplen != inner_ip->TotalLen() ) - { - delete inner_ip; - return (uint32)caplen < inner_ip->TotalLen() ? 
-1 : 1; - } - struct pcap_pkthdr fake_hdr; - fake_hdr.caplen = fake_hdr.len = caplen; + fake_hdr.caplen = fake_hdr.len = inner->TotalLen(); if ( hdr ) fake_hdr.ts = hdr->ts; else fake_hdr.ts.tv_sec = fake_hdr.ts.tv_usec = 0; - DoNextPacket(t, &fake_hdr, inner_ip, pkt, 0, outer); + const u_char* pkt = 0; + if ( inner->IP4_Hdr() ) + pkt = (const u_char*) inner->IP4_Hdr(); + else + pkt = (const u_char*) inner->IP6_Hdr(); + + DoNextPacket(t, &fake_hdr, inner, pkt, 0, outer); + } + +int NetSessions::ParseIPPacket(int caplen, const u_char* const pkt, int proto, + IP_Hdr*& inner) + { + if ( proto == IPPROTO_IPV6 ) + { + if ( caplen < (int)sizeof(struct ip6_hdr) ) + return -1; + inner = new IP_Hdr((const struct ip6_hdr*) pkt, false, caplen); + } + else if ( proto == IPPROTO_IPV4 ) + { + if ( caplen < (int)sizeof(struct ip) ) + return -1; + inner = new IP_Hdr((const struct ip*) pkt, false); + } + else + reporter->InternalError("Bad IP protocol version in DoNextInnerPacket"); + + if ( (uint32)caplen != inner->TotalLen() ) + { + delete inner; + inner = 0; + return (uint32)caplen < inner->TotalLen() ? -1 : 1; + } - delete inner_ip; return 0; } diff --git a/src/Sessions.h b/src/Sessions.h index c374dcb667..9273a02787 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -139,28 +139,37 @@ public: int hdr_size, const Encapsulation* encapsulation); /** - * Wrapper that recurses on DoNextPacket for encapsulated IP packets, if - * they appear to be valid based on whether \a pkt is long enough to be an - * IP header and also that the payload length field of that header matches - * matches the actual length of \a pkt given by \a caplen. + * Wrapper that recurses on DoNextPacket for encapsulated IP packets. * * @param t Network time. * @param hdr If the outer pcap header is available, this pointer can be set * so that the fake pcap header passed to DoNextPacket will use * the same timeval. The caplen and len fields of the fake pcap - * header are always set to \a caplen. + * header are always set to the TotalLength() of \a inner. + * @param outer The encapsulation information for the inner IP packet. + */ + void DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, + const IP_Hdr* inner, const Encapsulation* outer); + + /** + * Returns a wrapper IP_Hdr object if \a pkt appears to be a valid IPv4 + * or IPv6 header based on whether it's long enough to contain such a header + * and also that the payload length field of that header matches the actual + * length of \a pkt given by \a caplen. + * * @param caplen The length of \a pkt in bytes. * @param pkt The inner IP packet data. * @param proto Either IPPROTO_IPV6 or IPPROTO_IPV4 to indicate which IP * protocol \a pkt corresponds to. - * @param outer_encap The encapsulation information for the inner IP packet. - * @return 0 If the inner IP packet was valid and passed to DoNextPacket, - * else -1 if the \a caplen was greater than the supposed IP - * packet's payload length field or 1 if \a caplen was less than - * the supposed IP packet's payload length. + * @param inner The inner IP packet wrapper pointer to be allocated/assigned + * if \a pkt looks like a valid IP packet. + * @return 0 If the inner IP packet appeared valid in which case the caller + * is responsible for deallocating \a inner, else -1 if \a caplen + * is greater than the supposed IP packet's payload length field or + * 1 if \a caplen is less than the supposed packet's payload length. 
*/ - int DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, int caplen, - const u_char* const pkt, int proto, const Encapsulation* outer); + int ParseIPPacket(int caplen, const u_char* const pkt, int proto, + IP_Hdr*& inner); unsigned int ConnectionMemoryUsage(); unsigned int ConnectionMemoryUsageConnVals(); diff --git a/src/Teredo.cc b/src/Teredo.cc index c7131866f9..92cdc7f64f 100644 --- a/src/Teredo.cc +++ b/src/Teredo.cc @@ -109,20 +109,26 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, return; } - // TODO: raise Teredo-specific events + IP_Hdr* inner = 0; + int rslt = sessions->ParseIPPacket(len, te.InnerIP(), IPPROTO_IPV6, inner); + + if ( rslt == 0 ) + ProtocolConfirmation(); + else if ( rslt < 0 ) + ProtocolViolation("Truncated Teredo", (const char*) data, len); + else + ProtocolViolation("Teredo payload length", (const char*) data, len); + + if ( rslt != 0 ) return; + + // TODO: raise Teredo-specific events for bubbles, origin/authentication Encapsulation* outer = new Encapsulation(e); EncapsulatingConn ec(Conn(), BifEnum::Tunnel::TEREDO); outer->Add(ec); - int result = sessions->DoNextInnerPacket(network_time, 0, len, te.InnerIP(), - IPPROTO_IPV6, outer); - if ( result == 0 ) - ProtocolConfirmation(); - else if ( result < 0 ) - ProtocolViolation("Truncated Teredo", (const char*) data, len); - else - ProtocolViolation("Teredo payload length", (const char*) data, len); + sessions->DoNextInnerPacket(network_time, 0, inner, outer); + delete inner; delete outer; } diff --git a/src/ayiya-analyzer.pac b/src/ayiya-analyzer.pac index 361d5f8c66..a0b9de5926 100644 --- a/src/ayiya-analyzer.pac +++ b/src/ayiya-analyzer.pac @@ -51,14 +51,11 @@ flow AYIYA_Flow ${pdu.packet}.length()); return false; } - - Encapsulation* outer = new Encapsulation(e); - EncapsulatingConn ec(c, BifEnum::Tunnel::AYIYA); - outer->Add(ec); - - int result = sessions->DoNextInnerPacket(network_time(), 0, - ${pdu.packet}.length(), ${pdu.packet}.data(), - ${pdu.next_header}, outer); + + IP_Hdr* inner = 0; + int result = sessions->ParseIPPacket(${pdu.packet}.length(), + ${pdu.packet}.data(), ${pdu.next_header}, inner); + if ( result == 0 ) connection()->bro_analyzer()->ProtocolConfirmation(); else if ( result < 0 ) @@ -69,7 +66,16 @@ flow AYIYA_Flow connection()->bro_analyzer()->ProtocolViolation( "AYIYA payload length", (const char*) ${pdu.packet}.data(), ${pdu.packet}.length()); + + if ( result != 0 ) return false; + + Encapsulation* outer = new Encapsulation(e); + EncapsulatingConn ec(c, BifEnum::Tunnel::AYIYA); + outer->Add(ec); + sessions->DoNextInnerPacket(network_time(), 0, inner, outer); + + delete inner; delete outer; return (result == 0) ? true : false; %} From 854c6252753a2a5618c3cc3a86ddb7c3d06da68a Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 5 Jun 2012 15:07:56 -0500 Subject: [PATCH 363/651] Add Teredo-specific events. These are called "teredo_packet", "teredo_authentication", "teredo_origin_indication", and "teredo_bubble" and may be raised on a per-packet basis depending on Teredo encapsulation method. 
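
In script-land these arrive as ordinary per-packet events carrying the new teredo_hdr record defined below. A minimal set of handlers, written in Bro script and assuming only the record fields added in this patch (auth, origin and hdr on teredo_hdr); the print statements are illustrative and roughly follow the shape of the test baseline shipped with this change:

	event teredo_packet(outer: connection, inner: teredo_hdr)
		{
		# inner$hdr holds the decapsulated IPv6 (and transport) headers.
		print fmt("teredo packet: %s -> %s", outer$id$orig_h, outer$id$resp_h);
		}

	event teredo_authentication(outer: connection, inner: teredo_hdr)
		{
		# Only raised when the packet used the authentication encapsulation.
		if ( inner?$auth )
			print fmt("  auth: nonce=%d confirm=%d",
			          inner$auth$nonce, inner$auth$confirm);
		}

	event teredo_origin_indication(outer: connection, inner: teredo_hdr)
		{
		if ( inner?$origin )
			print fmt("  origin: %s %s", inner$origin$a, inner$origin$p);
		}

	event teredo_bubble(outer: connection, inner: teredo_hdr)
		{
		# A bubble is an encapsulated IPv6 packet whose next header is
		# IPPROTO_NONE; Teredo uses these for NAT hole punching.
		print fmt("teredo bubble on %s", outer$id);
		}

As the event documentation below notes, these can fire for every tunneled packet, so handlers should stay cheap.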
--- scripts/base/init-bare.bro | 36 ++++++++ src/Teredo.cc | 68 +++++++++++++- src/Teredo.h | 2 + src/event.bif | 55 ++++++++++++ .../Baseline/core.tunnels.teredo/conn.log | 28 ++++++ .../Baseline/core.tunnels.teredo/http.log | 11 +++ .../btest/Baseline/core.tunnels.teredo/output | 83 ++++++++++++++++++ .../Baseline/core.tunnels.teredo/tunnel.log | 13 +++ testing/btest/Traces/tunnels/Teredo.pcap | Bin 0 -> 26297 bytes testing/btest/core/tunnels/teredo.bro | 35 ++++++++ 10 files changed, 330 insertions(+), 1 deletion(-) create mode 100644 testing/btest/Baseline/core.tunnels.teredo/conn.log create mode 100644 testing/btest/Baseline/core.tunnels.teredo/http.log create mode 100644 testing/btest/Baseline/core.tunnels.teredo/output create mode 100644 testing/btest/Baseline/core.tunnels.teredo/tunnel.log create mode 100644 testing/btest/Traces/tunnels/Teredo.pcap create mode 100644 testing/btest/core/tunnels/teredo.bro diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 70905824f3..5ca9cdf330 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -1347,6 +1347,42 @@ type pkt_hdr: record { icmp: icmp_hdr &optional; ##< The ICMP header if an ICMP packet. }; +## A Teredo origin indication header. See :rfc:`4380` for more information +## about the Teredo protocol. +## +## .. bro:see:: teredo_bubble teredo_origin_indication teredo_authentication +## teredo_hdr +type teredo_auth: record { + id: string; ##< Teredo client identifier. + value: string; ##< HMAC-SHA1 over shared secret key between client and + ##< server, nonce, confirmation byte, origin indication + ##< (if present), and the IPv6 packet. + nonce: count; ##< Nonce chosen by Teredo client to be repeated by + ##< Teredo server. + confirm: count; ##< Confirmation byte to be set to 0 by Teredo client + ##< and non-zero by server if client needs new key. +}; + +## A Teredo authentication header. See :rfc:`4380` for more information +## about the Teredo protocol. +## +## .. bro:see:: teredo_bubble teredo_origin_indication teredo_authentication +## teredo_hdr +type teredo_origin: record { + p: port; ##< Unobfuscated UDP port of Teredo client. + a: addr; ##< Unobfuscated IPv4 address of Teredo client. +}; + +## A Teredo packet header. See :rfc:`4380` for more information about the +## Teredo protocol. +## +## .. bro:see:: teredo_bubble teredo_origin_indication teredo_authentication +type teredo_hdr: record { + auth: teredo_auth &optional; ##< Teredo authentication header. + origin: teredo_origin &optional; ##< Teredo origin indication header. + hdr: pkt_hdr; ##< IPv6 and transport protocol headers. +}; + ## Definition of "secondary filters". A secondary filter is a BPF filter given as ## index in this table. For each such filter, the corresponding event is raised for ## all matching packets. diff --git a/src/Teredo.cc b/src/Teredo.cc index 92cdc7f64f..945e54ee18 100644 --- a/src/Teredo.cc +++ b/src/Teredo.cc @@ -88,6 +88,51 @@ bool TeredoEncapsulation::DoParse(const u_char* data, int& len, return false; } +RecordVal* TeredoEncapsulation::BuildVal(const IP_Hdr* inner) const + { + static RecordType* teredo_hdr_type = 0; + static RecordType* teredo_auth_type = 0; + static RecordType* teredo_origin_type = 0; + + if ( ! 
teredo_hdr_type ) + { + teredo_hdr_type = internal_type("teredo_hdr")->AsRecordType(); + teredo_auth_type = internal_type("teredo_auth")->AsRecordType(); + teredo_origin_type = internal_type("teredo_origin")->AsRecordType(); + } + + RecordVal* teredo_hdr = new RecordVal(teredo_hdr_type); + + if ( auth ) + { + RecordVal* teredo_auth = new RecordVal(teredo_auth_type); + uint8 id_len = *((uint8*)(auth + 2)); + uint8 au_len = *((uint8*)(auth + 3)); + uint64 nonce = ntohll(*((uint64*)(auth + 4 + id_len + au_len))); + uint8 conf = *((uint8*)(auth + 4 + id_len + au_len + 8)); + teredo_auth->Assign(0, new StringVal( + new BroString(auth + 4, id_len, 1))); + teredo_auth->Assign(1, new StringVal( + new BroString(auth + 4 + id_len, au_len, 1))); + teredo_auth->Assign(2, new Val(nonce, TYPE_COUNT)); + teredo_auth->Assign(3, new Val(conf, TYPE_COUNT)); + teredo_hdr->Assign(0, teredo_auth); + } + + if ( origin_indication ) + { + RecordVal* teredo_origin = new RecordVal(teredo_origin_type); + uint16 port = ntohs(*((uint16*)(origin_indication + 2))) ^ 0xFFFF; + uint32 addr = ntohl(*((uint32*)(origin_indication + 4))) ^ 0xFFFFFFFF; + teredo_origin->Assign(0, new PortVal(port, TRANSPORT_UDP)); + teredo_origin->Assign(1, new AddrVal(htonl(addr))); + teredo_hdr->Assign(1, teredo_origin); + } + + teredo_hdr->Assign(2, inner->BuildPktHdrVal()); + return teredo_hdr; + } + void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, int seq, const IP_Hdr* ip, int caplen) { @@ -121,7 +166,28 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, if ( rslt != 0 ) return; - // TODO: raise Teredo-specific events for bubbles, origin/authentication + Val* teredo_hdr = 0; + + if ( teredo_packet ) + { + teredo_hdr = te.BuildVal(inner); + Conn()->Event(teredo_packet, 0, teredo_hdr); + } + if ( te.Authentication() && teredo_authentication ) + { + teredo_hdr = teredo_hdr ? teredo_hdr->Ref() : te.BuildVal(inner); + Conn()->Event(teredo_authentication, 0, teredo_hdr); + } + if ( te.OriginIndication() && teredo_origin_indication ) + { + teredo_hdr = teredo_hdr ? teredo_hdr->Ref() : te.BuildVal(inner); + Conn()->Event(teredo_origin_indication, 0, teredo_hdr); + } + if ( inner->NextProto() == IPPROTO_NONE && teredo_bubble ) + { + teredo_hdr = teredo_hdr ? teredo_hdr->Ref() : te.BuildVal(inner); + Conn()->Event(teredo_bubble, 0, teredo_hdr); + } Encapsulation* outer = new Encapsulation(e); EncapsulatingConn ec(Conn(), BifEnum::Tunnel::TEREDO); diff --git a/src/Teredo.h b/src/Teredo.h index 554e97f29a..84ff8ddf38 100644 --- a/src/Teredo.h +++ b/src/Teredo.h @@ -62,6 +62,8 @@ public: const u_char* Authentication() const { return auth; } + RecordVal* BuildVal(const IP_Hdr* inner) const; + protected: bool DoParse(const u_char* data, int& len, bool found_orig, bool found_au); diff --git a/src/event.bif b/src/event.bif index c4ed03e013..8d39af0ba2 100644 --- a/src/event.bif +++ b/src/event.bif @@ -511,6 +511,61 @@ event esp_packet%(p: pkt_hdr%); ## .. bro:see:: new_packet tcp_packet ipv6_ext_headers event mobile_ipv6_message%(p: pkt_hdr%); +## Genereated for any IPv6 packet encapsulated in a Teredo tunnel. +## See :rfc:`4380` for more information about the Teredo protocol. +## +## outer: The Teredo tunnel connection. +## +## inner: The Teredo-encapsulated IPv6 packet header and transport header. +## +## .. bro:see:: teredo_authentication teredo_origin_indication teredo_bubble +## +## .. 
note:: Since this event may be raised on a per-packet basis, handling +## it may become particular expensive for real-time analysis. +event teredo_packet%(outer: connection, inner: teredo_hdr%); + +## Genereated for IPv6 packets encapsulated in a Teredo tunnel that +## use the Teredo authentication encapsulation method. +## See :rfc:`4380` for more information about the Teredo protocol. +## +## outer: The Teredo tunnel connection. +## +## inner: The Teredo-encapsulated IPv6 packet header and transport header. +## +## .. bro:see:: teredo_packet teredo_origin_indication teredo_bubble +## +## .. note:: Since this event may be raised on a per-packet basis, handling +## it may become particular expensive for real-time analysis. +event teredo_authentication%(outer: connection, inner: teredo_hdr%); + +## Genereated for IPv6 packets encapsulated in a Teredo tunnel that +## use the Teredo origin indication encapsulation method. +## See :rfc:`4380` for more information about the Teredo protocol. +## +## outer: The Teredo tunnel connection. +## +## inner: The Teredo-encapsulated IPv6 packet header and transport header. +## +## .. bro:see:: teredo_packet teredo_authentication teredo_bubble +## +## .. note:: Since this event may be raised on a per-packet basis, handling +## it may become particular expensive for real-time analysis. +event teredo_origin_indication%(outer: connection, inner: teredo_hdr%); + +## Genereated for Teredo bubble packets. That is, IPv6 packets encapsulated +## in a Teredo tunnel that have a Next Header value of :bro:id:`IPPROTO_NONE`. +## See :rfc:`4380` for more information about the Teredo protocol. +## +## outer: The Teredo tunnel connection. +## +## inner: The Teredo-encapsulated IPv6 packet header and transport header. +## +## .. bro:see:: teredo_packet teredo_authentication teredo_origin_indication +## +## .. note:: Since this event may be raised on a per-packet basis, handling +## it may become particular expensive for real-time analysis. +event teredo_bubble%(outer: connection, inner: teredo_hdr%); + ## Generated for every packet that has non-empty transport-layer payload. This is a ## very low-level and expensive event that should be avoided when at all possible. 
## It's usually infeasible to handle when processing even medium volumes of diff --git a/testing/btest/Baseline/core.tunnels.teredo/conn.log b/testing/btest/Baseline/core.tunnels.teredo/conn.log new file mode 100644 index 0000000000..151230886b --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.teredo/conn.log @@ -0,0 +1,28 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1210953047.736921 arKYeMETxOg 192.168.2.16 1576 75.126.130.163 80 tcp - 0.000357 0 0 SHR - 0 fA 1 40 1 40 (empty) +1210953050.867067 k6kgXLOoSKl 192.168.2.16 1577 75.126.203.78 80 tcp - 0.000387 0 0 SHR - 0 fA 1 40 1 40 (empty) +1210953057.833364 5OKnoww6xl4 192.168.2.16 1577 75.126.203.78 80 tcp - 0.079208 0 0 SH - 0 Fa 1 40 1 40 (empty) +1210953058.007081 VW0XPVINV8a 192.168.2.16 1576 75.126.130.163 80 tcp - - - - RSTOS0 - 0 R 1 40 0 0 (empty) +1210953057.834454 3PKsZ2Uye21 192.168.2.16 1578 75.126.203.78 80 tcp http 0.407908 790 171 RSTO - 0 ShADadR 6 1038 4 335 (empty) +1210953058.350065 fRFu0wcOle6 192.168.2.16 1920 192.168.2.1 53 udp dns 0.223055 66 438 SF - 0 Dd 2 122 2 494 (empty) +1210953058.577231 qSsw6ESzHV4 192.168.2.16 137 192.168.2.255 137 udp dns 1.499261 150 0 S0 - 0 D 3 234 0 0 (empty) +1210953074.264819 Tw8jXtpTGu6 192.168.2.16 1920 192.168.2.1 53 udp dns 0.297723 123 598 SF - 0 Dd 3 207 3 682 (empty) +1210953061.312379 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 tcp http 12.810848 1675 10467 S1 - 0 ShADad 10 2279 12 11191 GSxOnSLghOa +1210953076.058333 EAr0uf4mhq 192.168.2.16 1578 75.126.203.78 80 tcp - - - - RSTRH - 0 r 0 0 1 40 (empty) +1210953074.055744 h5DsfNtYzi1 192.168.2.16 1577 75.126.203.78 80 tcp - - - - RSTRH - 0 r 0 0 1 40 (empty) +1210953074.057124 P654jzLoe3a 192.168.2.16 1576 75.126.130.163 80 tcp - - - - RSTRH - 0 r 0 0 1 40 (empty) +1210953074.570439 c4Zw9TmAE05 192.168.2.16 1580 67.228.110.120 80 tcp http 0.466677 469 3916 SF - 0 ShADadFf 7 757 6 4164 (empty) +1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 udp teredo 8.928880 129 48 SF - 0 Dd 2 185 1 76 (empty) +1210953060.829233 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 udp teredo 13.293994 2359 11243 SF - 0 Dd 12 2695 13 11607 (empty) +1210953058.933954 iE6yhOq3SF 0.0.0.0 68 255.255.255.255 67 udp - - - - S0 - 0 D 1 328 0 0 (empty) +1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 udp teredo - - - SHR - 0 d 0 0 1 137 (empty) +1210953046.591933 UWkUyAuUGXf 192.168.2.16 138 192.168.2.255 138 udp - 28.448321 416 0 S0 - 0 D 2 472 0 0 (empty) +1210953052.324629 FrJExwHcSal fe80::8000:f227:bec8:61af 134 fe80::8000:ffff:ffff:fffd 133 icmp - - - - OTH - 0 - 1 88 0 0 TEfuqmmG4bh +1210953060.829303 qCaWGmzFtM5 2001:0:4137:9e50:8000:f12a:b9c8:2815 128 2001:4860:0:2001::68 129 icmp - 0.463615 4 4 OTH - 0 - 1 52 1 52 GSxOnSLghOa,nQcgTWjvg4c +1210953052.202579 j4u32Pc5bif fe80::8000:ffff:ffff:fffd 133 ff02::2 134 icmp - - - - OTH - 0 - 1 64 0 0 nQcgTWjvg4c diff --git a/testing/btest/Baseline/core.tunnels.teredo/http.log b/testing/btest/Baseline/core.tunnels.teredo/http.log new file mode 100644 index 0000000000..b3cf832083 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.teredo/http.log @@ 
-0,0 +1,11 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path http +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file +#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file +1210953057.917183 3PKsZ2Uye21 192.168.2.16 1578 75.126.203.78 80 1 POST download913.avast.com /cgi-bin/iavs4stats.cgi - Syncer/4.80 (av_pro-1169;f) 589 0 204 - - - (empty) - - - text/plain - - +1210953061.585996 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 6640 200 OK - - - (empty) - - - text/html - - +1210953073.381474 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 2 GET ipv6.google.com /search?hl=en&q=Wireshark+!&btnG=Google+Search http://ipv6.google.com/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 25119 200 OK - - - (empty) - - - text/html - - +1210953074.674817 c4Zw9TmAE05 192.168.2.16 1580 67.228.110.120 80 1 GET www.wireshark.org / http://ipv6.google.com/search?hl=en&q=Wireshark+%21&btnG=Google+Search Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 11845 200 OK - - - (empty) - - - text/xml - - diff --git a/testing/btest/Baseline/core.tunnels.teredo/output b/testing/btest/Baseline/core.tunnels.teredo/output new file mode 100644 index 0000000000..02d5a41e74 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.teredo/output @@ -0,0 +1,83 @@ +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=24, nxt=58, hlim=255, src=fe80::8000:ffff:ffff:fffd, dst=ff02::2, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] +auth: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=24, nxt=58, hlim=255, src=fe80::8000:ffff:ffff:fffd, dst=ff02::2, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] + ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] + origin: [p=3797/udp, a=70.55.215.234] +auth: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] + ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] + origin: [p=3797/udp, a=70.55.215.234] +origin: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] + ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] + origin: [p=3797/udp, a=70.55.215.234] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +bubble: [orig_h=192.168.2.16, orig_p=3797/udp, 
resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=12, nxt=58, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] + origin: [p=32900/udp, a=83.170.1.38] +origin: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] + origin: [p=32900/udp, a=83.170.1.38] +bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] + origin: [p=32900/udp, a=83.170.1.38] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=fe80::708d:fe83:4114:a512, exts=[]] +bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=fe80::708d:fe83:4114:a512, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=12, nxt=58, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=24, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=24, nxt=6, hlim=245, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=817, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, 
resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=514, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=898, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=812, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=717, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] diff --git a/testing/btest/Baseline/core.tunnels.teredo/tunnel.log b/testing/btest/Baseline/core.tunnels.teredo/tunnel.log new file mode 100644 index 0000000000..5549d66a29 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.teredo/tunnel.log @@ -0,0 +1,13 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path tunnel +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type user +#types time string addr port addr port enum enum string +1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::DISCOVER Tunnel::TEREDO - +1210953052.324629 
TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::DISCOVER Tunnel::TEREDO - +1210953061.292918 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::DISCOVER Tunnel::TEREDO - +1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::CLOSE Tunnel::TEREDO - +1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::CLOSE Tunnel::TEREDO - +1210953076.058333 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::CLOSE Tunnel::TEREDO - diff --git a/testing/btest/Traces/tunnels/Teredo.pcap b/testing/btest/Traces/tunnels/Teredo.pcap new file mode 100644 index 0000000000000000000000000000000000000000..2eff14469d6edb8f411e072b20f8dcd744b9faf3 GIT binary patch literal 26297 zcmeFa2UJwc(m%S10+Nv=AVZKOIp>^n&UqMM$YF?rWDo=dR20c70wRcl2qHNuNrHkP zC>cZ~gMh%>8;|FB@La!p-~GS0)_dz4=vlja_pYk0uKw+=qPrPtd6JI~U;;lrM@ImF z24;*)xm2ZaaDff*8cIjUAAq7yE(?YOC;`B1Tq6L4lU!J6EJ7y+(-DvW{`9{UBSDX~ zgUY~MEdaojl$8~gm6a8gbd=PU6%@3Uw3OtP<&>3_bd}{4z#p)Nwz8skpM*7-n`dpfFx`sNs04Ek22I(n)2+imLU;?TjL;5rz3=)LW_r?Qs=I{H~ zj#a?M@~xDab!q=03;XK4UYB3d z##LPshH!M3?^G^H{zT(wF&%V`_D6X*u*Ol-$wm%|LB5Fz+yw7I>6h+_7b^z7~=CQ#MypHr_fFlA-G4-RPPfCdk? z5o+@v6#zR>Ov7jRfA!q2I&VMLdGtT(oCMVQ2yjd#y%ryk2FXI{7oGFQbsST<_$L(( zKvz`qHY-%w3tbjSc>w_Em{^r)Xqf1bOfNqFWCzE?`e(}q0LYr$G)`DkH)snfy2PM0 zA-~IXVUqw52RT6Lmk!|kgNn*eR5;f9vDHAi*lbBDUcc=-mgylRj}}Pj&yn`4jyjK9 z7kQ#}Y#=pt$S#`{zsu8bNB}_HaNHFqsjS7lgs9xA{zocQip1=z0HCX5U z&f+j3Q85BMBVU9Mmz)#$h&aq3$ODe>;SuB(<%O}?`P+E;cyjUa35!WMvJ>E`disJ@ z96SR&+&t|Z#P|fb?fmV0QQUA(_hZr?2sjGq=^+l&Kp?!hR3BgDyR-JV*r5Ki9}XP*4O9O7Y|Af(CVb3#7_>oRj~9;rE>39~{5ueh`74I(b%ouq&*rAXfPJd(}k`u~&KP~C5=#jHli;t`D^N%?2 zgVuLcLLmZCJYH^gNDl}PAhi|K{G_&%@W>X|1>wOI$9Ft9F%dZf&I$ie+YuM~UtRbs zmF{DxzVvrg$(9CDWlNV>0a4HWUHQ+SVEm3s&oPzIf20D+f|l}~inj?CpaJ$ClujZX zAQd+RfD;j}V2BEs&`yN|LIC??Zh1KPmIi;J4<^#fUl`lT)6>ZffdQf{0MG(xkg~c< zumM-F5|mC_2bxvtIa%O{#DNGR3y}wePKBeXalVs>M}&i<(ScLIsedH>YatdS3PT@& zdwdJct&ycs5)uLwkrN<>p{d#e$I}W(TEMwc4kV5Spz*>1OfXaWZMU2@7QiYf{^N~)ljr3-#*fu5F*k|N}D$;kl# z?7M2@WeEXeP=8SRgV))`Y=dWzvxBC}F9}ZPNK`;K;`?1d5l}!AY{)@F!v<_=kT0O} zUp~jr|5Ez@*#dZ^0bB?TL+TV7aNII}Oi=Tr_!uTgqzE<|1~w5b!|}}XUo{X8_NdlA z=I?rR__#-%|N9)=vaW(Orju06AELwW0J22i>^$5Qh^^r{w)oTc|d^ z`2WZu9B2y}^sUN-q(gw*qCkh!9DEc?zW{|MX&dBQ-pGPvhT3%OTZ#^406^Hqhen6* zY@w$B3E(jN_yX`sI*gJ#Q$TvMoe(E!boAe8Zv0qt3V+uerLFBTVF}N?snH!AG#lhjXg$HE45LoT{1gA4#$bkMQA138hHtx!8ZFX8;f z`D8mSnJyo1##i`fgY z!&DG(S5F>j4$UjTFU-#iQ%3qA96bYh1h{$ag$VHE;BbT&=q(>lbe&P|Zk%>rUT#P@ z=!1BA@B~677XnX~y1PkuOYw4xadPl*9AAn4AjG8!dT)MqPT(9K6vfY%6X9`O;ZIIn zN*-`e2hhI}hdBi!y*M2Zj&9(T|Igz##mDJ~a^w;{CL_Z6 zouA0xC-?#SPF9Lx<3-ISf3L_rNB+!5~f2p`aA zL;3h2Bwz+0X+=FbIXP80_@x-+?%~KUVq)On?k#GpA)<)Vb5&F@3-B@&4pLI(gNu3z z!+m^^K8891s=lV${Mt5NI&c#%Ls5P=xOR}gn~|!MDbiTi*WAoURoKwOMbF0x4F77n z7#Zj&__%2*I}54t$~&mIAUy+v-Ccd1OniJC5n9GvK_d2ne1?3Qx(Zr~Iwrg(t^vHJ z%HAeAURrLh@(QZH4k&(OxUaJvuM0n~v6q@%fN+qL1WX&0osU<_Ma#@Y+toAJ*G*2) z$WGr+G&o4vKvT)XR}^Vt94x5f0IJo|)!W!z$U)hFSD7~$1s8QzbkkPXcXPA1F+rFi z#7qK(?L^HyMYKG8l!Ltk%;CXKrp7`DE`4Qt1s@-EF$YH@AtzrEF-;qP0eJ%lKZKf- zjgF3^if`Aq4)gLs`X7&tfAC46fhhrle;+MUMuy5<;28W-@9$&me{!lt|L#;f1^E6& z>4!(nFCqX&;0W;OuRzmk5VMb-nz;vLR+ApqH^`qCu?|T>8w)Do>lJn%?Ae~3^QiSK z6nUl+4!jBH#gVY?I66pX_VO|a5TW_xJ#1VDyen<+O3ba;%c}ZY;%$; z*MgKN%jPN)uO;s6aN-maA2*wJBB>Liffagc#zTD4g2+ z%a#Nh*9PXJcI)PE8DwN<+d?EIdpxsw?QUHqBlS_qYxWXLMiQrVua(PO!81lQRdbMz z-=Wrd*f(;AJy{hwi*=eK_jE(FonwxY)%C}hAN;uf~~S_M1brOU-KDc)YZDW%`Wl86>-L%*DUO%h8F!p zL3;T%r* zt*{9x>h%TVjR$Jf=OEY>Gw@|v0-1_4)j4+px{>Wk$17=5jCbCBrTW-p`rN~PM~*5s z`N`A-`n+_#k}13I>!yX)M`4rsaW9cwjS;qit}VHtx3Ol7P3Vtc2 zGX-=}0V?YB^R25^eTk<@Z+|48eeR#q4<$p1G`Du~C 
z)(C;gA&;wgF%RkGOSrw=8g&b^)-0VLC&2c+uTT#=&Z7kUwL6Tm$O@72u`j{X3l_a+ zwCcx5b!o-x#dua~+SGZSA)F?+{Vif%IY!L({|+%-1%BW(M>B}D^aG=d3p*BV36+?h zJ-g#{rcT^m`Xq6&jr7KtB{Dk>3-o;wBI7mtYYo|%VG{=}{8gR#lYYamXvQ)8=;VfJ zM*^MEi$0)4dWA2M(kZA$j|&Mez4n{-)xxY7e(ON&N+_(v?iZDl{vm;6<`K=Srgo8_ zO>%5ErfB*;)yV+I5D%)Ct5dUFJmq`>MPs~^iXLEh^;}-PiAy3tSyCc9jM>-Nb$|+b zycd2qK=k}`<2lhNt~YDrRxR7;iqG#~706|#yfH5qDW!&$ucI2EW$Chqm6dhR_`b{q zyceV;32%(!(yASUZsfWOasqg!f!Hg2(?%|xm(tmo$;o1gb(C#QUomt3H}L%sLjs_&fXn5g8K z-qtSMe=rnK7ARWy78$O5rw_k)e3Q&FJyIezmb@E@1HM%^);?PyO zU1Z31ro@a&zHnEWcAUUNAc#w9(L-1fE%TEXvYDq}4W7a$`APioKJ$d=Q!#eE81 zQgv7Qnr3bRFIn4eP*!8p>xlb=PfQfFDsh~Pg2k^Q-({;`eH9;bBy5f+b&*CRaZLB6 zbkQ@|S1K4R-{6trpdX^5}cQu=~oOnN-ypU*;Ox|H$t=H&W`w|f8&{$RxbX>Ia4hIHfgdF z(h>H}gu&jX!E0m|GkLUMo_Tz2e2!(wVLDb{ReL3L=8(f!3R_taRr{EleD{mC@_wWB z%coc6HmEX}!-Fb#rUX8m@@Ghw#xB`}r-#QubK_!^A=f2*YgGYnyk~2liiH{9 z#RV2S(6_M|zE1nte08TS{@G2f$D5g^x~R#5y@MSsQn9D@pr|}mUH*|cM!6vAlk4F8)>sEHi3jgzty_!AW0DQ2VhU6}jq{A>vM z$X1EwHLOk5U!m;Vs9l(b{CH=8;!LPzT}o)oR|;JX*PUF<-ekgO0X{VE7I5}>nR!Ob zzAp5m7$>e@_dJgHc5yHK>}&%6v5V{Bz*OkD0=c+^ z;2aa;IuCL+1pj;qrIRyTbY-SQ2&Xl!{fyJ-{9k-+p=loazdN(_Tsg*RDjK$L;QEHX zA>GV+tNguP*M$1~c>+`bxeTR^CDJ&x-QYHl_UQWPrj#2ty57^ZWdaj=#t8fBI2PS` zOHqZk*zuG-)f3#O8w%S=sNDR&!SICuT!*;lRS z8M3rt1Z0sPv2PBcopQWP;t;{30@KfnNw8SBZbv0(O-rvWkzq2IA}X1}bNv=05yH*P z)>O9Q#S|W|cFNbYUmoaORkd!)rxN66uaEJe#vR}8&oJj#VLbnSdb-_@++rbg`SL(M z^3^+v^o|M>ywZm+uCFWm1RUJdD66lU`Lut=ij-RYOFRm5nomoCLU~z`%$;g?bVsz6f>T#32_G{m|ve(S& zePXLZtH{f&u+>V~f%NK%gwqed=H?us<;P_5uTp~n8_>q&(9nKo*)zwM&Gz40w)eO$ zv8*{{+1?;m#sMr~2n^gn>6hS5d)y+#_2$!`xnew7;({z2Bl6!3-az9COnNkHsG4p(3Sn{I{?R2FDfuH`eq2R;))K z;fU}-_=v-tQ7A8Q9-b3HH6F;Z{oiQN|97H6&=`LbjrY5_OwzHJ(fae->pzdnc#nr% z^d10LWjPWl{yz|xF+iZesLpr8Gcb_S4;JsN;k+e z(9mB@TC*T6Y!@BWlyL9M*hUp*XN(W_^pe122~;to>4w+BK3u?c^P2kdwPia?+^gpK zPQR}Wa}y`2nq;mL=KhA6Jf?CgG4kqrO4MXv7H+em&O`mkS2P{OGjf}=yfi^1y*`Xt zAEb}o*YeK^$SG;gg}qLkS)C!w&N~;}5=2F|8UL~SieqRge^(pc;a8L$-iJ!6=1)$~ z^TfzoZ7xdAz1u|bQn_)aG8Hqu@Jk5Ied6^X8s84 z;E+ywbZqLa=aSeQRr;keX?r!Ru1`w(-_2bwm}7_A$_VJd&-q+0r1FrurII=uE;xJU ziC9I~7>>Y8feFVEs)eFMjM7Yl)TvbVu#Owaw_ly!|#GbIHIs?$Fh z*_?@5RK;dNF_B$T#t9T7Mq7!I!HwPa(2sgGC!1tbNb3{lBizP~&yM-HAch2U;;kJG zA+}RyDP_;WrhHUE5(+P}@hzJ_&SxvLm%Of`9DJ|nC4KlZh5*8dBDz>lsaGNEFg^T6 zc7_|?bt*vYW87E(=$TDLgFr46q=*AC0@%Sbc5 z#;5if>m}xv*ZUSef_G;gy4)#GPN>zg7MYtEud04XqL$)^_9jo%@KuuJvs`SYjwyBH zl%cYI{viS7>IpJ`-u6}5s&_d$k~ww=qc>^i3djpd;%WpVahrJ{_O0^`dU&J@(MEA`#ZWn-pko3MVNVb1M;zF3`x;Pniz8 z2`Y?xY{izZXrCJ0I@NcksJl$=E@E!CpHC)yN?p5DO(A~zV@PH-jCC-i?vZ8BlFRT? 
zoJ*6A<@1i<&;vFj6(Pr&t%?XkY&}n(rypchMz6jf{QL~<=Kh768`rkr1&%f9r$qDD zToMEl-~t_-D{^6lXsG88koZPjqCE$WedW2F2Zf@g#g_7+eS5)VyDgkM{I?T!NnNsd zN_E{uv$w{V0t{N}=r8NgxAWL!KU#RxrL#gW&3`m2gTL`*OGc)7deQ;4)=aD2P6_`S zBeOcQ%wrJ63x^-+p6yO=#>Jac^B*l+%xuN)>BQF@HREw03&7c^*15F5HLgSZmIXU$ zZY_1@oOb-8SQ$Bjxg5rCSeayS7w#Gl-(+Yg^Zkr-cWnDn5=CKhf17)QfKa0LGcDN} zJDZo6(<5Yy_I0h6oKjz48+>Zt=dWHiO}Wjtty+7u;vplLRMDSwR1XH+tI3KolbqsLsj;ygfg>w{2ph*srBo5Zn|pE& zBQFGs4fyQhw7-5;Els&R%$J&$`9`9_T{S1-X0~X9S!ny@sjQSL1`D?6$|QRF3ssbP z1CJ%UB}qS=;igT@x~51PEHCalrQ6k&H=R0Qx{pYoyg_@Tv`V*1I+sWEd-N1mSiCZaa%sAzPW5{?@#SO!DU+R1Gn)qf zlu-Jb{85y+&mis*G4nHzm0qgFD+5P?(otCGa-SU>dz}(%S1(LQ4oS*MVrFj0T$Wg> z{irB?`!4HQszq#$4@R2%vv^j*m%66a*lUvQoqI|zI_)dpxj4W<>NIjkv7y3QZs*`N z=}62vzb?5pF5#%Yg`(g}_Xl>13hBeHK+-JX*PZ)!%Uk2hU!2nDfY~{`lVx1FD&I`V zunVLo%RRB=UXmS1+>c>R#vr3b-(a6Y;z`X^!}_bKna}P#BC63&+@&e@9%=a+rj@}@ zvV*&{VQ6eJ@jIX0VEAyp;ls9i;n5BQ(=6&8zhC2O;%(|lxvnE(8z|fE) z?#_CxU?4to4Vk-fayl&*t0d-p(i1=Yb2m3?v1!kX;%Nor^dt%T;oo|HB@my&#Y??k zFMK^p=w1>X278{S2l=NIuq36EK~=^Tt`dWv>?|Hf<2|N+6@Bg;eop0%m1%i*lfuFN zaDP|w zsjTVq-b>QcMUR-~9YZqcZ<_AJ6@8`hZo2wFdITvV-qcj%3qO4D{s#T5?NyhN8uXUe z*IurdKf1wpWVCpYtG{;f%CKdbj%t}o(T&Od$T5PY(7LCC9h2X>9q5i^ArDh=X8@0IxUAF0vqCYR)&x4Gt;eUW*o zQ;fcOaT#qd5GPMLYNN!(VHsm3J)dKu$b+BfeRR4qK?Z{|;*S1@fv0f|axpGO#NxM% zuhay&EPZglv-7wig$C=W&nMNBn@Ch``N?PN^n|g#eq!EERd-JlF)v95v)owI>))tN^Q{NwsGyMw6Dz$taWb<>?es!m5|l(cT4i9 zf?v#Qap-o`BJ`TBSKAG7ls~a;Sjkbk^KdCYCN|er+kSPioN>}V)VIG|%GA~&u-`>Y zzsRKVxmQ!N5GGghq@;_Mtt77u%kJwN?x!+WM+nm&(x)(BHfG)!8<=hi-dMU%q~o9W#Hg{nK7&I-!HWkY zC3Qiv;1;=XNH-M~N(?NYZ>GDzLiOq$4IAEt5eN9};h5gR+49qL#os)I@u?y+@NLoL z3+tJ;f)4`ZYI)M}{gg@;xso0Ub^FwdRQ5K{5N3hXjiRnFkTr79F zU`#W*BQ<=7_c|TrT2)m#2~QWrAbD#6E%xWeQGjvjKK!LzT_&0dX7(#=Z#=xqGiZe0 zP_AfJ7wP7p!T)PDENwgp>c141SfaF1+B3qCx_&5)y&{S zk4de$Rbs1)ysbl_Bmrp;d)!_vRy}o-X4x9O>{~4p&OxuhBR9IzcqawZF&zg#@$25K z+(K(f16Ms=2}Rboe5P7y?Ca0mRM&tHDa0cMS{O|NJkvJ589pV$vTu$>!bZ&P>u@h~y^v zs?YzFIhC_=Hyn1_PDAPHBhLgb=Jj`q92r{7@4A z7nWPVx2cYb2+ZvIX#TwLeDqFfj9`3*MnM%_&u8o}hHZ9To*Q+V4PB${uT2IlyDje* za7dW4cKBJm)a>X>7(G|^LX7VwX-2%F05x{q+ZOkS2?~1^A*WvCSeJcSj6gFJ7YItb zOm&-kq%+~cu0!};quYy*whqVg6=^<&-;c1(%x9d~l;EdKV`N;nOr(o_SxD#fHMu^~ zO+qnIit>DBD<9R>tBUEW9g){zD*PDar+47N5l=665OW3-Xhk-Xlm+OTk3JHugN-#z zs{7xle>coB{&2AryT>YQJ#CS16hcga$G=6))MLbK{O=Gmiv0&-mcIgkE=HSK)d(gk z{#z!|o;R;(W{ftZiTH6mDlejH)9dNcsEF;@+;y@RDJZx{;VWH!_)Z$_pt}6ty6?W3 z*dVLV+t|z(9$$>N`cd8{G#i?O2zrU?_r2za`Cf_)3#t zeYY_FjutLa+jm9`YiCgU*F~9ymsLrxt1@f%uqwy5oMG}~8F&#@9(GNk>jV63Ah`+m zZRuiKe!I~9F@?+_k`^5~w{Y#FwbA`cWW`XUpp(;ecxxIa3tn`0Cx(TR$v zMg=aByj;NO4;}Dz*hr~Sut)1Y9r+laTKKgH>$? 
zp9a3W$5WJKmJioinh!K{``D#s{z-v(+1B^Q)eo8Z$<OWHrHIh%;o9X_Y5 z5KFRoXj~FJ{7&}%`#Rs-iR1M9HfjS?Ia}ALE!IQyiXO%lB7}w^!WVcu4lq7qGXFZn-Z2z6W>?;f>U=khkARCHcd;-x{KFa ze#+w#j>5djUgD)e6AdCtng+eDcWGj+Nh>R+qqDr%4~g;V%;3WY?|nS(1{7{J8@gJ+ z3-~?7M5oV8zwJ!w39c2K-TjIT< z>valetFYO7kmC`MW3X2#{)(!!cAhYIxo)N_eQc?f={0KOJ-yrRLw;kE=JI>j?_cvz z<_@)_H6@nUuny|Wc}riKPU0S_aXa);)Dx;9iYm5@@e<*0E2Akwjm}r)$Ur{S{;AL!K@R z?BHP26P)U~jMH@KMZ8C_OhMQu$;PCc>3l;1@<5HWwV0^-Z@Ec@JlKg*7rg*@$ba4QkS z^}_St;&j?EPB;8_I2||p1E>AK6~(@yLj(~-3RPQN=C||knML>SEwR8%=PcU%W#T0D9z--Ney)f_YN+C-KPE zuQyN}ZW82=IODyUO(5{8rY8&)J9LnVmN|v-aD6H58Kx>HDyw6FnaAhut2-`4t&@z_ zE;{cd#1PwfvvW_LOprTBKVx4m%W69q>93c(x<9$vfD?w&N;OK0A8BHEz#CLeFpg%4 z%Jt$mwQKC5QXApF$c%s;3VdYc4(*6nLkJW*NZS>^b@I9pP^r+ucixxZRAT$i*9Gb4 zJ$B141y(gY&k$K4>)VFzXPU1sCU0ey=)c@bWNs@hdZFmTAANDPd3)dW(-y1BCyCT= zVW9+(1p}8W+^*Zdt7R%0@mzn3m72x2D$F)|=KTs*eVT~kdDfA|)4ru&$bP*qg zU}R{V;pcsNqiIFoMbQ-KB*0!dZ69)%M~8(t`}NXlBj08kuSvhMTIjWLdS-)%VZ*Y` z6G?RHwI;J?ZqF=ccfhhV;beid-1&%^ZV{h*ZMmJbBtWr1VxEARNIGPt6P;Z_xT9@Y!!fc{0TWpi)f0J zm{2c$ov_R{#aWu&TR|_rfOQ=QNVm*^D=Y@SMl=_Fx3(&DSU$!tJAX-UKGWg|t}Bjv zG1pTdgN5M}2n&6RSu(#`S9{LHyTpEaA8Rf^tgU9BxJ~Sqvyc(E;@8^F&xB+0-TkhL zPjJ`6Z~V7!dna=hb7k@QAHpvUd8+!r2Nvh6Ozw%^6rH|#XABGdTeIMmtWZ6nuI4kr zOt-QRCGNeV?}g3obyqGsE!FMXi@U`ui`>v-c^gtoG3}^JOYr2bx#zkir?!yxv)q6; zrDqB`AHBckq1B{rWJ~zenNWuO*>=&rPI+As1~uY*+Gxufs*kuuu|@V;*TXCyNZUlS zgeaxI$kR{CE8yO}&CEK{Q#xV#)|bt&ngWNN-6?g+;j^W9_mhdIrCoMxM+{WrPa1+M zxR<@s;6^czt|tWdn<20rOuFL@<~oe-SC;h+)$4z`%`xV5PhP9RK7WtjW`Esn$%^C> z!cbo)4@FSLfKR!FYa7^yq3)Gv)Gol}aqHk6OeGD*@D_+_I-5>5@5vg*&gy zDn)!wZ*nKTeU|fJf-$$5Nn^4<=__i{0JG~kiM01*&e;c<$t5*87rEWVpXtzByeWB- z+vI4{pQfdpwXI#X0Eb&A3&#-U_cV=itXCHVVUJvMGpWOBDYJAPclBa9qP^-r;)O@< zQDVFiiZrL@Hcpi4wWM~R@G8uG#z#-HbA)Choy4lP4X|0n1S7+D5oNrx40^6StP zybm%%OldsaZ8P^UMxCQqY4O*y8NQd3D^p8!RFH@$S1A%cKwcEg@M)Z$Ey83T;g))Y zC5yg^)z@j~{A7ju7C>_~Cuuct)kxnA_f?CmQKt@#tUzDCrZe*?)+dOs8rF-<7@zmA zSVM>@*@yC`dx~g;^6jW-*3<3JtSsro z2lTa?4G;1Qn=6M0wm(-|_&GoOTsa1QFIP(VIR|{MtOdW9D=+#v?|iO25U&iCSiVFz z8Y9VOl=TqD-)H{>%@+_JA)ZKy8TC3t*P#$EkJl9Myf%xYJ} zFf_U>J5bqhhX0*%mH5;RSnXTIXP)>J>qp&ajWl`o1h(_Gx@f`#?a~inIi_oqykq=J zd{qdn*4LX>hm%`cBoC@}EnamcE4pc<$*sKrYzP%Z2-DhbQ!0}Mz6=;}l*L7}X1>hg zfUnhiBq?6Sc~`Uz>y3If{I>Pih(|*78oQU!$B|@olVn~bvr%fDeV^f9H7htfk-~-j zpCewZvjw{AEZ`;VEWGfk)L>FRP0BH6G;?dYKX_V6?2cFht?2BOMd`Aa{3`Ra)(6WL z%s6_8e4V(PU%X<)St4W(G>q+LA!u zKd1F@s*7X!D^q{0Hk0!vbMCE93DUtrBf{(1%pV;~Y%jh1w+nDBptxOfycU<2}p(l3(h#esV>LCNL*Cb?9j zf^+ciBtPW@Zk7tL3&P@u8ie72K>gX4<~eMD16=(IrIToePMgXEwFR#6b6e1HM2nn* zV~$TCfMontQU8J+RQZ2D|G&@xGzu2ye;NfRH)6}g?HwDc{@)Dsf>KG$-0ut(8hvOG zagQ4~0Jx^~IGr?0Kqr;?c!*K{Y?gooiA~Jh@el(!{F7N86K}+aSO9>iEP$yn;}Hlk zQ>n!H&Dm#{(PhDdYYxhxmOxKna{{{3_k*J$CSKq@XvOW1v(F%E!RmN_Q9FqaywIs- z{MdsP5G9Brpo`5Zy_d}jQjGBePc5?m-}fN3htNrC5VazZ+Mj>FIH_f}%NWP9Jv$*= z3`mg;I=RgJkH3PTY2pE=!4VFnlc0ija2XS#vOoEcRKTOUVuk+I)@%?Xx&OpSZV)5? zeiqsPZqEL{zd8GVTCn>cuCyk=Gvxxi1KK(Zt^`KGkGB*<2KH$8r{g0}nk!q;B4n-{ z11HRtP5OQa_`aovL8Abo;*X0DOo3a3p>%S*Vht62f~cH7|F5V_YyZR2(%+nS1+Mhu z`}<1IALm_t-?Y!A5AL-`_=>~qp~J`s@Qm$zg1|GejyLrGxeWAQul_XUGH~;BvICF7 zQ}Kj3fE(ty|El|A+pmDI5I9l(KY5lGv~T(6jl=-P8}p@r(Q4A8oCbNnKKI&rY)#cn z7UG41XvtpEw6WLzI|j++SVgH^kC@}i!w#}j=|?2d7bl2n4_;H5o$X1STgyz(1U5aZ=V!EiJw$9#j~G3g3z)^snbz`E^a zgJ5d1E3P{$HT${SNs7y3S|}_c`pfp{yZo9lL%gixvC63*&k1=smJRHE&?&l8Y(M?v ztbmcJd#v025g|b=rl)zAaBF1z! 
zRLOL*fZ^AO9oC;E(@s;FB6ZM1Z@wc9rn!9%+(F?eb zhB0|JXS>DW&lflkYW%VXmEl^2T%QH5Id~`XP}*xAsn&?C zd^1^rMHE`iyuT7r|G3|CiLvQvlhN>X1BEUCjw|`!B6zU$1b0u>Pw(kT_Fq_% z=^(Q|qv_~EkU>6ePT4!np{N2AG9=!RElP?r@VR<3SLeB;)Lj|?_i9#;zeHiiHtSCH z>O^MyB*upCj>D%?7D~RIcgZgI^6|Ie)%VmKX8Sdc4C`=Dz2OiWOo=Z{M9uoI*JxA= z3iF3=;%RPAUKO74?T&dQ!cj%cYh7ksJ92~X5e_^3lx2g>Bh?RtZV1VI-Yd@M2Z$Ee zSDt0RS{{Ei6DgN)SNTCw_q^Swd8zF!HsKvd^`v^Df|_U5?0s0doi`$w&RuA?u6x}o z^@*?LMX}JTs+!Dp<3%R!dj$Tu_GIr}sV`i8y1qQ|8s;)}TG7(`^*SRDJ^IL*%FpW| zY#4kRcUNV1nJrW58nmhcMxTjp95Ld|a;!U}WbUI;Zsgu%E2v-`eDzg8->>?4OwG*u z^>vfhLGS4Dw@+!JyA21>uJ<87OfCTY->mreOrj>x6+;PXN-erW+J5pyZ! z@CU+%)SeGuJ#`gcYrQ`Po^WWbacZPnveNZs~z{Y(=yKBk7f3I$XVR2%!!I~)D zuFMvGeL!2Hc2|1g1FCmGtZgP`;MrS@c;-{>u5&Y>Z>$!&i86`qksHbu-SH(ps^7V!r1>WtBtqG+#dMY#nT-n1*X@ z6FsAeHGQ`InMjL9k(I}O_T+BQ&81qu2Wo9q9r3}|6AntDsd?BVgqu(B?rz`luoxiY zh@rW#+(WnPPETx0C9`}@g1u-eIQR8yR#M(5b-l1kt98V?bffahPl6m*CIwb!({QdB zOrNDPH_VSw?QD6RrFK4q2B&!aKJj6tD@;-`7n|!~&SGiiX(H+5(;D9RK2Y7ZmO&{ zs$wLZ8CSo^qC4IvrO={cCjLY{_Ugi|-d65%|LL}~`zc^i{?ky)L!GDHzdRQ4KePFZ&}8P9qyv8_Q)d zut?!P&cZd@Nz7}i2Q6_~4a|7cUoyX*b)Q<|K8Ho@rJu7J z^0wmM@{2@vt&O_DX)QttxPzgCLq!f60be%vd!;v->_L%H*>i?>0u99u1J@rPKECvA zP3oIXSXem?!)sv2S6qU7iqj_5pCZ8Qe3;7(){fKT0eda&OXkbf#w9{Ldr47q@7_FX zUy9mMVeP1!dv@uWHwqOP=ri!yQdlyUV&q#$zkAkLSkI-&8VuhERf4#ULc8hLhpyIH zM6vHaw!PVMm)?5f{k8tx(S&ID!G2Ow&wEUqp3Td9Wd57ynshfjHZ~oPC=??c{QTaI ztYO|~G{M_`N2y^s-NJKsRR;g8XteQRNS3<&Ljup@v60FTFkug24U+c*i&>#dq14vn zRs`I}aJDU?tFAiqOH`=0_g-zip|qvjuJrA{v6;~2M|qtl_M+iK3e)U+>l_hA;a@!D z3vPQ^e4(YM6z_nkVXM?;dtAkj*d=aWi5AGd^X`qe(;p~~>&dlMCt*GF^-gW0&1{-XK&dkX;t$JdvZsDie=7k&?W|&UTjYvL@MVrz} zyF>m?bFx$9i$Jk2KfDO<^qm)_bQY3IM!@%=SwY8r^cDG&xh&+6ooNsz_nr}z6vBNS(j9vG9eE6C9Fo`?m z5zlk`))s2if^SYk}3tSTvME?AJa7@^Ky*t89QC( zdHz__V!^LpO?mrLGKoJcA2xQla(|>kTf8?*>V~_2&hh%Sg&?9s>44E})zsk7To@TI zg!rc@e*50QK7Mah{@oRY-Wx^X(0fCx`ulssxtIXpzy!lqU^?kjv#I+o!79Uyj84tjHQ^I`Z>89uCn*GG$WQ%5kgjrVCnkbb0X9AzYyzh4Kn zjqc@{h>N)>LCxv^Yrhif`D-6Vwe5TKz+>B~4X>GgG1q9Eusr_t+Ka_5D_(hteH5D4 zG(|PLL2dJno!qR!5uQdKs>!Ps38jXnM0mao6N_Hi)(a}kB-V18U0B=eFLKX{ z!N}lptKX^=4|xSK;XSGh)(4}0&0orGG?npm^PfJU8}HTb+}b;@Y1KPNYp-3BwsMoc zKRBjXv8;~K(|)hv6}?$6HeBEPCwy;N_0|27kM8R~@Z9=1>q&p2w17+cvgvO=YV=xvLpcwO)49Bkg5BHHQ3f zoRpnTn%eo!nu+=R#kxdyUEtmipdhpG}q|GcU53BcOfjZ1Ik&yFaH+2ulj~yIs_N zUORV_%q!c=-#00wC7)k?<$6?J)y)n2uDo&j9<|e4V{hzU?+w|f%2}V5Yj0<{EmOUG zbz^hX!@LJC4j;?PD*k&lF3V$D^=|21|GksefB9Q}V);`s^MrkID|VP~wY~dv-@e>t z-9MMNrCD;@hTSQ0H#~LsjnO}v2TZI$)!cV&K-`9 zj#p~gp!@5igp%LB;At=3SodmNY`&Z0=K4nM(xQx0O->tU)U;0i?_>A+r@HEuuNjN} zY`t`z<9qp@8OOdT8^{LFcT?Lx?z7+-^%_jTd=oWKA1o2yIA4AGA;M5{O!lp zr|!CZ$J+O@kKS@r7fX4`*b<+yV1x4dq;p|YzUq~u_;<-<3iCIl{j zR$!JtYf1tH9b|mB0~as=OR+x_@GUD|@fJL4at^ipSyKWmf1*KGca&^`n4$(;28U;G zPZ;c$;zOEpx1&@=M85Eeg7z7v^7!output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff tunnel.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff http.log + +function print_teredo(name: string, outer: connection, inner: teredo_hdr) + { + print fmt("%s: %s", name, outer$id); + print fmt(" ip6: %s", inner$hdr$ip6); + if ( inner?$auth ) + print fmt(" auth: %s", inner$auth); + if ( inner?$origin ) + print fmt(" origin: %s", inner$origin); + } + +event teredo_packet(outer: connection, inner: teredo_hdr) + { + print_teredo("packet", outer, inner); + } + +event teredo_authentication(outer: connection, inner: teredo_hdr) + { + print_teredo("auth", outer, inner); + } + +event teredo_origin_indication(outer: connection, inner: teredo_hdr) + { + print_teredo("origin", outer, inner); + } + +event teredo_bubble(outer: connection, inner: teredo_hdr) + { + 
print_teredo("bubble", outer, inner); + } From 727b7783f94d138e23026ece7424bfad7ca42229 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 5 Jun 2012 15:17:27 -0500 Subject: [PATCH 364/651] Add AYIYA tunnel decapsulation unit test. --- .../btest/Baseline/core.tunnels.ayiya/conn.log | 15 +++++++++++++++ .../btest/Baseline/core.tunnels.ayiya/http.log | 10 ++++++++++ .../Baseline/core.tunnels.ayiya/tunnel.log | 11 +++++++++++ testing/btest/Traces/tunnels/ayiya3.trace | Bin 0 -> 13440 bytes testing/btest/core/tunnels/ayiya.test | 4 ++++ 5 files changed, 40 insertions(+) create mode 100644 testing/btest/Baseline/core.tunnels.ayiya/conn.log create mode 100644 testing/btest/Baseline/core.tunnels.ayiya/http.log create mode 100644 testing/btest/Baseline/core.tunnels.ayiya/tunnel.log create mode 100644 testing/btest/Traces/tunnels/ayiya3.trace create mode 100644 testing/btest/core/tunnels/ayiya.test diff --git a/testing/btest/Baseline/core.tunnels.ayiya/conn.log b/testing/btest/Baseline/core.tunnels.ayiya/conn.log new file mode 100644 index 0000000000..5c23b4c404 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.ayiya/conn.log @@ -0,0 +1,15 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1257655301.595604 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 tcp http 2.101052 2981 4665 S1 - 0 ShADad 10 3605 11 5329 k6kgXLOoSKl +1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 udp ayiya 20.879001 5129 6109 SF - 0 Dd 21 5717 13 6473 (empty) +1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 udp ayiya - - - SHR - 0 d 0 0 1 176 (empty) +1257655296.585333 FrJExwHcSal :: 135 ff02::1:ff00:2 136 icmp - - - - OTH - 0 - 1 64 0 0 k6kgXLOoSKl +1257655293.629048 arKYeMETxOg 2001:4978:f:4c::1 128 2001:4978:f:4c::2 129 icmp - 23.834987 168 56 OTH - 0 - 3 312 1 104 UWkUyAuUGXf,k6kgXLOoSKl +1257655296.585188 TEfuqmmG4bh fe80::216:cbff:fe9a:4cb9 131 ff02::1:ff00:2 130 icmp - 0.919988 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl +1257655296.585151 j4u32Pc5bif fe80::216:cbff:fe9a:4cb9 131 ff02::2:f901:d225 130 icmp - 0.719947 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl +1257655296.585034 nQcgTWjvg4c fe80::216:cbff:fe9a:4cb9 131 ff02::1:ff9a:4cb9 130 icmp - 4.922880 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl diff --git a/testing/btest/Baseline/core.tunnels.ayiya/http.log b/testing/btest/Baseline/core.tunnels.ayiya/http.log new file mode 100644 index 0000000000..7cef1a1b8e --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.ayiya/http.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path http +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file +#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file +1257655301.652206 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; 
rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 10102 200 OK - - - (empty) - - - text/html - - +1257655302.514424 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 2 GET ipv6.google.com /csi?v=3&s=webhp&action=&tran=undefined&e=17259,19771,21517,21766,21887,22212&ei=BUz2Su7PMJTglQfz3NzCAw&rt=prt.77,xjs.565,ol.645 http://ipv6.google.com/ Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 0 204 No Content - - - (empty) - - - - - - +1257655303.603569 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 3 GET ipv6.google.com /gen_204?atyp=i&ct=fade&cad=1254&ei=BUz2Su7PMJTglQfz3NzCAw&zx=1257655303600 http://ipv6.google.com/ Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 0 204 No Content - - - (empty) - - - - - - diff --git a/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log b/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log new file mode 100644 index 0000000000..69e331b477 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log @@ -0,0 +1,11 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path tunnel +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type user +#types time string addr port addr port enum enum string +1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA - +1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA - +1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA - +1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA - diff --git a/testing/btest/Traces/tunnels/ayiya3.trace b/testing/btest/Traces/tunnels/ayiya3.trace new file mode 100644 index 0000000000000000000000000000000000000000..83193050dcf1160bfbee5c947567bd37b0094249 GIT binary patch literal 13440 zcmeI3c{r5a|Ho%6*|JMYYIsVLnK5P`Ci}kcTV=)=3}cKjV@s4MNm7YODJp40rHw+- zN<@XS6e%jAM2YGE9Pzka{p_r4t0J@+~HIj^(5KWEP8%#+&}j`2YRAV05( z2?zuVd>FSJwjgARKyCu}V1O+CWEF*lELi293loMILm)*Eh#mwZZ6?+%;VoZrik~i9 z->?#rohG2~Aic(7*m4{OHIINuLae}-feZTT;cfB68-nUB%8kkcw=??V_nrMh0b}$? zdb88c`pmZla>RPV!V1C%BnQbV3&g-T=YLrZTUhi7LyiD%fMKd0Q+1lGO=|a207N=N z3aklOqXwF6JtX>J?Nn{7AhU_~Fi&I#!HSvgC8wi9? 
z{^7)rEURl1AokP+od7Wm}>m&2)vua_(O0dl|=DQY%COi~ch$6d1aE|Iqqh0$=%sb}}BM<@F0g(t$WI z{5BF2qnCrUkVP|Sxx6ED(Nf}?GaN2A4hc#t2XuTP=pMtkTMYj`UgcZwWeK+4%j-!*$2JQ2 zM#?unP6kUMS^lY%QcJLu%GLjADIR|;C0-XSDFq7D5A) zpWt-ALTC3u)g7U)_K~JzapRrtl~Ou#xlCsV3fLEa9)k=~iK z^+I#eKDcmqMT%G@u%_v3x=)Hgac8Hy)`b~2Xq=;`~ zg5?N^lz>1Ll%F38g$j$3j;c)B00H0~klb-l^*`aIapyY#4HC}A`mL_~90P?L`ZS+$w&!%#cQf}Vu zYXOkP+DT1Jw0xRs$Mc^>AP8Vt1BTgz0x)C@NNBF~E1|%`qOiFL#ZM<7uE^N88My7r z8GBig(RU{;1SI4y`j>HU(BMd-BYif#%8V0ji zbQ)NF5-bpmq=J#)^WZ=RkjT!#*o16uL`I_snp7WeEi8_PLu;YvBxf=aL%`8}wdi;) z8jeoHp-E2GWRw;bgCc4X@tP#7sZUy3XdDhlawL13yIM!u8l!QxI3M32Zh$t-2FO>7 zK;{?)G0jc9-FYEqk(z94j*B5XD#F^=5^v9p4E8s`=(9p1jo9I#R=y}-Lkm}rs6ZEE z78~vq6>4Pd>cRE1u=l{2YuRG0*j@pwU>ACxOk&!K4>pD znzO%+8xiki8t&q+MQ{o*wehy%618ED0Em&jzP`R02KZ;B>1^UnbJFyHvvI~KILE*g zhcR(OhgcK6*aUq`lp)H9=g+_}>>a6eb4TY;jz5hSLJKg1Te;fPEkZ1#>=;HKgg|GE zdx)VH#?{Xx3eSynB~YE%5hiFR9Si5$I$N{-XjGa%&&1Wt7sv6^H^F!_Oxgaya28N3 zA>2HS?e6M1)HxKzRKSTA%@y z7;GC5$+h7++F6T>ii?8!mf#@vkG{=T;rMA4#?Me;HqUVm^?-JKu@D~WX;mI7fQuyg zKZ-ab^$ACzncjHX+S$+xan6yL5=4| za}gqUEv?%k{T5EKzrYT>lAjkJqpS-O3e+$7*MyN@AdsKR1_QJPW@{-fY6MI>63mgu z(tv3aU^c;FUKdwIshS9_Tue!nA93A0yoM7(H*$$K|F1liuFwi=HJ)Ag43JgDV{X)n0 zwQ4s?YT=c8%BTrA(t z$2I=G7|HuWh2u|TKPLz5Jy?~;&n&@Y9(S}^-3#XpX>%QEkvv zNLF3=rF6@J8<`h8O-CAx>W!E%=!UgXDTtoKJu0g_KdbBuRnIPeq#N`Bs#0lWWty{U zbldrNN^GQGrb4Y-XFl5UahQBp*ZyNaUKM_?Y~hZ)=>7PXI!3ZbLy|i?5XnjZjM}yMRi~N1<%gZDej6*AyA3nTnLb2$!;JC5>gCpH+hVRPb zesRKj!waM)hUupojqrug%~~H04Qd`#JwjnLpyVz0<4XHAZOCyO)~e_Plq_v)lE|&; zd{updIJo^rcSFq6f?COjvQSG-ZFtm4zfF%4Y@*wqY>>=aI3bJ~AsD?*Wtzpqam#c& z+LRZ@Tq|!vBFDl%B7$hfRap6iz3aWMW;{Bx^b4o;f%n=&pH79o%33LzFRLjL!_deu zxr;uE`rs;lz*8q=XbCUR>dK)N0*C6ewRaI#{a`C^7ekmKwjYjvbZHNadRw%BAaqoM z;`Ll52bm?T^(u}37Wt+u#@4JgfUI=UF7R!|bD63>Zb>*iyN(HDxiV*qz$&J)+7mmvsN1SvEYivf=cLnSa(X{P6 zu_#pe)$6QFcR#e$cDwVZ-iOx;el{!9_m)>R{BnQeg_Vbr?s&M%KqaFm0{EW)Jno!^ z?do2Y+Qhq6(ajev4^nR3=A|MA?@*7}$i{wE73SL_xnUoYPtzHd%z#-8FW`4F=`kr< z*o34b8ko&O#|AOJH;3=M7zy7NW1TSW-H=<+aPeXqy4ByX^Vac#qqt55=2n<*&Em&J z9q(@#BNU3>tJERx`hJRce$65q40I&;z7}OI@k8vghe?0loYg#{ShTuFso@=#VV!i~ zoD03MM(ms&+}_X;zEq1X(sr)v1HX;1T_hv_MBmZKr*iAN(5Ag=Er+UGv!w9)gjJya z?|=R8)wL2am%8pKmMA^6wv_$O()(e&hK+=bmD@OaL{{QFzzSTH%uGLX{ zxh{Yn8j{0zAcj$V?Rgc&Sy<~7^RA$u4no@Ed&5epy@^Fi_re@6dzOrkB)-$LKk$B_ zAmdF#_1YI{{@bOiddiLwTRQ~}?7Wqf34is*OYFPUdtf8x^UWJst>6w^cJ&C_v4OX8Y2A-g+Pl+UZyeef zYkFEAt9>tfS8NGlqEs>KI;*VFQ$|TM+YwbOd#-%FeDDqW=KD*|e&yvSid6^rq-_*G z=Cp-(zSDuUNGaQoH4?Qz;Jr$E&Ee_>MEl2V^>_#L+1#G7IFFigE`l;};q_V=D&nn{ z*&B3mmZep|f;S~ZoFN*w`VQYV<-`L78)?4Xf@`)Z-$+Ph4z_BitTFN1Bwu_mZejPr z0_EGaYnHsIQkGg8y5(EeDEFGv=*gWzH_CtL5G^W2WxtLN=v_0_Qg4(!-VW^-}a!Trh9!Td%9rL+rl3)$r!BrCr*v zG^IMXHNz9_$8a4I>&*N`Ia zWA9W0Z##d(*N%232Oe$_lifpc8%T0l z4=|(x_qsyL_EiQHsz`6jjR*SI)vfP7(GNB3UAc@a)3dt%F5zX|K+b~5du}`(+fwHn zeSPa%Lb16~`Y>ptfFt5wPi5jp*T};!0`#4gKNHs$vCcwB%N&rambkUc;5mY~R8t*8 z`1hB{M)xWBb8Je_tmDdZHZ97hUJs<3H$TG;)IWe~AIgfdudHi{ABCnvea$d$w3qv3 zaXoCvr?x3r9DT42ieLL?cM?i2Z57JxjMUpp&dGsI0uSS|txl!ub=BN@PZ4()&p7`6 zK$UtFO3qHx;`6dg#YyJB>Y21nxT@;r^LmN7q80LD{yRe{Hi?uoHp;( zP77RyD)0ZEqN)TBToE$?^{Mid}f7>84%*lUr(=R#d+< z^N73pGTqK;V@+9c6{~F^B~bw%(0oEND z3wJ4Ua(b{%IfI`I2ky&AhcgyDIoLvh(X9>K%mVNqA#(icarZy_p@eHULWK4}~u`7Dk3 zb)Sz=UY*QCkI1VG66Gf{fGh86*BMLgUUBV9$^I7Jvdd1 zomyRo_WyhR&vclB5Zbx|lBBnPiy@HZPC-0_ zBO47Pohr^%XX*#l-A}&GtFb3B{3qVfZp~_t4 z)OvURuCkOtTaimKo+$ksdf>&B*9(;=_+17abFSS=l5jB0$uHq>(AGi5*Y(bOUwxNm zBN^y#eMF5Ak!vS7DxHRrPSXw<+ha$Hg~4x+-?msvKbcda_2FXCfwq0kEzCATUD5KJ zZDVrP!Fb(<*L{>7`Hii4^)#wZ`jXLC`5&81y>yj^M`EJI48ofx{`w|87E-?HisGruvW-{%(Nho?Wc@nBsuWO~}i!IN_V$P#I9>ai4bPtuA#W 
zZDs0NrPC~>GYxxOAnR`ZNVZSEn5V*XzU!*H&F(G!#+~I25<2y3YF(EgT%dVxP8q~s zS)nOrTG1b>Am~mwipgHIBhza?HKdB&#eSfCEcULkO+m#dHX(}8k*K8+wpsUN>KDW3 zYab>cT*zgf25!n?NlN(UmS;)1ez9JTLjE?AN$68Mu0*LcNbHYqYslUoeg4GOFx7$p zzkTXXb`gc2EK{$_P;%8hlT>gvv@%Yj^Iik?w|A0tXY_-5D5ss@pVkyTS_4D)Kg|nS zzbpk>*W4BHD4q!QmJ7YeOOzXd8b0w2jX(>G3{py6i#Np2S6! zYR?8b@7sS_nwy}czkfmUrc#v0oF1f2F=dw ze5w~UZ9`{=cmHdzMR(SW4EGc%z?*RY2)efrN;}FAs6zS>ey%ctxAgt3Qym=_p8(*d%lSA zib|EIQRUqXVfs~$uA;|g*SA*|wyIc-Y6?a?xE8kE@3n$gj-IR2diE9h_EU17&)RPzzA&p= zXQ|1Z2bE#t%leATeBV_qmh7XXI2QNoUsv<5BoAohmxwEFNs>jV7cMF+Q`_Rcs)E(4T!CIJi6)qw&*C&9Sd_9gRebTycrlHydddh@kWL(1iLC~zxw)4fm@(kXsPt&n4+Yl z?&FoHbX8`d>)OYp@pqh~QsvieVxykg=md5S^*PraC}0F_ITs;TGy3XW3+v&;`2H0+ z_iKNA4W1aN8~*{AOwjL))_se)*|EA_B6T;bL8x6S1FCP za{kOZ5&pk+?zievs5k>(7QP;R>x?v=d$V_INEYo<{inM!sYedh_@-=M^L~7A4w#+#p!vA#&p&ef?O#E&P|xIGcQ|%L%|Jvae zE-6`+Rj#|rZj($#IK^k%(vJb--%KbAH&L|1B@)8=1uVbF9^|D9SPtVWY68w9t_GIm zx;#|u8(L63?s~N5f~$v;&jTf;rZa~YZ;ZU=6s@=-=PYZ_!fQT72qj0^wkPznx4fQ# z$VYIT{kwIBZY1cX zxU+t0odF$P2r+{K`(;3wJrkg$shME9_1BpIRc#-dgAf|K!8HH$_DjUbzV6-ODFrVy=kkjcvH4=tNV5}xq}nh3Xd+qEnTl{aq| zkbevvuDQCcX%qRPWZHlbvhYbE!{s}Fndt%Q#=`cq+Ru8p$sQ&QZx z`wxDKds4LKl85*nsj6MC<+^(9OkItoY6=O#2=v41QJ(y#!uW3!HmhOSu+>xTDnGXH zpZ(k4R+v$n!4)R-`plj$)SjbV`J(ld+Uw&?PCnWxYj*xrQ;hJ*J>!$@`ma`)dPuP6 zt4{*%C}3lpAwtH(W;^3aJsiYTa`NL?;xvkIaC&z z#{!%w3>Mu-l}<+CwXj4D6cLX{X=nj{B|PvSkHZ1~2?XF?OADo?N@tJ_oTIcHd3ZZ( z3nxFOy>ArSCdyDhT$RHmvpHM@9Hn?+%z^Gp=YX5){#-7bghbBrX(IpYBKtq!A_E1iH~W_(0P}Q_z52OD zI$OY?o6em$V_H)EDr#Xveo_R#;pHpkz0(pqPY0A9(o_A8pq-TJszcsW68pee6cPg% zRxtc#T-npc)pCY$U5|v$A+b>A%|fzbQFK_Yal%VwmF_KNb*YCyLMUXu#&w!dL3Pzu z!VDJ~25^YMrkzy3JE8!G)0FqjmF}Yf(?Y>4s4UQXhJm4oBrJ-A)|~R5*>R|TK~xfq z6$}S`q@Xi;4j0-#DgO4jP3Dcl!yM^sAX_{ZD2IgBBB7@9{^>!Rej^8{zUgmiu z=Z+wer>_W_^EJuSgn&st4;;3gG|9kG5HBrFj4qWM$tE*YXuQ0YLyMsIn9vDKke;N>fw+Wgn5UPjKld-5R?B%>SA9iK_4^;~q!2kNGE-`) za8wk61S~5UezUI1)7CX>hIJJ`8ao%Eg5Wh(-_9RsZ9${)SH`5jbW_jcCkcz^Yh9-a z6%aWtar3aQvn<(fXGZ>fU29k+5 zhyr)>zrc5H>Hboi+3j5@fCk_v< heXdtCM{S_9-UWV{GgJ%q4Y+0k$sE@JL-$`?{{ye9y%PWc literal 0 HcmV?d00001 diff --git a/testing/btest/core/tunnels/ayiya.test b/testing/btest/core/tunnels/ayiya.test new file mode 100644 index 0000000000..043e06c621 --- /dev/null +++ b/testing/btest/core/tunnels/ayiya.test @@ -0,0 +1,4 @@ +# @TEST-EXEC: bro -r $TRACES/tunnels/ayiya3.trace +# @TEST-EXEC: btest-diff tunnel.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff http.log From f36d96144da10acf0a4ea104de393fb87427f4d9 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 5 Jun 2012 15:34:30 -0500 Subject: [PATCH 365/651] Add a config.h definition for IPPROTO_IPV4. Some older systems may only have IPPROTO_IPIP, the same value, but less clear to read. --- config.h.in | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/config.h.in b/config.h.in index c2cb3ec1dc..5368d6824e 100644 --- a/config.h.in +++ b/config.h.in @@ -165,6 +165,10 @@ #ifndef HAVE_IPPROTO_IPV6 #define IPPROTO_IPV6 41 #endif +#cmakedefine HAVE_IPPROTO_IPV4 +#ifndef HAVE_IPPROTO_IPV4 +#define IPPROTO_IPV4 4 +#endif #cmakedefine HAVE_IPPROTO_ROUTING #ifndef HAVE_IPPROTO_ROUTING #define IPPROTO_ROUTING 43 From 5db027e39f35c189130628bc03b0417dad951f17 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Tue, 5 Jun 2012 17:23:50 -0400 Subject: [PATCH 366/651] Fix timestamp overflow bug. 
--- src/logging/writers/ElasticSearch.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 61f3734f87..a073ea7e7c 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -115,7 +115,7 @@ char* ElasticSearch::FieldToString(Value* val, const Field* field) case TYPE_INTERVAL: case TYPE_TIME: - sprintf(result, "\"%d\"", (int) (val->val.double_val * 1000)); return result; + sprintf(result, "\"%llu\"", (unsigned long long) (val->val.double_val * 1000)); return result; case TYPE_DOUBLE: sprintf(result, "\"%s\"", Render(val->val.double_val).c_str()); return result; From a3b330dbc6ddb0502a004e37a8977d22395d8ef9 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Tue, 5 Jun 2012 17:24:13 -0400 Subject: [PATCH 367/651] Make default index name 'bro'. --- doc/logging-elasticsearch.rst | 2 +- scripts/base/frameworks/logging/writers/elasticsearch.bro | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst index b3cf062de4..4fce470d4a 100644 --- a/doc/logging-elasticsearch.rst +++ b/doc/logging-elasticsearch.rst @@ -80,7 +80,7 @@ Bro's ElasticSearch writer comes with a few configuration options:: - server_port: What port to send the data to. Default 9200. - index_name: ElasticSearch indexes are like databases in a standard DB model. - This is the name of the index to which to send the data. Default bro-logs. + This is the name of the index to which to send the data. Default bro. - type_prefix: ElasticSearch types are like tables in a standard DB model. This is a prefix that gets prepended to Bro log names. Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. Default: none. diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro index 82dbcc43d4..7f968d0042 100644 --- a/scripts/base/frameworks/logging/writers/elasticsearch.bro +++ b/scripts/base/frameworks/logging/writers/elasticsearch.bro @@ -11,7 +11,7 @@ export { const server_port = 9200 &redef; ## Name of the ES index - const index_name = "bro-logs" &redef; + const index_name = "bro" &redef; ## The ES type prefix comes before the name of the related log. ## e.g. prefix = "bro_" would create types of bro_dns, bro_software, etc. From 894dec006909477bc9bae177fde6a739bdf73173 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Tue, 5 Jun 2012 23:01:36 -0400 Subject: [PATCH 368/651] No quotes for times and doubles, since ES won't interpret them as numbers then. 
--- src/logging/writers/ElasticSearch.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index a073ea7e7c..3a7635a6a2 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -115,9 +115,9 @@ char* ElasticSearch::FieldToString(Value* val, const Field* field) case TYPE_INTERVAL: case TYPE_TIME: - sprintf(result, "\"%llu\"", (unsigned long long) (val->val.double_val * 1000)); return result; + sprintf(result, "%llu", (unsigned long long) (val->val.double_val * 1000)); return result; case TYPE_DOUBLE: - sprintf(result, "\"%s\"", Render(val->val.double_val).c_str()); return result; + sprintf(result, "%s", Render(val->val.double_val).c_str()); return result; case TYPE_ENUM: case TYPE_STRING: From 5e05e548ff0e5b640a448c29d6c64f1c378bdb10 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Tue, 5 Jun 2012 23:20:28 -0400 Subject: [PATCH 369/651] Change time printf format to use the more compatible PRIu64. --- src/logging/writers/ElasticSearch.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 3a7635a6a2..1817ce63ef 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -115,7 +115,7 @@ char* ElasticSearch::FieldToString(Value* val, const Field* field) case TYPE_INTERVAL: case TYPE_TIME: - sprintf(result, "%llu", (unsigned long long) (val->val.double_val * 1000)); return result; + sprintf(result, "%"PRIu64"", (uint64) (val->val.double_val * 1000)); return result; case TYPE_DOUBLE: sprintf(result, "%s", Render(val->val.double_val).c_str()); return result; From 0bdbeb89e230c041e4803db79e090866a3a42f33 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 6 Jun 2012 10:41:36 -0500 Subject: [PATCH 370/651] Memory leak fixes --- scripts/base/init-bare.bro | 5 +- src/IP.cc | 3 +- src/Tunnels.h | 4 +- .../btest/Baseline/core.leaks.ayiya/conn.log | 15 ++++ .../btest/Baseline/core.leaks.ayiya/http.log | 10 +++ .../Baseline/core.leaks.ayiya/tunnel.log | 11 +++ .../btest/Baseline/core.leaks.ip-in-ip/output | 13 +++ .../btest/Baseline/core.leaks.teredo/conn.log | 28 +++++++ .../btest/Baseline/core.leaks.teredo/http.log | 11 +++ .../btest/Baseline/core.leaks.teredo/output | 83 +++++++++++++++++++ .../Baseline/core.leaks.teredo/tunnel.log | 13 +++ testing/btest/core/leaks/ayiya.test | 10 +++ testing/btest/core/leaks/ip-in-ip.test | 33 ++++++++ testing/btest/core/leaks/teredo.bro | 41 +++++++++ 14 files changed, 276 insertions(+), 4 deletions(-) create mode 100644 testing/btest/Baseline/core.leaks.ayiya/conn.log create mode 100644 testing/btest/Baseline/core.leaks.ayiya/http.log create mode 100644 testing/btest/Baseline/core.leaks.ayiya/tunnel.log create mode 100644 testing/btest/Baseline/core.leaks.ip-in-ip/output create mode 100644 testing/btest/Baseline/core.leaks.teredo/conn.log create mode 100644 testing/btest/Baseline/core.leaks.teredo/http.log create mode 100644 testing/btest/Baseline/core.leaks.teredo/output create mode 100644 testing/btest/Baseline/core.leaks.teredo/tunnel.log create mode 100644 testing/btest/core/leaks/ayiya.test create mode 100644 testing/btest/core/leaks/ip-in-ip.test create mode 100644 testing/btest/core/leaks/teredo.bro diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 5ca9cdf330..f23a4e9714 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -1264,6 +1264,9 @@ 
type ip6_ext_hdr: record { mobility: ip6_mobility_hdr &optional; }; +## A type alias for a vector of IPv6 extension headers. +type ip6_ext_hdr_chain: vector of ip6_ext_hdr; + ## Values extracted from an IPv6 header. ## ## .. bro:see:: pkt_hdr ip4_hdr ip6_ext_hdr ip6_hopopts ip6_dstopts @@ -1278,7 +1281,7 @@ type ip6_hdr: record { hlim: count; ##< Hop limit. src: addr; ##< Source address. dst: addr; ##< Destination address. - exts: vector of ip6_ext_hdr; ##< Extension header chain. + exts: ip6_ext_hdr_chain; ##< Extension header chain. }; ## Values extracted from an IPv4 header. diff --git a/src/IP.cc b/src/IP.cc index f5598600d5..7113c4a678 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -583,7 +583,8 @@ VectorVal* IPv6_Hdr_Chain::BuildVal() const ip6_mob_type = internal_type("ip6_mobility_hdr")->AsRecordType(); } - VectorVal* rval = new VectorVal(new VectorType(ip6_ext_hdr_type->Ref())); + VectorVal* rval = new VectorVal( + internal_type("ip6_ext_hdr_chain")->AsVectorType()); for ( size_t i = 1; i < chain.size(); ++i ) { diff --git a/src/Tunnels.h b/src/Tunnels.h index 3365c8d0ca..c2f070c168 100644 --- a/src/Tunnels.h +++ b/src/Tunnels.h @@ -118,8 +118,8 @@ public: VectorVal* GetVectorVal() const { - VectorVal* vv = new VectorVal(new VectorType( - BifType::Record::Tunnel::EncapsulatingConn->Ref())); + VectorVal* vv = new VectorVal( + internal_type("EncapsulatingConnVector")->AsVectorType()); if ( conns ) for ( size_t i = 0; i < conns->size(); ++i ) vv->Assign(i, (*conns)[i].GetRecordVal(), 0); diff --git a/testing/btest/Baseline/core.leaks.ayiya/conn.log b/testing/btest/Baseline/core.leaks.ayiya/conn.log new file mode 100644 index 0000000000..5c23b4c404 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.ayiya/conn.log @@ -0,0 +1,15 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1257655301.595604 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 tcp http 2.101052 2981 4665 S1 - 0 ShADad 10 3605 11 5329 k6kgXLOoSKl +1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 udp ayiya 20.879001 5129 6109 SF - 0 Dd 21 5717 13 6473 (empty) +1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 udp ayiya - - - SHR - 0 d 0 0 1 176 (empty) +1257655296.585333 FrJExwHcSal :: 135 ff02::1:ff00:2 136 icmp - - - - OTH - 0 - 1 64 0 0 k6kgXLOoSKl +1257655293.629048 arKYeMETxOg 2001:4978:f:4c::1 128 2001:4978:f:4c::2 129 icmp - 23.834987 168 56 OTH - 0 - 3 312 1 104 UWkUyAuUGXf,k6kgXLOoSKl +1257655296.585188 TEfuqmmG4bh fe80::216:cbff:fe9a:4cb9 131 ff02::1:ff00:2 130 icmp - 0.919988 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl +1257655296.585151 j4u32Pc5bif fe80::216:cbff:fe9a:4cb9 131 ff02::2:f901:d225 130 icmp - 0.719947 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl +1257655296.585034 nQcgTWjvg4c fe80::216:cbff:fe9a:4cb9 131 ff02::1:ff9a:4cb9 130 icmp - 4.922880 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl diff --git a/testing/btest/Baseline/core.leaks.ayiya/http.log b/testing/btest/Baseline/core.leaks.ayiya/http.log new file mode 100644 index 0000000000..7cef1a1b8e --- /dev/null +++ b/testing/btest/Baseline/core.leaks.ayiya/http.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path 
http +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file +#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file +1257655301.652206 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 10102 200 OK - - - (empty) - - - text/html - - +1257655302.514424 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 2 GET ipv6.google.com /csi?v=3&s=webhp&action=&tran=undefined&e=17259,19771,21517,21766,21887,22212&ei=BUz2Su7PMJTglQfz3NzCAw&rt=prt.77,xjs.565,ol.645 http://ipv6.google.com/ Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 0 204 No Content - - - (empty) - - - - - - +1257655303.603569 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 3 GET ipv6.google.com /gen_204?atyp=i&ct=fade&cad=1254&ei=BUz2Su7PMJTglQfz3NzCAw&zx=1257655303600 http://ipv6.google.com/ Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 0 204 No Content - - - (empty) - - - - - - diff --git a/testing/btest/Baseline/core.leaks.ayiya/tunnel.log b/testing/btest/Baseline/core.leaks.ayiya/tunnel.log new file mode 100644 index 0000000000..69e331b477 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.ayiya/tunnel.log @@ -0,0 +1,11 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path tunnel +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type user +#types time string addr port addr port enum enum string +1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA - +1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA - +1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA - +1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA - diff --git a/testing/btest/Baseline/core.leaks.ip-in-ip/output b/testing/btest/Baseline/core.leaks.ip-in-ip/output new file mode 100644 index 0000000000..d8c6bee223 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.ip-in-ip/output @@ -0,0 +1,13 @@ +new_connection: tunnel + conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] + encap: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] +new_connection: tunnel + conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] + encap: [[cid=[orig_h=feed::beef, orig_p=0/unknown, resp_h=feed::cafe, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf], [cid=[orig_h=babe::beef, orig_p=0/unknown, resp_h=dead::babe, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=arKYeMETxOg]] +new_connection: tunnel + conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] + encap: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP, 
uid=UWkUyAuUGXf]] +tunnel_changed: + conn_id: [orig_h=dead::beef, orig_p=30000/udp, resp_h=cafe::babe, resp_p=13000/udp] + old: [[cid=[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=0/unknown, resp_h=2001:4f8:4:7:2e0:81ff:fe52:9a6b, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=UWkUyAuUGXf]] + new: [[cid=[orig_h=feed::beef, orig_p=0/unknown, resp_h=feed::cafe, resp_p=0/unknown], tunnel_type=Tunnel::IP, uid=k6kgXLOoSKl]] diff --git a/testing/btest/Baseline/core.leaks.teredo/conn.log b/testing/btest/Baseline/core.leaks.teredo/conn.log new file mode 100644 index 0000000000..151230886b --- /dev/null +++ b/testing/btest/Baseline/core.leaks.teredo/conn.log @@ -0,0 +1,28 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1210953047.736921 arKYeMETxOg 192.168.2.16 1576 75.126.130.163 80 tcp - 0.000357 0 0 SHR - 0 fA 1 40 1 40 (empty) +1210953050.867067 k6kgXLOoSKl 192.168.2.16 1577 75.126.203.78 80 tcp - 0.000387 0 0 SHR - 0 fA 1 40 1 40 (empty) +1210953057.833364 5OKnoww6xl4 192.168.2.16 1577 75.126.203.78 80 tcp - 0.079208 0 0 SH - 0 Fa 1 40 1 40 (empty) +1210953058.007081 VW0XPVINV8a 192.168.2.16 1576 75.126.130.163 80 tcp - - - - RSTOS0 - 0 R 1 40 0 0 (empty) +1210953057.834454 3PKsZ2Uye21 192.168.2.16 1578 75.126.203.78 80 tcp http 0.407908 790 171 RSTO - 0 ShADadR 6 1038 4 335 (empty) +1210953058.350065 fRFu0wcOle6 192.168.2.16 1920 192.168.2.1 53 udp dns 0.223055 66 438 SF - 0 Dd 2 122 2 494 (empty) +1210953058.577231 qSsw6ESzHV4 192.168.2.16 137 192.168.2.255 137 udp dns 1.499261 150 0 S0 - 0 D 3 234 0 0 (empty) +1210953074.264819 Tw8jXtpTGu6 192.168.2.16 1920 192.168.2.1 53 udp dns 0.297723 123 598 SF - 0 Dd 3 207 3 682 (empty) +1210953061.312379 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 tcp http 12.810848 1675 10467 S1 - 0 ShADad 10 2279 12 11191 GSxOnSLghOa +1210953076.058333 EAr0uf4mhq 192.168.2.16 1578 75.126.203.78 80 tcp - - - - RSTRH - 0 r 0 0 1 40 (empty) +1210953074.055744 h5DsfNtYzi1 192.168.2.16 1577 75.126.203.78 80 tcp - - - - RSTRH - 0 r 0 0 1 40 (empty) +1210953074.057124 P654jzLoe3a 192.168.2.16 1576 75.126.130.163 80 tcp - - - - RSTRH - 0 r 0 0 1 40 (empty) +1210953074.570439 c4Zw9TmAE05 192.168.2.16 1580 67.228.110.120 80 tcp http 0.466677 469 3916 SF - 0 ShADadFf 7 757 6 4164 (empty) +1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 udp teredo 8.928880 129 48 SF - 0 Dd 2 185 1 76 (empty) +1210953060.829233 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 udp teredo 13.293994 2359 11243 SF - 0 Dd 12 2695 13 11607 (empty) +1210953058.933954 iE6yhOq3SF 0.0.0.0 68 255.255.255.255 67 udp - - - - S0 - 0 D 1 328 0 0 (empty) +1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 udp teredo - - - SHR - 0 d 0 0 1 137 (empty) +1210953046.591933 UWkUyAuUGXf 192.168.2.16 138 192.168.2.255 138 udp - 28.448321 416 0 S0 - 0 D 2 472 0 0 (empty) +1210953052.324629 FrJExwHcSal fe80::8000:f227:bec8:61af 134 fe80::8000:ffff:ffff:fffd 133 icmp - - - - OTH - 0 - 1 88 0 0 TEfuqmmG4bh +1210953060.829303 qCaWGmzFtM5 2001:0:4137:9e50:8000:f12a:b9c8:2815 128 2001:4860:0:2001::68 129 icmp - 0.463615 4 4 OTH - 0 - 1 52 1 52 GSxOnSLghOa,nQcgTWjvg4c +1210953052.202579 
j4u32Pc5bif fe80::8000:ffff:ffff:fffd 133 ff02::2 134 icmp - - - - OTH - 0 - 1 64 0 0 nQcgTWjvg4c diff --git a/testing/btest/Baseline/core.leaks.teredo/http.log b/testing/btest/Baseline/core.leaks.teredo/http.log new file mode 100644 index 0000000000..b3cf832083 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.teredo/http.log @@ -0,0 +1,11 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path http +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file +#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file +1210953057.917183 3PKsZ2Uye21 192.168.2.16 1578 75.126.203.78 80 1 POST download913.avast.com /cgi-bin/iavs4stats.cgi - Syncer/4.80 (av_pro-1169;f) 589 0 204 - - - (empty) - - - text/plain - - +1210953061.585996 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 6640 200 OK - - - (empty) - - - text/html - - +1210953073.381474 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 2 GET ipv6.google.com /search?hl=en&q=Wireshark+!&btnG=Google+Search http://ipv6.google.com/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 25119 200 OK - - - (empty) - - - text/html - - +1210953074.674817 c4Zw9TmAE05 192.168.2.16 1580 67.228.110.120 80 1 GET www.wireshark.org / http://ipv6.google.com/search?hl=en&q=Wireshark+%21&btnG=Google+Search Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 11845 200 OK - - - (empty) - - - text/xml - - diff --git a/testing/btest/Baseline/core.leaks.teredo/output b/testing/btest/Baseline/core.leaks.teredo/output new file mode 100644 index 0000000000..02d5a41e74 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.teredo/output @@ -0,0 +1,83 @@ +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=24, nxt=58, hlim=255, src=fe80::8000:ffff:ffff:fffd, dst=ff02::2, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] +auth: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=24, nxt=58, hlim=255, src=fe80::8000:ffff:ffff:fffd, dst=ff02::2, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] + ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] + origin: [p=3797/udp, a=70.55.215.234] +auth: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] + ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] + origin: [p=3797/udp, a=70.55.215.234] +origin: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] + ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] + auth: [id=, value=, 
nonce=14796129349558001544, confirm=0] + origin: [p=3797/udp, a=70.55.215.234] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=12, nxt=58, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] + origin: [p=32900/udp, a=83.170.1.38] +origin: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] + origin: [p=32900/udp, a=83.170.1.38] +bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] + origin: [p=32900/udp, a=83.170.1.38] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=fe80::708d:fe83:4114:a512, exts=[]] +bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=fe80::708d:fe83:4114:a512, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=12, nxt=58, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=24, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=24, nxt=6, hlim=245, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=817, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, 
src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=514, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=898, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=812, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=717, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] diff --git a/testing/btest/Baseline/core.leaks.teredo/tunnel.log b/testing/btest/Baseline/core.leaks.teredo/tunnel.log new file mode 100644 index 0000000000..5549d66a29 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.teredo/tunnel.log @@ -0,0 +1,13 @@ +#separator \x09 
+#set_separator , +#empty_field (empty) +#unset_field - +#path tunnel +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type user +#types time string addr port addr port enum enum string +1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::DISCOVER Tunnel::TEREDO - +1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::DISCOVER Tunnel::TEREDO - +1210953061.292918 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::DISCOVER Tunnel::TEREDO - +1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::CLOSE Tunnel::TEREDO - +1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::CLOSE Tunnel::TEREDO - +1210953076.058333 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::CLOSE Tunnel::TEREDO - diff --git a/testing/btest/core/leaks/ayiya.test b/testing/btest/core/leaks/ayiya.test new file mode 100644 index 0000000000..adad42a822 --- /dev/null +++ b/testing/btest/core/leaks/ayiya.test @@ -0,0 +1,10 @@ +# Needs perftools support. +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -r $TRACES/tunnels/ayiya3.trace +# @TEST-EXEC: btest-diff tunnel.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff http.log diff --git a/testing/btest/core/leaks/ip-in-ip.test b/testing/btest/core/leaks/ip-in-ip.test new file mode 100644 index 0000000000..64fdf739f6 --- /dev/null +++ b/testing/btest/core/leaks/ip-in-ip.test @@ -0,0 +1,33 @@ +# Needs perftools support. +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r $TRACES/tunnels/6in6.pcap %INPUT >>output +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r $TRACES/tunnels/6in6in6.pcap %INPUT >>output +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r $TRACES/tunnels/6in6-tunnel-change.pcap %INPUT >>output +# @TEST-EXEC: btest-diff output + +event new_connection(c: connection) + { + if ( c?$tunnel ) + { + print "new_connection: tunnel"; + print fmt(" conn_id: %s", c$id); + print fmt(" encap: %s", c$tunnel); + } + else + { + print "new_connection: no tunnel"; + } + } + +event tunnel_changed(c: connection, e: EncapsulatingConnVector) + { + print "tunnel_changed:"; + print fmt(" conn_id: %s", c$id); + if ( c?$tunnel ) + print fmt(" old: %s", c$tunnel); + print fmt(" new: %s", e); + } diff --git a/testing/btest/core/leaks/teredo.bro b/testing/btest/core/leaks/teredo.bro new file mode 100644 index 0000000000..9902f1258b --- /dev/null +++ b/testing/btest/core/leaks/teredo.bro @@ -0,0 +1,41 @@ +# Needs perftools support. +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local bro -m -r $TRACES/tunnels/Teredo.pcap %INPUT >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff tunnel.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff http.log + +function print_teredo(name: string, outer: connection, inner: teredo_hdr) + { + print fmt("%s: %s", name, outer$id); + print fmt(" ip6: %s", inner$hdr$ip6); + if ( inner?$auth ) + print fmt(" auth: %s", inner$auth); + if ( inner?$origin ) + print fmt(" origin: %s", inner$origin); + } + +event teredo_packet(outer: connection, inner: teredo_hdr) + { + print_teredo("packet", outer, inner); + } + +event teredo_authentication(outer: connection, inner: teredo_hdr) + { + print_teredo("auth", outer, inner); + } + +event teredo_origin_indication(outer: connection, inner: teredo_hdr) + { + print_teredo("origin", outer, inner); + } + +event teredo_bubble(outer: connection, inner: teredo_hdr) + { + print_teredo("bubble", outer, inner); + } From 7599ac8f31fa9a4b0943408c3041be9ba7ece3d3 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 6 Jun 2012 11:50:15 -0500 Subject: [PATCH 371/651] Memory leak fixes for bad usages of VectorVal ctor. Many usages of the VectorVal ctor didn't account for the fact that it automatically Ref's the VectorType argument and end up leaking it. --- scripts/base/init-bare.bro | 12 ++++-- src/IP.cc | 12 +++--- src/bro.bif | 8 ++-- src/strings.bif | 4 +- .../core.leaks.ipv6_ext_headers/output | 4 ++ .../core.leaks.vector-val-bifs/output | 10 +++++ .../btest/core/leaks/ipv6_ext_headers.test | 37 +++++++++++++++++++ testing/btest/core/leaks/vector-val-bifs.test | 28 ++++++++++++++ 8 files changed, 100 insertions(+), 15 deletions(-) create mode 100644 testing/btest/Baseline/core.leaks.ipv6_ext_headers/output create mode 100644 testing/btest/Baseline/core.leaks.vector-val-bifs/output create mode 100644 testing/btest/core/leaks/ipv6_ext_headers.test create mode 100644 testing/btest/core/leaks/vector-val-bifs.test diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index da2b742725..515cbde6cb 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -977,6 +977,9 @@ type ip6_option: record { data: string; ##< Option data. }; +## A type alias for a vector of IPv6 options. +type ip6_options: vector of ip6_option; + ## Values extracted from an IPv6 Hop-by-Hop options extension header. ## ## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr ip6_option @@ -987,7 +990,7 @@ type ip6_hopopts: record { ## Length of header in 8-octet units, excluding first unit. len: count; ## The TLV encoded options; - options: vector of ip6_option; + options: ip6_options; }; ## Values extracted from an IPv6 Destination options extension header. @@ -1000,7 +1003,7 @@ type ip6_dstopts: record { ## Length of header in 8-octet units, excluding first unit. len: count; ## The TLV encoded options; - options: vector of ip6_option; + options: ip6_options; }; ## Values extracted from an IPv6 Routing extension header. @@ -1245,6 +1248,9 @@ type ip6_ext_hdr: record { mobility: ip6_mobility_hdr &optional; }; +## A type alias for a vector of IPv6 extension headers +type ip6_ext_hdr_chain: vector of ip6_ext_hdr; + ## Values extracted from an IPv6 header. ## ## .. bro:see:: pkt_hdr ip4_hdr ip6_ext_hdr ip6_hopopts ip6_dstopts @@ -1259,7 +1265,7 @@ type ip6_hdr: record { hlim: count; ##< Hop limit. src: addr; ##< Source address. dst: addr; ##< Destination address. - exts: vector of ip6_ext_hdr; ##< Extension header chain. + exts: ip6_ext_hdr_chain; ##< Extension header chain. 
}; ## Values extracted from an IPv4 header. diff --git a/src/IP.cc b/src/IP.cc index f5598600d5..45afd593a9 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -36,13 +36,12 @@ static inline RecordType* hdrType(RecordType*& type, const char* name) static VectorVal* BuildOptionsVal(const u_char* data, int len) { - VectorVal* vv = new VectorVal(new VectorType( - hdrType(ip6_option_type, "ip6_option")->Ref())); + VectorVal* vv = new VectorVal(internal_type("ip6_options")->AsVectorType()); while ( len > 0 ) { const struct ip6_opt* opt = (const struct ip6_opt*) data; - RecordVal* rv = new RecordVal(ip6_option_type); + RecordVal* rv = new RecordVal(hdrType(ip6_option_type, "ip6_option")); rv->Assign(0, new Val(opt->ip6o_type, TYPE_COUNT)); if ( opt->ip6o_type == 0 ) @@ -87,8 +86,8 @@ RecordVal* IPv6_Hdr::BuildRecordVal(VectorVal* chain) const rv->Assign(5, new AddrVal(IPAddr(ip6->ip6_src))); rv->Assign(6, new AddrVal(IPAddr(ip6->ip6_dst))); if ( ! chain ) - chain = new VectorVal(new VectorType( - hdrType(ip6_ext_hdr_type, "ip6_ext_hdr")->Ref())); + chain = new VectorVal( + internal_type("ip6_ext_hdr_chain")->AsVectorType()); rv->Assign(7, chain); } break; @@ -583,7 +582,8 @@ VectorVal* IPv6_Hdr_Chain::BuildVal() const ip6_mob_type = internal_type("ip6_mobility_hdr")->AsRecordType(); } - VectorVal* rval = new VectorVal(new VectorType(ip6_ext_hdr_type->Ref())); + VectorVal* rval = new VectorVal( + internal_type("ip6_ext_hdr_chain")->AsVectorType()); for ( size_t i = 1; i < chain.size(); ++i ) { diff --git a/src/bro.bif b/src/bro.bif index e1521adee8..5417ba3591 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -1494,8 +1494,8 @@ function sort%(v: any, ...%) : any ## .. bro:see:: sort function order%(v: any, ...%) : index_vec %{ - VectorVal* result_v = - new VectorVal(new VectorType(base_type(TYPE_COUNT))); + VectorVal* result_v = new VectorVal( + internal_type("index_vec")->AsVectorType()); if ( v->Type()->Tag() != TYPE_VECTOR ) { @@ -2331,7 +2331,7 @@ function is_v6_addr%(a: addr%): bool ## Returns: The vector of addresses contained in the routing header data. function routing0_data_to_addrs%(s: string%): addr_vec %{ - VectorVal* rval = new VectorVal(new VectorType(base_type(TYPE_ADDR))); + VectorVal* rval = new VectorVal(internal_type("addr_vec")->AsVectorType()); int len = s->Len(); const u_char* bytes = s->Bytes(); @@ -2362,7 +2362,7 @@ function routing0_data_to_addrs%(s: string%): addr_vec ## .. 
bro:see:: counts_to_addr function addr_to_counts%(a: addr%): index_vec %{ - VectorVal* rval = new VectorVal(new VectorType(base_type(TYPE_COUNT))); + VectorVal* rval = new VectorVal(internal_type("index_vec")->AsVectorType()); const uint32* bytes; int len = a->AsAddr().GetBytes(&bytes); diff --git a/src/strings.bif b/src/strings.bif index 27c11b4013..4c3b331b8a 100644 --- a/src/strings.bif +++ b/src/strings.bif @@ -875,8 +875,8 @@ function str_split%(s: string, idx: index_vec%): string_vec indices[i] = (*idx_v)[i]->AsCount(); BroString::Vec* result = s->AsString()->Split(indices); - VectorVal* result_v = - new VectorVal(new VectorType(base_type(TYPE_STRING))); + VectorVal* result_v = new VectorVal( + internal_type("string_vec")->AsVectorType()); if ( result ) { diff --git a/testing/btest/Baseline/core.leaks.ipv6_ext_headers/output b/testing/btest/Baseline/core.leaks.ipv6_ext_headers/output new file mode 100644 index 0000000000..5c2177718c --- /dev/null +++ b/testing/btest/Baseline/core.leaks.ipv6_ext_headers/output @@ -0,0 +1,4 @@ +weird routing0_hdr from 2001:4f8:4:7:2e0:81ff:fe52:ffff to 2001:78:1:32::2 +[orig_h=2001:4f8:4:7:2e0:81ff:fe52:ffff, orig_p=53/udp, resp_h=2001:78:1:32::2, resp_p=53/udp] +[ip=, ip6=[class=0, flow=0, len=59, nxt=0, hlim=64, src=2001:4f8:4:7:2e0:81ff:fe52:ffff, dst=2001:4f8:4:7:2e0:81ff:fe52:9a6b, exts=[[id=0, hopopts=[nxt=43, len=0, options=[[otype=1, len=4, data=\0\0\0\0]]], dstopts=, routing=, fragment=, ah=, esp=, mobility=], [id=43, hopopts=, dstopts=, routing=[nxt=17, len=4, rtype=0, segleft=2, data=\0\0\0\0 ^A\0x\0^A\02\0\0\0\0\0\0\0^A ^A\0x\0^A\02\0\0\0\0\0\0\0^B], fragment=, ah=, esp=, mobility=]]], tcp=, udp=[sport=53/udp, dport=53/udp, ulen=11], icmp=] +[2001:78:1:32::1, 2001:78:1:32::2] diff --git a/testing/btest/Baseline/core.leaks.vector-val-bifs/output b/testing/btest/Baseline/core.leaks.vector-val-bifs/output new file mode 100644 index 0000000000..4a57d29a71 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.vector-val-bifs/output @@ -0,0 +1,10 @@ +[1, 3, 0, 2] +[2374950123] +[1, 3, 0, 2] +[2374950123] +[1, 3, 0, 2] +[2374950123] +[1, 3, 0, 2] +[3353991673] +[1, 3, 0, 2] +[3353991673] diff --git a/testing/btest/core/leaks/ipv6_ext_headers.test b/testing/btest/core/leaks/ipv6_ext_headers.test new file mode 100644 index 0000000000..3b2497655c --- /dev/null +++ b/testing/btest/core/leaks/ipv6_ext_headers.test @@ -0,0 +1,37 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r $TRACES/ipv6-hbh-routing0.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +# Just check that the event is raised correctly for a packet containing +# extension headers. +event ipv6_ext_headers(c: connection, p: pkt_hdr) + { + print p; + } + +# Also check the weird for routing type 0 extensions headers +event flow_weird(name: string, src: addr, dst: addr) + { + print fmt("weird %s from %s to %s", name, src, dst); + } + +# And the connection for routing type 0 packets with non-zero segments left +# should use the last address in that extension header. 
+event new_connection(c: connection) + { + print c$id; + } + +event ipv6_ext_headers(c: connection, p: pkt_hdr) + { + for ( h in p$ip6$exts ) + if ( p$ip6$exts[h]$id == IPPROTO_ROUTING ) + if ( p$ip6$exts[h]$routing$rtype == 0 ) + print routing0_data_to_addrs(p$ip6$exts[h]$routing$data); + } + diff --git a/testing/btest/core/leaks/vector-val-bifs.test b/testing/btest/core/leaks/vector-val-bifs.test new file mode 100644 index 0000000000..d42e273bc5 --- /dev/null +++ b/testing/btest/core/leaks/vector-val-bifs.test @@ -0,0 +1,28 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# The BIFS used in this test originally didn't call the VectorVal() ctor right, +# assuming that it didn't automatically Ref the VectorType argument and thus +# leaked that memeory. +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r $TRACES/ftp-ipv4.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +function myfunc(aa: interval, bb: interval): int + { + if ( aa < bb ) + return -1; + else + return 1; + } + +event new_connection(c: connection) + { + local a = vector( 5, 2, 8, 3 ); + print order(a); + str_split("this is a test string", a); + print addr_to_counts(c$id$orig_h); + } From f0db2db9146aaaa29680e278693a35518953fa2a Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 6 Jun 2012 11:55:15 -0700 Subject: [PATCH 372/651] Updating submodule(s). [nomail] --- aux/broctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broctl b/aux/broctl index 892b60edb9..589cb04c3d 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 892b60edb967bb456872638f22ba994e84530137 +Subproject commit 589cb04c3d7e28a81aa07454e2b9b6b092f0e1af From 9a86a5e21f0ce305e2fc7bab44ac68d62c1d29a4 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 6 Jun 2012 12:21:24 -0700 Subject: [PATCH 373/651] Revert "Fixed a bug with the MIME analyzer not removing whitespace on wrapped headers." This reverts commit 89cb103a2c07aede9969ee586225c4d7b0411a29. --- src/MIME.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/MIME.cc b/src/MIME.cc index 11f764266d..4a7c0268b0 100644 --- a/src/MIME.cc +++ b/src/MIME.cc @@ -426,8 +426,7 @@ void MIME_Entity::ContHeader(int len, const char* data) return; } - int ws = MIME_count_leading_lws(len, data); - current_header_line->append(len - ws, data + ws); + current_header_line->append(len, data); } void MIME_Entity::FinishHeader() From beacf581d331adc6e3f5a7886bfb1ae22c0112ed Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 6 Jun 2012 14:40:11 -0500 Subject: [PATCH 374/651] Just some cleanup/documentation of new tunnel-handling code. 
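
For orientation, a minimal usage sketch of the tunnel classes this commit documents
(a sketch only, not part of the patch; it assumes an ip_hdr of type IP_Hdr* and an
outer of type Encapsulation*, mirroring how NetSessions::DoNextPacket() uses them):

    #include "TunnelEncapsulation.h"

    // Record one IP-in-IP tunnel hop and inspect the encapsulation chain.
    EncapsulatingConn ec(ip_hdr->SrcAddr(), ip_hdr->DstAddr());
    outer->Add(ec);                      // append the new inner-most tunnel
    size_t depth = outer->Depth();       // number of nested tunnels so far

    if ( outer->LastType() == BifEnum::Tunnel::IP )
        {
        // Inner-most hop is an IP-in-IP tunnel.
        }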
--- scripts/base/frameworks/tunnels/main.bro | 3 +- src/CMakeLists.txt | 2 +- src/Conn.cc | 2 +- src/Conn.h | 2 +- src/Sessions.cc | 5 +- src/Sessions.h | 2 +- src/{Tunnels.cc => TunnelEncapsulation.cc} | 25 ++---- src/{Tunnels.h => TunnelEncapsulation.h} | 80 +++++++++++++++++-- .../Baseline/core.leaks.ayiya/tunnel.log | 12 +-- .../Baseline/core.leaks.teredo/tunnel.log | 16 ++-- .../Baseline/core.tunnels.ayiya/tunnel.log | 12 +-- .../Baseline/core.tunnels.teredo/tunnel.log | 16 ++-- 12 files changed, 112 insertions(+), 65 deletions(-) rename src/{Tunnels.cc => TunnelEncapsulation.cc} (68%) rename src/{Tunnels.h => TunnelEncapsulation.h} (51%) diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro index 4076e79cd5..32270cf898 100644 --- a/scripts/base/frameworks/tunnels/main.bro +++ b/scripts/base/frameworks/tunnels/main.bro @@ -2,7 +2,7 @@ ##! AYIYA, or IP-in-IP such as 6to4 where "IP" is either IPv4 or IPv6). ##! ##! For any connection that occurs over a tunnel, information about its -##! its encapsulating tunnels is also found in the *tunnel* field of +##! encapsulating tunnels is also found in the *tunnel* field of ##! :bro:type:`connection`. module Tunnel; @@ -35,7 +35,6 @@ export { action: Action &log; ## The type of tunnel. tunnel_type: Tunnel::Type &log; - user: string &log &optional; }; ## Logs all tunnels in an ecapsulation chain with action diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0a6c2a5c76..50e58d87e3 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -402,7 +402,7 @@ set(bro_SRCS Timer.cc Traverse.cc Trigger.cc - Tunnels.cc + TunnelEncapsulation.cc Type.cc UDP.cc Val.cc diff --git a/src/Conn.cc b/src/Conn.cc index ec62a1b944..9780b15dfc 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -13,7 +13,7 @@ #include "Timer.h" #include "PIA.h" #include "binpac.h" -#include "Tunnels.h" +#include "TunnelEncapsulation.h" void ConnectionTimer::Init(Connection* arg_conn, timer_func arg_timer, int arg_do_expire) diff --git a/src/Conn.h b/src/Conn.h index f2efa2971d..52c14598be 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -13,7 +13,7 @@ #include "RuleMatcher.h" #include "AnalyzerTags.h" #include "IPAddr.h" -#include "Tunnels.h" +#include "TunnelEncapsulation.h" class Connection; class ConnectionTimer; diff --git a/src/Sessions.cc b/src/Sessions.cc index 4e81ba1661..77ccd7aeb6 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -30,7 +30,7 @@ #include "DPM.h" #include "PacketSort.h" -#include "Tunnels.h" +#include "TunnelEncapsulation.h" // These represent NetBIOS services on ephemeral ports. 
They're numbered // so that we can use a single int to hold either an actual TCP/UDP server @@ -570,8 +570,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( it == ip_tunnels.end() ) { - EncapsulatingConn ec(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), - BifEnum::Tunnel::IP); + EncapsulatingConn ec(ip_hdr->SrcAddr(), ip_hdr->DstAddr()); ip_tunnels[tunnel_idx] = ec; outer->Add(ec); } diff --git a/src/Sessions.h b/src/Sessions.h index 9273a02787..b98fc7e432 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -11,7 +11,7 @@ #include "PacketFilter.h" #include "Stats.h" #include "NetVar.h" -#include "Tunnels.h" +#include "TunnelEncapsulation.h" #include struct pcap_pkthdr; diff --git a/src/Tunnels.cc b/src/TunnelEncapsulation.cc similarity index 68% rename from src/Tunnels.cc rename to src/TunnelEncapsulation.cc index 7ae87912d7..e7ee3e27d1 100644 --- a/src/Tunnels.cc +++ b/src/TunnelEncapsulation.cc @@ -1,13 +1,13 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "Tunnels.h" +#include "TunnelEncapsulation.h" #include "util.h" #include "Conn.h" EncapsulatingConn::EncapsulatingConn(Connection* c, BifEnum::Tunnel::Type t) : src_addr(c->OrigAddr()), dst_addr(c->RespAddr()), src_port(c->OrigPort()), dst_port(c->RespPort()), - type(t), uid(c->GetUID()) + proto(c->ConnTransport()), type(t), uid(c->GetUID()) { if ( ! uid ) { @@ -18,28 +18,13 @@ EncapsulatingConn::EncapsulatingConn(Connection* c, BifEnum::Tunnel::Type t) RecordVal* EncapsulatingConn::GetRecordVal() const { - RecordVal *rv = - new RecordVal(BifType::Record::Tunnel::EncapsulatingConn); - TransportProto tproto; - switch ( type ) { - case BifEnum::Tunnel::AYIYA: - case BifEnum::Tunnel::TEREDO: - tproto = TRANSPORT_UDP; - break; - case BifEnum::Tunnel::SOCKS: - tproto = TRANSPORT_TCP; - break; - case BifEnum::Tunnel::IP: - default: - tproto = TRANSPORT_UNKNOWN; - break; - } // end switch + RecordVal *rv = new RecordVal(BifType::Record::Tunnel::EncapsulatingConn); RecordVal* id_val = new RecordVal(conn_id); id_val->Assign(0, new AddrVal(src_addr)); - id_val->Assign(1, new PortVal(ntohs(src_port), tproto)); + id_val->Assign(1, new PortVal(ntohs(src_port), proto)); id_val->Assign(2, new AddrVal(dst_addr)); - id_val->Assign(3, new PortVal(ntohs(dst_port), tproto)); + id_val->Assign(3, new PortVal(ntohs(dst_port), proto)); rv->Assign(0, id_val); rv->Assign(1, new EnumVal(type, BifType::Enum::Tunnel::Type)); char tmp[20]; diff --git a/src/Tunnels.h b/src/TunnelEncapsulation.h similarity index 51% rename from src/Tunnels.h rename to src/TunnelEncapsulation.h index c2f070c168..f0d07b0501 100644 --- a/src/Tunnels.h +++ b/src/TunnelEncapsulation.h @@ -11,30 +11,71 @@ class Connection; +/** + * Represents various types of tunnel "connections", that is, a pair of + * endpoints whose communication encapsulates inner IP packets. This could + * mean IP packets nested inside IP packets or IP packets nested inside a + * transport layer protocol. EncapsulatingConn's are assigned a UID, which can + * be shared with Connection's in the case the tunnel uses a transport-layer. + */ class EncapsulatingConn { public: + /** + * Default tunnel connection constructor. 
+ */ EncapsulatingConn() - : src_port(0), dst_port(0), type(BifEnum::Tunnel::NONE), uid(0) + : src_port(0), dst_port(0), proto(TRANSPORT_UNKNOWN), + type(BifEnum::Tunnel::NONE), uid(0) {} - EncapsulatingConn(const IPAddr& s, const IPAddr& d, - BifEnum::Tunnel::Type t) - : src_addr(s), dst_addr(d), src_port(0), dst_port(0), type(t) + /** + * Construct an IP tunnel "connection" with its own UID. + * The assignment of "source" and "destination" addresses here can be + * arbitrary, comparison between EncapsulatingConn objects will treat IP + * tunnels as equivalent as long as the same two endpoints are involved. + * + * @param s The tunnel source address, likely taken from an IP header. + * @param d The tunnel destination address, likely taken from an IP header. + */ + EncapsulatingConn(const IPAddr& s, const IPAddr& d) + : src_addr(s), dst_addr(d), src_port(0), dst_port(0), + proto(TRANSPORT_UNKNOWN), type(BifEnum::Tunnel::IP) { uid = calculate_unique_id(); } + /** + * Construct a tunnel connection using information from an already existing + * transport-layer-aware connection object. + * + * @param c The connection from which endpoint information can be extracted. + * If it already has a UID associated with it, that gets inherited, + * otherwise a new UID is created for this tunnel and \a c. + * @param t The type of tunneling that is occurring over the connection. + */ EncapsulatingConn(Connection* c, BifEnum::Tunnel::Type t); + /** + * Copy constructor. + */ EncapsulatingConn(const EncapsulatingConn& other) : src_addr(other.src_addr), dst_addr(other.dst_addr), src_port(other.src_port), dst_port(other.dst_port), - type(other.type), uid(other.uid) + proto(other.proto), type(other.type), uid(other.uid) {} + /** + * Destructor. + */ ~EncapsulatingConn() {} + BifEnum::Tunnel::Type Type() const + { return type; } + + /** + * Returns record value of type "EncapsulatingConn" representing the tunnel. + */ RecordVal* GetRecordVal() const; friend bool operator==(const EncapsulatingConn& ec1, @@ -43,12 +84,13 @@ public: if ( ec1.type != ec2.type ) return false; if ( ec1.type == BifEnum::Tunnel::IP ) - return ec1.uid == ec2.uid && + // Reversing endpoints is still same tunnel. + return ec1.uid == ec2.uid && ec1.proto == ec2.proto && ((ec1.src_addr == ec2.src_addr && ec1.dst_addr == ec2.dst_addr) || (ec1.src_addr == ec2.dst_addr && ec1.dst_addr == ec2.src_addr)); return ec1.src_addr == ec2.src_addr && ec1.dst_addr == ec2.dst_addr && ec1.src_port == ec2.src_port && ec1.dst_port == ec2.dst_port && - ec1.uid == ec2.uid; + ec1.uid == ec2.uid && ec1.proto == ec2.proto; } friend bool operator!=(const EncapsulatingConn& ec1, @@ -57,14 +99,19 @@ public: return ! ( ec1 == ec2 ); } +protected: IPAddr src_addr; IPAddr dst_addr; uint16 src_port; uint16 dst_port; + TransportProto proto; BifEnum::Tunnel::Type type; uint64 uid; }; +/** + * Abstracts an arbitrary amount of nested tunneling. + */ class Encapsulation { public: Encapsulation() : conns(0) @@ -99,6 +146,11 @@ public: ~Encapsulation() { delete conns; } + /** + * Add a new inner-most tunnel to the Encapsulation. + * + * @param c The new inner-most tunnel to append to the tunnel chain. + */ void Add(const EncapsulatingConn& c) { if ( ! conns ) @@ -106,16 +158,27 @@ public: conns->push_back(c); } + /** + * Return how many nested tunnels are involved in a encapsulation, zero + * meaning no tunnels are present. + */ size_t Depth() const { return conns ? conns->size() : 0; } + /** + * Return the tunnel type of the inner-most tunnel. 
+ */ BifEnum::Tunnel::Type LastType() const { - return conns ? (*conns)[conns->size()-1].type : BifEnum::Tunnel::NONE; + return conns ? (*conns)[conns->size()-1].Type() : BifEnum::Tunnel::NONE; } + /** + * Get the value of type "EncapsulatingConnVector" represented by the + * entire encapsulation chain. + */ VectorVal* GetVectorVal() const { VectorVal* vv = new VectorVal( @@ -133,6 +196,7 @@ public: return ! ( e1 == e2 ); } +protected: vector* conns; }; diff --git a/testing/btest/Baseline/core.leaks.ayiya/tunnel.log b/testing/btest/Baseline/core.leaks.ayiya/tunnel.log index 69e331b477..512f49b6ee 100644 --- a/testing/btest/Baseline/core.leaks.ayiya/tunnel.log +++ b/testing/btest/Baseline/core.leaks.ayiya/tunnel.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path tunnel -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type user -#types time string addr port addr port enum enum string -1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA - -1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA - -1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA - -1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA - +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type +#types time string addr port addr port enum enum +1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA +1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA +1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA +1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA diff --git a/testing/btest/Baseline/core.leaks.teredo/tunnel.log b/testing/btest/Baseline/core.leaks.teredo/tunnel.log index 5549d66a29..5a2114dd1c 100644 --- a/testing/btest/Baseline/core.leaks.teredo/tunnel.log +++ b/testing/btest/Baseline/core.leaks.teredo/tunnel.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path tunnel -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type user -#types time string addr port addr port enum enum string -1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::DISCOVER Tunnel::TEREDO - -1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::DISCOVER Tunnel::TEREDO - -1210953061.292918 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::DISCOVER Tunnel::TEREDO - -1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::CLOSE Tunnel::TEREDO - -1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::CLOSE Tunnel::TEREDO - -1210953076.058333 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::CLOSE Tunnel::TEREDO - +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type +#types time string addr port addr port enum enum +1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::DISCOVER Tunnel::TEREDO +1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::DISCOVER Tunnel::TEREDO +1210953061.292918 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::DISCOVER Tunnel::TEREDO +1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::CLOSE Tunnel::TEREDO +1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::CLOSE Tunnel::TEREDO +1210953076.058333 
TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::CLOSE Tunnel::TEREDO diff --git a/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log b/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log index 69e331b477..512f49b6ee 100644 --- a/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path tunnel -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type user -#types time string addr port addr port enum enum string -1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA - -1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA - -1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA - -1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA - +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type +#types time string addr port addr port enum enum +1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA +1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA +1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA +1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA diff --git a/testing/btest/Baseline/core.tunnels.teredo/tunnel.log b/testing/btest/Baseline/core.tunnels.teredo/tunnel.log index 5549d66a29..5a2114dd1c 100644 --- a/testing/btest/Baseline/core.tunnels.teredo/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.teredo/tunnel.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path tunnel -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type user -#types time string addr port addr port enum enum string -1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::DISCOVER Tunnel::TEREDO - -1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::DISCOVER Tunnel::TEREDO - -1210953061.292918 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::DISCOVER Tunnel::TEREDO - -1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::CLOSE Tunnel::TEREDO - -1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::CLOSE Tunnel::TEREDO - -1210953076.058333 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::CLOSE Tunnel::TEREDO - +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type +#types time string addr port addr port enum enum +1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::DISCOVER Tunnel::TEREDO +1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::DISCOVER Tunnel::TEREDO +1210953061.292918 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::DISCOVER Tunnel::TEREDO +1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::CLOSE Tunnel::TEREDO +1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::CLOSE Tunnel::TEREDO +1210953076.058333 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::CLOSE Tunnel::TEREDO From d1e4e6e812a99274aa85a29c16093b87bebaa499 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 6 Jun 2012 16:11:23 -0500 Subject: [PATCH 375/651] Include header for usleep(), caused compile failure on Archlinux. 
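
On POSIX systems usleep() is declared in <unistd.h>, so a minimal sketch of the
fix is adding that include next to the existing ones at the top of MsgThread.cc
(the exact placement here is illustrative):

    #include "MsgThread.h"
    #include "Manager.h"

    #include <unistd.h>   // declares usleep()

    using namespace threading;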
--- src/threading/MsgThread.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index c3f694cdc1..6a3d496325 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -4,6 +4,8 @@ #include "MsgThread.h" #include "Manager.h" +#include + using namespace threading; namespace threading { From 6f346c84060b9db6d06e931a67f47628ab72ebdd Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 7 Jun 2012 12:12:57 -0500 Subject: [PATCH 376/651] Add Teredo analysis option to reduce false positive decapsulation. The Tunnel::yielding_teredo_decapsulation (on by default) makes it so the Teredo analyzer doesn't attempt to decapsulate payloads when there's already a sibling analyzer that thinks it's parsing the right protocol. Sometimes, UDP payloads just happen to look like they are validly Teredo-encapsulated and doing further analysis on the decapsulated packet can quickly turn into a weird; this change helps reduce such weirds. --- scripts/base/init-bare.bro | 8 +++++ src/Analyzer.h | 4 +++ src/Teredo.cc | 28 +++++++++++++-- src/const.bif | 1 + .../core.tunnels.false-teredo/weird.log | 19 ++++++++++ .../btest/Traces/tunnels/false-teredo.pcap | Bin 0 -> 3098 bytes testing/btest/core/tunnels/false-teredo.bro | 34 ++++++++++++++++++ 7 files changed, 92 insertions(+), 2 deletions(-) create mode 100644 testing/btest/Baseline/core.tunnels.false-teredo/weird.log create mode 100644 testing/btest/Traces/tunnels/false-teredo.pcap create mode 100644 testing/btest/core/tunnels/false-teredo.bro diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index a356167cd7..879a4f5995 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -2705,6 +2705,14 @@ export { ## Toggle whether to do IPv6-in-Teredo decapsulation. const enable_teredo = T &redef; + + ## With this option set, the Teredo analysis will first check to see if + ## other protocol analyzers have confirmed that they think they're + ## parsing the right protocol and only continue with Teredo tunnel + ## decapsulation if nothing else has yet confirmed. This can help + ## reduce false positives of UDP traffic (e.g. DNS) that also happens + ## to have a valid Teredo encapsulation. + const yielding_teredo_decapsulation = T &redef; } # end export module GLOBAL; diff --git a/src/Analyzer.h b/src/Analyzer.h index ef596ac696..21dcba9fa9 100644 --- a/src/Analyzer.h +++ b/src/Analyzer.h @@ -343,6 +343,10 @@ private: for ( analyzer_list::iterator var = the_kids.begin(); \ var != the_kids.end(); var++ ) +#define LOOP_OVER_GIVEN_CONST_CHILDREN(var, the_kids) \ + for ( analyzer_list::const_iterator var = the_kids.begin(); \ + var != the_kids.end(); var++ ) + class SupportAnalyzer : public Analyzer { public: SupportAnalyzer(AnalyzerTag::Tag tag, Connection* conn, bool arg_orig) diff --git a/src/Teredo.cc b/src/Teredo.cc index 945e54ee18..21d8f90ee7 100644 --- a/src/Teredo.cc +++ b/src/Teredo.cc @@ -158,13 +158,37 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, int rslt = sessions->ParseIPPacket(len, te.InnerIP(), IPPROTO_IPV6, inner); if ( rslt == 0 ) - ProtocolConfirmation(); + { + if ( BifConst::Tunnel::yielding_teredo_decapsulation && + ! ProtocolConfirmed() ) + { + // Only confirm the Teredo tunnel and start decapsulating packets + // when no other sibling analyzer thinks it's already parsing the + // right protocol. 
+ bool sibling_has_confirmed = false; + if ( Parent() ) + { + LOOP_OVER_GIVEN_CONST_CHILDREN(i, Parent()->GetChildren()) + { + if ( (*i)->ProtocolConfirmed() ) + sibling_has_confirmed = true; + } + } + if ( ! sibling_has_confirmed ) + ProtocolConfirmation(); + } + else + { + // Aggressively decapsulate anything with valid Teredo encapsulation + ProtocolConfirmation(); + } + } else if ( rslt < 0 ) ProtocolViolation("Truncated Teredo", (const char*) data, len); else ProtocolViolation("Teredo payload length", (const char*) data, len); - if ( rslt != 0 ) return; + if ( rslt != 0 || ! ProtocolConfirmed() ) return; Val* teredo_hdr = 0; diff --git a/src/const.bif b/src/const.bif index 3e8fe4b53b..368ee34396 100644 --- a/src/const.bif +++ b/src/const.bif @@ -15,5 +15,6 @@ const Tunnel::max_depth: count; const Tunnel::enable_ip: bool; const Tunnel::enable_ayiya: bool; const Tunnel::enable_teredo: bool; +const Tunnel::yielding_teredo_decapsulation: bool; const Threading::heartbeat_interval: interval; diff --git a/testing/btest/Baseline/core.tunnels.false-teredo/weird.log b/testing/btest/Baseline/core.tunnels.false-teredo/weird.log new file mode 100644 index 0000000000..d2bafa0384 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.false-teredo/weird.log @@ -0,0 +1,19 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1258567191.405770 - - - - - truncated_header - F bro +1258567191.486869 UWkUyAuUGXf 192.168.1.105 57696 192.168.1.1 53 Teredo_payload_len_mismatch - F bro +1258578181.260420 - - - - - truncated_header - F bro +1258578181.516140 nQcgTWjvg4c 192.168.1.104 64838 192.168.1.1 53 Teredo_payload_len_mismatch - F bro +1258579063.557927 - - - - - truncated_header - F bro +1258579063.784919 j4u32Pc5bif 192.168.1.104 55778 192.168.1.1 53 Teredo_payload_len_mismatch - F bro +1258581768.568451 - - - - - truncated_header - F bro +1258581768.898165 TEfuqmmG4bh 192.168.1.104 50798 192.168.1.1 53 Teredo_payload_len_mismatch - F bro +1258584478.859853 - - - - - truncated_header - F bro +1258584478.989528 FrJExwHcSal 192.168.1.104 64963 192.168.1.1 53 Teredo_payload_len_mismatch - F bro +1258600683.934458 - - - - - truncated_header - F bro +1258600683.934672 5OKnoww6xl4 192.168.1.103 59838 192.168.1.1 53 Teredo_payload_len_mismatch - F bro diff --git a/testing/btest/Traces/tunnels/false-teredo.pcap b/testing/btest/Traces/tunnels/false-teredo.pcap new file mode 100644 index 0000000000000000000000000000000000000000..e82a6f41693b06517d460a0f976cbed141f9e8fb GIT binary patch literal 3098 zcmd6pe`r%z6vyv-xlLkiV`IB%WtO28hO}auCKj!;^oNtCLtFF&=(wfB$;N*C;fihybY)04n2zCG9nqchUXqvgvHUrf za-iqj`|kIi&pG$LcWvVA37$}r|2=v{2nWt0KMj<7>|`tak49>zZojykIFl{hIVb5N zq|0`QkYV>ny=;{02jgz-AU@)`oenL9=Wz-6Q6V0ZTtg{QOZF>rTu=waq%EYxaRRqz z;8F%%O6qpCT^BBVT@S+I=G&a}19huZ& z=z0E|LLu)uLOY-N_86-+^0vK792C;_uxbf5(6Nt@ZqP=vq}Wpa08>0g@+oFFLW=bV z>b+xTikv_rT39ZmcB}4aAFdpL)@p@mXmeBJRy9Aw)W+>*YT1q4l0!$lV|r>fG7p+& z&dQKd8J5mfMHUy0ImB>O927G)Sxgf4lriV|9i)beEo+*%Xt^*b#D#vG#n!Q&Tzk|I zy2Czoifu8lIGn_8lUt&!cXnj8CE~M0nkmWUT3WdaR!&e-N2&G|oU z9;`Bs=D-E4QjZ^4O2|{MlIMx@QWjt#eJnskQIbI+7UNTc4pmGgMO7=5?BtQguTiGq zri1(rlhKGSP;@k>_E%uZBAvOCNyG<2-(*in+1$~z5x%5MzJ8;O)XfXUQW|W+o|KBT|~3G*3Mf~wGo-|kkij^Zk59@bG5sjB{HG%*&_BtcDx?8%~W zs4G0No1HM$tdR-5@t*J&8!`Rfdn@^LmM}HOqntP+S2Lt0$I?cl#mo-VpJ6pQlU}(+{LF+CUvfo 
zl)*D-7J&CZ`56JWZZ!dd&{Q8)rV0RQNtt}3@_mG7RjkTr6!2)`(DgEp5vcQUaA+3!|O_Sp&U-QB;4mv#1V%MyD@nLK;ABVAG|jIq9tVFNYd31{@#|;A z3yQNN&6bGI67g6fO_qqCwU6U_9X@s5A0l+qbeC(q+Vp$6kH5*jH2t<^XO6QUb)UfA zV7?S&cTB(Q^k46cuQ2_#WvBQ2dVvPkw!5so5_;j9@j~H!*emDoyCaH)oc)*JgIY)NTj zt|g6M_><7<-`||heM)i5A15wHG#PJ{Ga;MsauoW~ X_{hSM`A!^xS5kyMizwXx^F96p8SMi; literal 0 HcmV?d00001 diff --git a/testing/btest/core/tunnels/false-teredo.bro b/testing/btest/core/tunnels/false-teredo.bro new file mode 100644 index 0000000000..ebb428f65a --- /dev/null +++ b/testing/btest/core/tunnels/false-teredo.bro @@ -0,0 +1,34 @@ +# @TEST-EXEC: bro -r $TRACES/tunnels/false-teredo.pcap %INPUT >output +# @TEST-EXEC: test ! -e weird.log +# @TEST-EXEC: bro -r $TRACES/tunnels/false-teredo.pcap %INPUT Tunnel::yielding_teredo_decapsulation=F >output +# @TEST-EXEC: btest-diff weird.log + +function print_teredo(name: string, outer: connection, inner: teredo_hdr) + { + print fmt("%s: %s", name, outer$id); + print fmt(" ip6: %s", inner$hdr$ip6); + if ( inner?$auth ) + print fmt(" auth: %s", inner$auth); + if ( inner?$origin ) + print fmt(" origin: %s", inner$origin); + } + +event teredo_packet(outer: connection, inner: teredo_hdr) + { + print_teredo("packet", outer, inner); + } + +event teredo_authentication(outer: connection, inner: teredo_hdr) + { + print_teredo("auth", outer, inner); + } + +event teredo_origin_indication(outer: connection, inner: teredo_hdr) + { + print_teredo("origin", outer, inner); + } + +event teredo_bubble(outer: connection, inner: teredo_hdr) + { + print_teredo("bubble", outer, inner); + } From 9d2a21c49044cea53a8d5b97ba9b2025081a3fac Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 7 Jun 2012 13:03:13 -0500 Subject: [PATCH 377/651] Extend weird names that occur in core packet processing during decapsulation. Appending a "_in_tunnel" to the weird name might help clarify that the weird is happening with a packet that is attempting to be processed as a result of decapsulation. --- src/Sessions.cc | 49 +++++++++++-------- src/Sessions.h | 10 ++-- .../core.tunnels.false-teredo/weird.log | 12 ++--- 3 files changed, 40 insertions(+), 31 deletions(-) diff --git a/src/Sessions.cc b/src/Sessions.cc index 77ccd7aeb6..1e0068acec 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -353,7 +353,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, uint32 len = ip_hdr->TotalLen(); if ( hdr->len < len + hdr_size ) { - Weird("truncated_IP", hdr, pkt); + Weird("truncated_IP", hdr, pkt, encapsulation); return; } @@ -365,7 +365,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( ! ignore_checksums && ip4 && ones_complement_checksum((void*) ip4, ip_hdr_len, 0) != 0xffff ) { - Weird("bad_IP_checksum", hdr, pkt); + Weird("bad_IP_checksum", hdr, pkt, encapsulation); return; } @@ -380,7 +380,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( caplen < len ) { - Weird("incompletely_captured_fragment", ip_hdr); + Weird("incompletely_captured_fragment", ip_hdr, encapsulation); // Don't try to reassemble, that's doomed. // Discard all except the first fragment (which @@ -432,7 +432,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( ! 
ignore_checksums && mobility_header_checksum(ip_hdr) != 0xffff ) { - Weird("bad_MH_checksum", hdr, pkt); + Weird("bad_MH_checksum", hdr, pkt, encapsulation); Remove(f); return; } @@ -445,7 +445,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } if ( ip_hdr->NextProto() != IPPROTO_NONE ) - Weird("mobility_piggyback", hdr, pkt); + Weird("mobility_piggyback", hdr, pkt, encapsulation); Remove(f); return; @@ -454,7 +454,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, int proto = ip_hdr->NextProto(); - if ( CheckHeaderTrunc(proto, len, caplen, hdr, pkt) ) + if ( CheckHeaderTrunc(proto, len, caplen, hdr, pkt, encapsulation) ) { Remove(f); return; @@ -525,7 +525,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, { if ( ! BifConst::Tunnel::enable_ip ) { - reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), "IP_tunnel"); + Weird("IP_tunnel", ip_hdr, encapsulation); Remove(f); return; } @@ -533,7 +533,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( encapsulation && encapsulation->Depth() >= BifConst::Tunnel::max_depth ) { - reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), "tunnel_depth"); + Weird("tunnel_depth", ip_hdr, encapsulation); Remove(f); return; } @@ -543,11 +543,9 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, int result = ParseIPPacket(caplen, data, proto, inner); if ( result < 0 ) - reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), - "truncated_inner_IP"); + Weird("truncated_inner_IP", ip_hdr, encapsulation); else if ( result > 0 ) - reporter->Weird(ip_hdr->SrcAddr(), ip_hdr->DstAddr(), - "inner_IP_payload_mismatch"); + Weird("inner_IP_payload_mismatch", ip_hdr, encapsulation); if ( result != 0 ) { @@ -599,7 +597,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } default: - Weird(fmt("unknown_protocol_%d", proto), hdr, pkt); + Weird(fmt("unknown_protocol_%d", proto), hdr, pkt, encapsulation); Remove(f); return; } @@ -746,7 +744,8 @@ int NetSessions::ParseIPPacket(int caplen, const u_char* const pkt, int proto, } bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, - const struct pcap_pkthdr* h, const u_char* p) + const struct pcap_pkthdr* h, + const u_char* p, const Encapsulation* encap) { uint32 min_hdr_len = 0; switch ( proto ) { @@ -775,13 +774,13 @@ bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, if ( len < min_hdr_len ) { - Weird("truncated_header", h, p); + Weird("truncated_header", h, p, encap); return true; } if ( caplen < min_hdr_len ) { - Weird("internally_truncated_header", h, p); + Weird("internally_truncated_header", h, p, encap); return true; } @@ -1298,18 +1297,26 @@ void NetSessions::Internal(const char* msg, const struct pcap_pkthdr* hdr, reporter->InternalError("%s", msg); } -void NetSessions::Weird(const char* name, - const struct pcap_pkthdr* hdr, const u_char* pkt) +void NetSessions::Weird(const char* name, const struct pcap_pkthdr* hdr, + const u_char* pkt, const Encapsulation* encap) { if ( hdr ) dump_this_packet = 1; - reporter->Weird(name); + if ( encap && encap->LastType() != BifEnum::Tunnel::NONE ) + reporter->Weird(fmt("%s_in_tunnel", name)); + else + reporter->Weird(name); } -void NetSessions::Weird(const char* name, const IP_Hdr* ip) +void NetSessions::Weird(const char* name, const IP_Hdr* ip, + const Encapsulation* encap) { - reporter->Weird(ip->SrcAddr(), ip->DstAddr(), name); + if ( encap && encap->LastType() != 
BifEnum::Tunnel::NONE ) + reporter->Weird(ip->SrcAddr(), ip->DstAddr(), + fmt("%s_in_tunnel", name)); + else + reporter->Weird(ip->SrcAddr(), ip->DstAddr(), name); } unsigned int NetSessions::ConnectionMemoryUsage() diff --git a/src/Sessions.h b/src/Sessions.h index b98fc7e432..28f331212b 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -108,9 +108,10 @@ public: void GetStats(SessionStats& s) const; - void Weird(const char* name, - const struct pcap_pkthdr* hdr, const u_char* pkt); - void Weird(const char* name, const IP_Hdr* ip); + void Weird(const char* name, const struct pcap_pkthdr* hdr, + const u_char* pkt, const Encapsulation* encap = 0); + void Weird(const char* name, const IP_Hdr* ip, + const Encapsulation* encap = 0); PacketFilter* GetPacketFilter() { @@ -231,7 +232,8 @@ protected: // from lower-level headers or the length actually captured is less // than that protocol's minimum header size. bool CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, - const struct pcap_pkthdr* hdr, const u_char* pkt); + const struct pcap_pkthdr* hdr, const u_char* pkt, + const Encapsulation* encap); CompositeHash* ch; PDict(Connection) tcp_conns; diff --git a/testing/btest/Baseline/core.tunnels.false-teredo/weird.log b/testing/btest/Baseline/core.tunnels.false-teredo/weird.log index d2bafa0384..989b7beede 100644 --- a/testing/btest/Baseline/core.tunnels.false-teredo/weird.log +++ b/testing/btest/Baseline/core.tunnels.false-teredo/weird.log @@ -5,15 +5,15 @@ #path weird #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1258567191.405770 - - - - - truncated_header - F bro +1258567191.405770 - - - - - truncated_header_in_tunnel - F bro 1258567191.486869 UWkUyAuUGXf 192.168.1.105 57696 192.168.1.1 53 Teredo_payload_len_mismatch - F bro -1258578181.260420 - - - - - truncated_header - F bro +1258578181.260420 - - - - - truncated_header_in_tunnel - F bro 1258578181.516140 nQcgTWjvg4c 192.168.1.104 64838 192.168.1.1 53 Teredo_payload_len_mismatch - F bro -1258579063.557927 - - - - - truncated_header - F bro +1258579063.557927 - - - - - truncated_header_in_tunnel - F bro 1258579063.784919 j4u32Pc5bif 192.168.1.104 55778 192.168.1.1 53 Teredo_payload_len_mismatch - F bro -1258581768.568451 - - - - - truncated_header - F bro +1258581768.568451 - - - - - truncated_header_in_tunnel - F bro 1258581768.898165 TEfuqmmG4bh 192.168.1.104 50798 192.168.1.1 53 Teredo_payload_len_mismatch - F bro -1258584478.859853 - - - - - truncated_header - F bro +1258584478.859853 - - - - - truncated_header_in_tunnel - F bro 1258584478.989528 FrJExwHcSal 192.168.1.104 64963 192.168.1.1 53 Teredo_payload_len_mismatch - F bro -1258600683.934458 - - - - - truncated_header - F bro +1258600683.934458 - - - - - truncated_header_in_tunnel - F bro 1258600683.934672 5OKnoww6xl4 192.168.1.103 59838 192.168.1.1 53 Teredo_payload_len_mismatch - F bro From 4223b5261b3a735a41e79e7e53807569619fb635 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 7 Jun 2012 13:51:23 -0700 Subject: [PATCH 378/651] small documentation fixes --- doc/input.rst | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/doc/input.rst b/doc/input.rst index 2038ccb22f..2e801a8eb2 100644 --- a/doc/input.rst +++ b/doc/input.rst @@ -2,9 +2,11 @@ Loading Data into Bro with the Input Framework ============================================== -Bro now features a flexible input frameworks that allows users -to import data into Bro. 
Data is either read into Bro tables or -converted to events which can then be handled by scripts. +.. rst-class:: opening + + Bro now features a flexible input frameworks that allows users + to import data into Bro. Data is either read into Bro tables or + converted to events which can then be handled by scripts. The input framework is merged into the git master and we will give a short summary on how to use it. @@ -64,8 +66,9 @@ The two records are defined as: reason: string; }; -Not ethat the record definition has to contain the same names as the fields -line in the log file. +ote that the names of the fields in the record definitions have to correspond to +the column names listed in the '#fields' line of the log file, in this case 'ip', +'timestamp', and 'reason'. The log file is read into the table with a simple call of the add_table function: @@ -89,7 +92,10 @@ sends it back to the main Bro thread. Because of this, the data is not immediately accessible. Depending on the size of the data source it might take from a few milliseconds up to a few seconds -until all data is present in the table. +until all data is present in the table. Please note that this means that when Bro +is running without an input source or on very short captured files, it might terminate +before the data is present in the system (because Bro already handled all packets +before the import thread finished). Subsequent calls to an input source are queued until the previous action has been completed. Because of this, it is, for example, possible to call ``add_table`` and @@ -306,11 +312,6 @@ Event streams work very similarly to table streams that were already discussed i detail. To read the blacklist of the previous example into an event stream, the following Bro code could be used: -Event Streams are streams that generate an event for each line in of the input source. - -For example, a simple stream retrieving the fields ``i`` and ``b`` from an input Source -could be defined as follows: - .. code:: bro type Val: record { From 852de4700c763c0960a6466052f330de2255e7a1 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 7 Jun 2012 14:12:42 -0700 Subject: [PATCH 379/651] automatically delete disabled input streams --- src/input/Manager.cc | 20 +++++++++++++++----- src/input/Manager.h | 17 ++++++++++++++++- src/input/ReaderBackend.cc | 9 +++++++-- src/input/ReaderFrontend.cc | 4 +--- 4 files changed, 39 insertions(+), 11 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index f35071081b..a39f911cd6 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -689,16 +689,14 @@ bool Manager::IsCompatibleType(BroType* t, bool atomic_only) } -bool Manager::RemoveStream(const string &name) +bool Manager::RemoveStream(Stream *i) { - Stream *i = FindStream(name); - if ( i == 0 ) return false; // not found if ( i->removed ) { - reporter->Error("Stream %s is already queued for removal. Ignoring remove.", name.c_str()); + reporter->Error("Stream %s is already queued for removal. 
Ignoring remove.", i->name.c_str()); return false; } @@ -708,12 +706,24 @@ bool Manager::RemoveStream(const string &name) #ifdef DEBUG DBG_LOG(DBG_INPUT, "Successfully queued removal of stream %s", - name.c_str()); + i->name.c_str()); #endif return true; } +bool Manager::RemoveStream(ReaderFrontend* frontend) + { + return RemoveStream(FindStream(frontend)); + } + + +bool Manager::RemoveStream(const string &name) + { + return RemoveStream(FindStream(name)); + } + + bool Manager::RemoveStreamContinuation(ReaderFrontend* reader) { Stream *i = FindStream(reader); diff --git a/src/input/Manager.h b/src/input/Manager.h index 400918366e..f33e54583d 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -72,7 +72,7 @@ public: /** * Deletes an existing input stream. * - * @param id The enum value corresponding the input stream. + * @param id The name of the input stream to be removed. * * This method corresponds directly to the internal BiF defined in * input.bif, which just forwards here. @@ -88,6 +88,7 @@ protected: friend class SendEntryMessage; friend class EndCurrentSendMessage; friend class ReaderClosedMessage; + friend class DisableMessage; // For readers to write to input stream in direct mode (reporting // new/deleted values directly). Functions take ownership of @@ -118,12 +119,26 @@ protected: // main thread. This makes sure all data that has ben queued for a // stream is still received. bool RemoveStreamContinuation(ReaderFrontend* reader); + + /** + * Deletes an existing input stream. + * + * @param frontend pointer to the frontend of the input stream to be removed. + * + * This method is used by the reader backends to remove a reader when it fails + * for some reason. + */ + bool RemoveStream(ReaderFrontend* frontend); private: class Stream; class TableStream; class EventStream; + // actual RemoveStream implementation -- the function public + // and protected function definitions are wrappers around this function. + bool RemoveStream(Stream* i); + bool CreateStream(Stream*, RecordVal* description); // SendEntry implementation for Table stream. diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index b5d898fedd..2c6fff7236 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -113,6 +113,7 @@ public: virtual bool Process() { + Object()->SetDisable(); return input_mgr->RemoveStreamContinuation(Object()); } @@ -129,6 +130,11 @@ public: virtual bool Process() { Object()->SetDisable(); + // and - because we do not need disabled objects any more - there is no way to re-enable them + // simply delete them. + // This avoids the problem of having to periodically check if there are any disabled readers + // out there. As soon as a reader disables itself, it deletes itself. + input_mgr->RemoveStream(Object()); return true; } }; @@ -203,8 +209,7 @@ bool ReaderBackend::Init(string arg_source, ReaderMode arg_mode, const int arg_n void ReaderBackend::Close() { DoClose(); - disabled = true; - DisableFrontend(); + disabled = true; // frontend disables itself when it gets the Close-message. SendOut(new ReaderClosedMessage(frontend)); if ( fields != 0 ) diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index 0236ac83be..a9a4c778dd 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -6,9 +6,6 @@ #include "threading/MsgThread.h" -// FIXME: cleanup of disabled inputreaders is missing. we need this, because -// stuff can e.g. fail in init and might never be removed afterwards. 
- namespace input { class InitMessage : public threading::InputMessage @@ -106,6 +103,7 @@ void ReaderFrontend::Close() return; } + disabled = true; backend->SendIn(new CloseMessage(backend)); } From 3e3ceda1a7d38dcecbaf075ba9e19f537a20801d Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 7 Jun 2012 14:36:03 -0700 Subject: [PATCH 380/651] disable streaming reads from executed commands. This lead to hanging bro's because pclose apparently can wait for eternity if things go wrong. And there probably are a couple of other problems with this approach. --- src/input/readers/Raw.cc | 15 +- .../out | 145 ------------------ .../frameworks/input/executestreamraw.bro | 58 ------- 3 files changed, 14 insertions(+), 204 deletions(-) delete mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out delete mode 100644 testing/btest/scripts/base/frameworks/input/executestreamraw.bro diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index fa1b09da7c..59899f32fc 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -79,6 +79,9 @@ bool Raw::CloseInput() InternalError(Fmt("Trying to close closed file for stream %s", fname.c_str())); return false; } +#ifdef DEBUG + Debug(DBG_INPUT, "Raw reader starting close"); +#endif delete in; @@ -90,6 +93,10 @@ bool Raw::CloseInput() in = NULL; file = NULL; +#ifdef DEBUG + Debug(DBG_INPUT, "Raw reader finished close"); +#endif + return true; } @@ -128,7 +135,7 @@ bool Raw::DoInit(string path, ReaderMode mode, int num_fields, const Field* cons execute = true; fname = path.substr(0, fname.length() - 1); - if ( (mode != MODE_MANUAL) && (mode != MODE_STREAM) ) + if ( (mode != MODE_MANUAL) ) { Error(Fmt("Unsupported read mode %d for source %s in execution mode", mode, fname.c_str())); @@ -254,8 +261,14 @@ bool Raw::DoHeartbeat(double network_time, double current_time) case MODE_REREAD: case MODE_STREAM: +#ifdef DEBUG + Debug(DBG_INPUT, "Starting Heartbeat update"); +#endif Update(); // call update and not DoUpdate, because update // checks disabled. 
+#ifdef DEBUG + Debug(DBG_INPUT, "Finished with heartbeat update"); +#endif break; default: assert(false); diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out deleted file mode 100644 index bb69da3267..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.input.executestreamraw/out +++ /dev/null @@ -1,145 +0,0 @@ -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line -{ -print A::outfile, A::description; -print A::outfile, A::tpe; -print A::outfile, A::s; -A::try = A::try + 1; -if (9 == A::try) -{ -print A::outfile, done; -close(A::outfile); -Input::remove(input); -} - -}] -Input::EVENT_NEW -sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line -{ -print A::outfile, A::description; -print A::outfile, A::tpe; -print A::outfile, A::s; -A::try = A::try + 1; -if (9 == A::try) -{ -print A::outfile, done; -close(A::outfile); -Input::remove(input); -} - -}] -Input::EVENT_NEW -DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line -{ -print A::outfile, A::description; -print A::outfile, A::tpe; -print A::outfile, A::s; -A::try = A::try + 1; -if (9 == A::try) -{ -print A::outfile, done; -close(A::outfile); -Input::remove(input); -} - -}] -Input::EVENT_NEW -q3r3057fdf -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line -{ -print A::outfile, A::description; -print A::outfile, A::tpe; -print A::outfile, A::s; -A::try = A::try + 1; -if (9 == A::try) -{ -print A::outfile, done; -close(A::outfile); -Input::remove(input); -} - -}] -Input::EVENT_NEW -sdfs\d -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line -{ -print A::outfile, A::description; -print A::outfile, A::tpe; -print A::outfile, A::s; -A::try = A::try + 1; -if (9 == A::try) -{ -print A::outfile, done; -close(A::outfile); -Input::remove(input); -} - -}] -Input::EVENT_NEW - -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line -{ -print A::outfile, A::description; -print A::outfile, A::tpe; -print A::outfile, A::s; -A::try = A::try + 1; -if (9 == A::try) -{ -print A::outfile, done; -close(A::outfile); -Input::remove(input); -} - -}] -Input::EVENT_NEW -dfsdf -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line -{ -print A::outfile, A::description; -print A::outfile, A::tpe; -print A::outfile, A::s; -A::try = A::try + 1; -if (9 == A::try) -{ -print A::outfile, done; -close(A::outfile); -Input::remove(input); -} - -}] -Input::EVENT_NEW -sdf -[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line -{ -print A::outfile, A::description; -print A::outfile, A::tpe; -print A::outfile, A::s; -A::try = A::try + 1; -if (9 == A::try) -{ -print A::outfile, done; -close(A::outfile); -Input::remove(input); -} - -}] -Input::EVENT_NEW -3rw43wRRERLlL#RWERERERE. 
-[source=tail -f ../input.log |, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line -{ -print A::outfile, A::description; -print A::outfile, A::tpe; -print A::outfile, A::s; -A::try = A::try + 1; -if (9 == A::try) -{ -print A::outfile, done; -close(A::outfile); -Input::remove(input); -} - -}] -Input::EVENT_NEW - -done diff --git a/testing/btest/scripts/base/frameworks/input/executestreamraw.bro b/testing/btest/scripts/base/frameworks/input/executestreamraw.bro deleted file mode 100644 index d97a7b26a0..0000000000 --- a/testing/btest/scripts/base/frameworks/input/executestreamraw.bro +++ /dev/null @@ -1,58 +0,0 @@ -# -# @TEST-EXEC: cp input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: sleep 3 -# @TEST-EXEC: cat input2.log >> input.log -# @TEST-EXEC: sleep 3 -# @TEST-EXEC: cat input3.log >> input.log -# @TEST-EXEC: btest-bg-wait -k 3 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input1.log -sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -@TEST-END-FILE - -@TEST-START-FILE input2.log -DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -q3r3057fdf -@TEST-END-FILE - -@TEST-START-FILE input3.log -sdfs\d - -dfsdf -sdf -3rw43wRRERLlL#RWERERERE. - -@TEST-END-FILE - -@load frameworks/communication/listen - -module A; - -type Val: record { - s: string; -}; - -global try: count; -global outfile: file; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) { - print outfile, description; - print outfile, tpe; - print outfile, s; - try = try + 1; - - if ( try == 9 ) { - print outfile, "done"; - close(outfile); - Input::remove("input"); - } -} - -event bro_init() -{ - outfile = open ("../out"); - try = 0; - Input::add_event([$source="tail -f ../input.log |", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line]); -} From 83dcbd4aa78f5c429e928eb191bf7cd8999886ac Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 7 Jun 2012 15:13:13 -0700 Subject: [PATCH 381/651] Updating submodule(s). [nomail] --- aux/bro-aux | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/bro-aux b/aux/bro-aux index 2038e3de04..f938c81ada 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 2038e3de042115c3caa706426e16c830c1fd1e9e +Subproject commit f938c81ada94641ab5f0231983edc2ba866b9a1f From e9c18b51a31fb1a7e6aba802d8fd1cc16f4927f7 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 8 Jun 2012 10:11:54 -0500 Subject: [PATCH 382/651] Add more error handling code to logging of enum vals. (addresses #829) If lookup of enum name by value fails, an error is now sent through the reporter framework and the value logged will be an empty string (as opposed to trying to construct a string with null pointer which throws a logic_error and aborts Bro). 
--- src/logging/Manager.cc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index baf832e6a9..f0b5cc1748 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -819,7 +819,13 @@ threading::Value* Manager::ValToLogVal(Val* val, BroType* ty) const char* s = val->Type()->AsEnumType()->Lookup(val->InternalInt()); - lval->val.string_val = new string(s); + if ( s ) + lval->val.string_val = new string(s); + else + { + val->Type()->Error("enum type does not contain value", val); + lval->val.string_val = new string(); + } break; } From 18e61fcdfcb7dca87fba5e07232bf52f21eb7814 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 8 Jun 2012 15:25:49 -0500 Subject: [PATCH 383/651] Fix val_size BIF tests and improve docs Improve documentation of "order" BIF, and made the "val_size" tests more portable. --- src/bro.bif | 2 ++ testing/btest/Baseline/bifs.val_size/out | 2 -- testing/btest/bifs/order.bro | 3 +-- testing/btest/bifs/val_size.bro | 16 ++++++++++------ 4 files changed, 13 insertions(+), 10 deletions(-) delete mode 100644 testing/btest/Baseline/bifs.val_size/out diff --git a/src/bro.bif b/src/bro.bif index b49c88058a..721d0704f1 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -1494,6 +1494,8 @@ function sort%(v: any, ...%) : any ## v: The vector whose order to compute. ## ## Returns: A ``vector of count`` with the indices of the ordered elements. +## For example, the elements of *v* in order are (assuming ``o`` +## is the vector returned by ``order``): v[o[0]], v[o[1]], etc. ## ## .. bro:see:: sort function order%(v: any, ...%) : index_vec diff --git a/testing/btest/Baseline/bifs.val_size/out b/testing/btest/Baseline/bifs.val_size/out deleted file mode 100644 index 16b548f269..0000000000 --- a/testing/btest/Baseline/bifs.val_size/out +++ /dev/null @@ -1,2 +0,0 @@ -72 -72 diff --git a/testing/btest/bifs/order.bro b/testing/btest/bifs/order.bro index 5f3260ee3f..333a8acac1 100644 --- a/testing/btest/bifs/order.bro +++ b/testing/btest/bifs/order.bro @@ -22,8 +22,7 @@ function myfunc2(a: double, b: double): int event bro_init() { - # TODO: these results don't make sense - + # Tests without supplying a comparison function local a1 = vector( 5, 2, 8, 3 ); diff --git a/testing/btest/bifs/val_size.bro b/testing/btest/bifs/val_size.bro index b779460b9b..5b2e535c5c 100644 --- a/testing/btest/bifs/val_size.bro +++ b/testing/btest/bifs/val_size.bro @@ -1,12 +1,16 @@ # -# @TEST-EXEC: bro %INPUT > out -# @TEST-EXEC: btest-diff out +# @TEST-EXEC: bro %INPUT event bro_init() { - local a = 1; - local b = T; + local a = T; + local b = 12; + local c: table[string] of addr = { ["a"] = 192.168.0.2, ["b"] = 10.0.0.2 }; + + if ( val_size(a) > val_size(b) ) + exit(1); + + if ( val_size(b) > val_size(c) ) + exit(1); - print val_size(a); - print val_size(b); } From 191994a60a8050bc04a900098da5bf2c66821a54 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 8 Jun 2012 16:51:45 -0500 Subject: [PATCH 384/651] Fix summary lines for BIF documentation The summary lines (the first sentence in the description) for some BIFs contained a period before the end of the sentence, so only part of the sentence would appear in the "summary" section of the HTML document (fixed by rewording the sentence). Some summary lines were too long (fixed by splitting the sentence with first sentence being more concise). Also corrected the description of "fmt" and "floor" BIFs. 
--- src/bro.bif | 49 ++++++++++++++++++++++++++----------------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/src/bro.bif b/src/bro.bif index 721d0704f1..1feccb8639 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -1671,9 +1671,10 @@ function cat_sep%(sep: string, def: string, ...%): string ## ## - ``[efg]``: Double ## -## Returns: Returns the formatted string. Given no arguments, :bro:id:`fmt`. Given -## no format string or the wrong number of additional arguments for the -## given format specifier, :bro:id:`fmt` generates a run-time error. +## Returns: Returns the formatted string. Given no arguments, :bro:id:`fmt` +## returns an empty string. Given no format string or the wrong +## number of additional arguments for the given format specifier, +## :bro:id:`fmt` generates a run-time error. ## ## .. bro:see:: cat cat_sep string_cat cat_string_array cat_string_array_n function fmt%(...%): string @@ -1717,8 +1718,9 @@ function fmt%(...%): string # # =========================================================================== -## Chops off any decimal digits of the given double, i.e., computes the -## "floor" of it. For example, ``floor(3.14)`` returns ``3.0``. +## Computes the greatest integer less than the given :bro:type:`double` value. +## For example, ``floor(3.14)`` returns ``3.0``, and ``floor(-3.14)`` +## returns ``-4.0``. ## ## d: The :bro:type:`double` to manipulate. ## @@ -1897,8 +1899,9 @@ function reading_traces%(%): bool return new Val(reading_traces, TYPE_BOOL); %} -## Returns statistics about the number of packets *(i)* received by Bro, -## *(ii)* dropped, and *(iii)* seen on the link (not always available). +## Returns packet capture statistics. Statistics include the number of +## packets *(i)* received by Bro, *(ii)* dropped, and *(iii)* seen on the +## link (not always available). ## ## Returns: A record of packet statistics. ## @@ -1932,9 +1935,9 @@ function net_stats%(%): NetStats return ns; %} -## Returns Bro process statistics, such as real/user/sys CPU time, memory -## usage, page faults, number of TCP/UDP/ICMP connections, timers, and events -## queued/dispatched. +## Returns Bro process statistics. Statistics include real/user/sys CPU time, +## memory usage, page faults, number of TCP/UDP/ICMP connections, timers, +## and events queued/dispatched. ## ## Returns: A record with resource usage statistics. ## @@ -2009,10 +2012,10 @@ function resource_usage%(%): bro_resources return res; %} -## Returns statistics about the regular expression engine, such as the number -## of distinct matchers, DFA states, DFA state transitions, memory usage of -## DFA states, cache hits/misses, and average number of NFA states across all -## matchers. +## Returns statistics about the regular expression engine. Statistics include +## the number of distinct matchers, DFA states, DFA state transitions, memory +## usage of DFA states, cache hits/misses, and average number of NFA states +## across all matchers. ## ## Returns: A record with matcher statistics. ## @@ -2192,10 +2195,10 @@ function record_fields%(rec: any%): record_field_table return fields; %} -## Enables detailed collections of statistics about CPU/memory usage, -## connections, TCP states/reassembler, DNS lookups, timers, and script-level -## state. The script variable :bro:id:`profiling_file` holds the name of the -## file. +## Enables detailed collection of profiling statistics. Statistics include +## CPU/memory usage, connections, TCP states/reassembler, DNS lookups, +## timers, and script-level state. 
The script variable :bro:id:`profiling_file` +## holds the name of the file. ## ## .. bro:see:: net_stats ## resource_usage @@ -2619,7 +2622,7 @@ function count_to_v4_addr%(ip: count%): addr return new AddrVal(htonl(uint32(ip))); %} -## Converts a :bro:type:`string` of bytes into an IP address. In particular, +## Converts a :bro:type:`string` of bytes into an IPv4 address. In particular, ## this function interprets the first 4 bytes of the string as an IPv4 address ## in network order. ## @@ -3018,8 +3021,8 @@ function decode_netbios_name_type%(name: string%): count return new Val(return_val, TYPE_COUNT); %} -## Converts a string of bytes into its hexadecimal representation, e.g., -## ``"04"`` to ``"3034"``. +## Converts a string of bytes into its hexadecimal representation. +## For example, ``"04"`` would be converted to ``"3034"``. ## ## bytestring: The string of bytes. ## @@ -3281,7 +3284,7 @@ function mask_addr%(a: addr, top_bits_to_keep: count%): subnet return new SubNetVal(a->AsAddr(), top_bits_to_keep); %} -## Takes some top bits (e.g., subnet address) from one address and the other +## Takes some top bits (such as a subnet address) from one address and the other ## bits (intra-subnet part) from a second address and merges them to get a new ## address. This is useful for anonymizing at subnet level while preserving ## serial scans. @@ -3518,7 +3521,7 @@ function skip_http_entity_data%(c: connection, is_orig: bool%): any return 0; %} -## Unescapes all characters in a URI, i.e., decodes every ``%xx`` group. +## Unescapes all characters in a URI (decode every ``%xx`` group). ## ## URI: The URI to unescape. ## From c5d3ea009d5f50148f4aee866b9b93c6c128fa1e Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 11 Jun 2012 15:35:09 -0500 Subject: [PATCH 385/651] Fix exceptions thrown in event handlers preventing others from running. If some expression in an event handler body causes an InterpreterException internally, then the rest of that body doesn't get executed, but also the bodies of any other handlers were not executed. 
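A short script sketch of the behavior being fixed (it parallels the expr-exception test updated in this patch and assumes the default scripts are loaded, which add the &optional ``ftp`` field to ``connection``): an expression that raises an interpreter exception should abort only the current handler body, while the remaining bodies for the same event still run.

    event connection_established(c: connection)
        {
        # Raises a runtime error for non-FTP connections; with this fix,
        # only the rest of *this* body is skipped.
        print c$ftp;
        print "not reached";
        }

    event connection_established(c: connection)
        {
        # The second handler body still executes after the fix.
        print "second handler still runs";
        }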
--- src/Func.cc | 11 +++++++++- .../btest/Baseline/core.expr-exception/output | 18 +++++++++++++++++ .../Baseline/core.expr-exception/reporter.log | 18 ++++++++--------- testing/btest/core/expr-exception.bro | 20 +++++++++++++++++-- 4 files changed, 55 insertions(+), 12 deletions(-) create mode 100644 testing/btest/Baseline/core.expr-exception/output diff --git a/src/Func.cc b/src/Func.cc index b6fc7f0785..82337d5311 100644 --- a/src/Func.cc +++ b/src/Func.cc @@ -329,7 +329,16 @@ Val* BroFunc::Call(val_list* args, Frame* parent) const bodies[i].stmts->GetLocationInfo()); Unref(result); - result = bodies[i].stmts->Exec(f, flow); + + try + { + result = bodies[i].stmts->Exec(f, flow); + } + catch ( InterpreterException& e ) + { + // already reported, but should continue exec'ing remaining bodies + continue; + } if ( f->HasDelayed() ) { diff --git a/testing/btest/Baseline/core.expr-exception/output b/testing/btest/Baseline/core.expr-exception/output new file mode 100644 index 0000000000..45abb333c6 --- /dev/null +++ b/testing/btest/Baseline/core.expr-exception/output @@ -0,0 +1,18 @@ +ftp field missing +[orig_h=141.142.220.118, orig_p=48649/tcp, resp_h=208.80.152.118, resp_p=80/tcp] +ftp field missing +[orig_h=141.142.220.118, orig_p=49997/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +ftp field missing +[orig_h=141.142.220.118, orig_p=49996/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +ftp field missing +[orig_h=141.142.220.118, orig_p=49998/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +ftp field missing +[orig_h=141.142.220.118, orig_p=50000/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +ftp field missing +[orig_h=141.142.220.118, orig_p=49999/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +ftp field missing +[orig_h=141.142.220.118, orig_p=50001/tcp, resp_h=208.80.152.3, resp_p=80/tcp] +ftp field missing +[orig_h=141.142.220.118, orig_p=35642/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +ftp field missing +[orig_h=141.142.220.235, orig_p=6705/tcp, resp_h=173.192.163.128, resp_p=80/tcp] diff --git a/testing/btest/Baseline/core.expr-exception/reporter.log b/testing/btest/Baseline/core.expr-exception/reporter.log index 3767de37d8..2d0441f48a 100644 --- a/testing/btest/Baseline/core.expr-exception/reporter.log +++ b/testing/btest/Baseline/core.expr-exception/reporter.log @@ -5,12 +5,12 @@ #path reporter #fields ts level message location #types time enum string string -1300475168.783842 Reporter::ERROR field value missing [c$ftp] /Users/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 8 -1300475168.915940 Reporter::ERROR field value missing [c$ftp] /Users/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 8 -1300475168.916118 Reporter::ERROR field value missing [c$ftp] /Users/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 8 -1300475168.918295 Reporter::ERROR field value missing [c$ftp] /Users/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 8 -1300475168.952193 Reporter::ERROR field value missing [c$ftp] /Users/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 8 -1300475168.952228 Reporter::ERROR field value missing [c$ftp] /Users/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 8 -1300475168.954761 Reporter::ERROR field value missing [c$ftp] /Users/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 8 -1300475168.962628 Reporter::ERROR field value missing [c$ftp] 
/Users/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 8 -1300475169.780331 Reporter::ERROR field value missing [c$ftp] /Users/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 8 +1300475168.783842 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.915940 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.916118 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.918295 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.952193 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.952228 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.954761 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.962628 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475169.780331 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 diff --git a/testing/btest/core/expr-exception.bro b/testing/btest/core/expr-exception.bro index 5225f092ba..66f9b78c4b 100644 --- a/testing/btest/core/expr-exception.bro +++ b/testing/btest/core/expr-exception.bro @@ -1,9 +1,25 @@ -# Bro shouldn't crash when doing nothing, nor outputting anything. +# Expressions in an event handler that raise interpreter exceptions +# shouldn't abort Bro entirely, but just return from the function body. # -# @TEST-EXEC: cat /dev/null | bro -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT >output # @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff reporter.log +# @TEST-EXEC: btest-diff output event connection_established(c: connection) { print c$ftp; + print "not reached"; + } + +event connection_established(c: connection) + { + if ( c?$ftp ) + print c$ftp; + else + print "ftp field missing"; + } + +event connection_established(c: connection) + { + print c$id; } From 1c7709ed89837626cd3af39455148b81b85dcdd7 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 11 Jun 2012 17:25:06 -0700 Subject: [PATCH 386/651] Updating submodule(s). 
[nomail] --- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- cmake | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aux/binpac b/aux/binpac index b4094cb75e..6f43a8115d 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit b4094cb75e0a7769123f7db1f5d73f3f9f1c3977 +Subproject commit 6f43a8115d8e6483a50957c5d21c5d69270ab3aa diff --git a/aux/bro-aux b/aux/bro-aux index f938c81ada..c6391412e9 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit f938c81ada94641ab5f0231983edc2ba866b9a1f +Subproject commit c6391412e902e896836450ab98910309b2ca2d9b diff --git a/aux/broccoli b/aux/broccoli index 4e17842743..0d139c09d5 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 4e17842743fef8df6abf0588c7ca86c6937a2b6d +Subproject commit 0d139c09d5a9c8623ecc2a5f395178f0ddcd7e16 diff --git a/aux/broctl b/aux/broctl index 589cb04c3d..880f3e48d3 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 589cb04c3d7e28a81aa07454e2b9b6b092f0e1af +Subproject commit 880f3e48d33bb28d17184656f858a4a0e2e1574c diff --git a/cmake b/cmake index 96f3d92aca..2a72c5e08e 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 96f3d92acadbe1ae64f410e974c5ff503903394b +Subproject commit 2a72c5e08e018cf632033af3920432d5f684e130 From 1f60c3db074d52dd71558d5fd8378b41d2f75375 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 12 Jun 2012 17:36:11 -0500 Subject: [PATCH 387/651] Fix many errors in the event documentation Fixed broken links, broken reST formatting, added missing event parameters to the documentation, removed documentation of event parameters that don't exist, corrected spelling errors, fixed some summary lines (i.e., the first line in each event description) that were truncated in the HTML, and reformatted some lines to fit within 80 columns (much easier to read the docs when using a text editor to view the source files). --- src/event.bif | 2073 ++++++++++++++++++++++++++----------------------- 1 file changed, 1095 insertions(+), 978 deletions(-) diff --git a/src/event.bif b/src/event.bif index af2381ecf6..d0418c42c9 100644 --- a/src/event.bif +++ b/src/event.bif @@ -10,7 +10,7 @@ # # - List parameters with an empty line in between. # -# - Within the description, reference other parameters of the same events +# - Within the description, reference other parameters of the same event # as *arg*. # # - Order: @@ -31,17 +31,18 @@ ## Generated at Bro initialization time. The event engine generates this ## event just before normal input processing begins. It can be used to execute ## one-time initialization code at startup. At the time a handler runs, Bro will -## have executed any global initializations and statements. +## have executed any global initializations and statements. ## ## .. bro:see:: bro_done ## ## .. note:: ## -## When a ``bro_init`` handler executes, Bro has not yet seen any input packets -## and therefore :bro:id:`network_time` is not initialized yet. An artifact -## of that is that any timer installed in a ``bro_init`` handler will fire -## immediately with the first packet. The standard way to work around that is to -## ignore the first time the timer fires and immediately reschedule. +## When a ``bro_init`` handler executes, Bro has not yet seen any input +## packets and therefore :bro:id:`network_time` is not initialized yet. An +## artifact of that is that any timer installed in a ``bro_init`` handler +## will fire immediately with the first packet. 
The standard way to work +## around that is to ignore the first time the timer fires and immediately +## reschedule. ## event bro_init%(%); @@ -54,13 +55,13 @@ event bro_init%(%); ## ## .. note:: ## -## If Bro terminates due to an invocation of :bro:id:`exit`, then this event is -## not generated. +## If Bro terminates due to an invocation of :bro:id:`exit`, then this event +## is not generated. event bro_done%(%); -## Generated when an internal DNS lookup reduces the same result as last time. +## Generated when an internal DNS lookup produces the same result as last time. ## Bro keeps an internal DNS cache for host names and IP addresses it has -## already resolved. This event is generated when subsequent lookup returns +## already resolved. This event is generated when a subsequent lookup returns ## the same result as stored in the cache. ## ## dm: A record describing the new resolver result (which matches the old one). @@ -69,10 +70,11 @@ event bro_done%(%); ## dns_mapping_unverified event dns_mapping_valid%(dm: dns_mapping%); -## Generated when an internal DNS lookup got no answer even though it had succeeded he -## past. Bro keeps an internal DNS cache for host names and IP addresses it has -## already resolved. This event is generated when a subsequent lookup does not -## produce an answer even though we have already stored a result in the cache. +## Generated when an internal DNS lookup got no answer even though it had +## succeeded in the past. Bro keeps an internal DNS cache for host names and IP +## addresses it has already resolved. This event is generated when a +## subsequent lookup does not produce an answer even though we have +## already stored a result in the cache. ## ## dm: A record describing the old resolver result. ## @@ -80,8 +82,8 @@ event dns_mapping_valid%(dm: dns_mapping%); ## dns_mapping_valid event dns_mapping_unverified%(dm: dns_mapping%); -## Generated when an internal DNS lookup succeeed but an earlier attempt not. had -## had succeeded he past. Bro keeps an internal DNS cache for host names and IP +## Generated when an internal DNS lookup succeeded but an earlier attempt +## did not. Bro keeps an internal DNS cache for host names and IP ## addresses it has already resolved. This event is generated when a subsequent ## lookup produces an answer for a query that was marked as failed in the cache. ## @@ -92,10 +94,10 @@ event dns_mapping_unverified%(dm: dns_mapping%); event dns_mapping_new_name%(dm: dns_mapping%); ## Generated when an internal DNS lookup returned zero answers even though it -## had succeeded he past. Bro keeps an internal DNS cache for host names and IP -## addresses it has already resolved. This event is generated when for a subsequent -## lookup we received answer that however was empty even though we have -## already stored a result in the cache. +## had succeeded in the past. Bro keeps an internal DNS cache for host names +## and IP addresses it has already resolved. This event is generated when +## on a subsequent lookup we receive an answer that is empty even +## though we have already stored a result in the cache. ## ## dm: A record describing the old resolver result. ## @@ -104,26 +106,26 @@ event dns_mapping_new_name%(dm: dns_mapping%); event dns_mapping_lost_name%(dm: dns_mapping%); ## Generated when an internal DNS lookup produced a different result than in -## past. Bro keeps an internal DNS cache for host names and IP addresses it has -## already resolved. 
This event is generated when a subsequent lookup returns -## a different answer than we have stored in the cache. +## the past. Bro keeps an internal DNS cache for host names and IP addresses +## it has already resolved. This event is generated when a subsequent lookup +## returns a different answer than we have stored in the cache. ## ## dm: A record describing the new resolver result. ## ## old_addrs: Addresses that used to be part of the returned set for the query ## described by *dm*, but are not anymore. ## -## new_addrs: Addresses that did not use to be part of the returned set for the -## query described by *dm*, but now are. +## new_addrs: Addresses that were not part of the returned set for the query +## described by *dm*, but now are. ## ## .. bro:see:: dns_mapping_lost_name dns_mapping_new_name dns_mapping_unverified ## dns_mapping_valid event dns_mapping_altered%(dm: dns_mapping, old_addrs: addr_set, new_addrs: addr_set%); -## Generated for every new connection. The event is raised with the first packet -## of a previously unknown connection. Bro uses a flow-based definition of -## "connection" here that includes not only TCP sessions but also UDP and ICMP -## flows. +## Generated for every new connection. This event is raised with the first +## packet of a previously unknown connection. Bro uses a flow-based definition +## of "connection" here that includes not only TCP sessions but also UDP and +## ICMP flows. ## ## c: The connection. ## @@ -141,7 +143,7 @@ event dns_mapping_altered%(dm: dns_mapping, old_addrs: addr_set, new_addrs: addr ## event. event new_connection%(c: connection%); -## Generated when reassembly starts for a TCP connection. The event is raised +## Generated when reassembly starts for a TCP connection. This event is raised ## at the moment when Bro's TCP analyzer enables stream reassembly for a ## connection. ## @@ -155,11 +157,11 @@ event new_connection%(c: connection%); ## expected_connection_seen new_connection partial_connection event new_connection_contents%(c: connection%); -## Generated for an unsuccessful connection attempt. The event is raised when an -## originator unsuccessfully attempted to establish a connection. "Unsuccessful" -## is defined as at least :bro:id:`tcp_attempt_delay` seconds having elapsed since -## the originator first sent a connection establishment packet to the destination -## without seeing a reply. +## Generated for an unsuccessful connection attempt. This event is raised when +## an originator unsuccessfully attempted to establish a connection. +## "Unsuccessful" is defined as at least :bro:id:`tcp_attempt_delay` seconds +## having elapsed since the originator first sent a connection establishment +## packet to the destination without seeing a reply. ## ## c: The connection. ## @@ -171,7 +173,7 @@ event new_connection_contents%(c: connection%); ## new_connection new_connection_contents partial_connection event connection_attempt%(c: connection%); -## Generated when a SYN-ACK packet is seen in response to SYN a packet during +## Generated when a SYN-ACK packet is seen in response to a SYN packet during ## a TCP handshake. 
The final ACK of the handshake in response to SYN-ACK may ## or may not occur later, one way to tell is to check the *history* field of ## :bro:type:`connection` to see if the originator sent an ACK, indicated by @@ -188,8 +190,9 @@ event connection_attempt%(c: connection%); event connection_established%(c: connection%); ## Generated for a new active TCP connection if Bro did not see the initial -## handshake. The event is raised when Bro has observed traffic from each endpoint, -## but the activity did not begin with the usual connection establishment. +## handshake. This event is raised when Bro has observed traffic from each +## endpoint, but the activity did not begin with the usual connection +## establishment. ## ## c: The connection. ## @@ -202,11 +205,11 @@ event connection_established%(c: connection%); ## event partial_connection%(c: connection%); -## Generated when a previously inactive endpoint attempts to close a TCP connection -## via a normal FIN handshake or an abort RST sequence. When the endpoint sent -## one of these packets, Bro waits :bro:id:`tcp_partial_close_delay` prior -## to generating the event, to give the other endpoint a chance to close the -## connection normally. +## Generated when a previously inactive endpoint attempts to close a TCP +## connection via a normal FIN handshake or an abort RST sequence. When the +## endpoint sent one of these packets, Bro waits +## :bro:id:`tcp_partial_close_delay` prior to generating the event, to give +## the other endpoint a chance to close the connection normally. ## ## c: The connection. ## @@ -245,9 +248,9 @@ event connection_finished%(c: connection%); ## new_connection new_connection_contents partial_connection event connection_half_finished%(c: connection%); -## Generated for a rejected TCP connection. The event is raised when an originator -## attempted to setup a TCP connection but the responder replied with a RST packet -## denying it. +## Generated for a rejected TCP connection. This event is raised when an +## originator attempted to setup a TCP connection but the responder replied +## with a RST packet denying it. ## ## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished @@ -261,9 +264,9 @@ event connection_half_finished%(c: connection%); ## .. note:: ## ## If the responder does not respond at all, :bro:id:`connection_attempt` is -## raised instead. If the responder initially accepts the connection but aborts -## it later, Bro first generates :bro:id:`connection_established` and then -## :bro:id:`connection_reset`. +## raised instead. If the responder initially accepts the connection but +## aborts it later, Bro first generates :bro:id:`connection_established` +## and then :bro:id:`connection_reset`. event connection_rejected%(c: connection%); ## Generated when an endpoint aborted a TCP connection. The event is raised @@ -296,9 +299,9 @@ event connection_pending%(c: connection%); ## Generated when a connection's internal state is about to be removed from ## memory. Bro generates this event reliably once for every connection when it ## is about to delete the internal state. As such, the event is well-suited for -## scrip-level cleanup that needs to be performed for every connection. The -## ``connection_state_remove`` event is generated not only for TCP sessions but -## also for UDP and ICMP flows. +## script-level cleanup that needs to be performed for every connection. 
This +## event is generated not only for TCP sessions but also for UDP and ICMP +## flows. ## ## c: The connection. ## @@ -311,8 +314,8 @@ event connection_pending%(c: connection%); ## tcp_inactivity_timeout icmp_inactivity_timeout conn_stats event connection_state_remove%(c: connection%); -## Generated for a SYN packet. Bro raises this event for every SYN packet seen by -## its TCP analyzer. +## Generated for a SYN packet. Bro raises this event for every SYN packet seen +## by its TCP analyzer. ## ## c: The connection. ## @@ -327,14 +330,15 @@ event connection_state_remove%(c: connection%); ## ## .. note:: ## -## This event has quite low-level semantics and can potentially be expensive to -## generate. It should only be used if one really needs the specific information -## passed into the handler via the ``pkt`` argument. If not, handling one of the -## other ``connection_*`` events is typically the better approach. +## This event has quite low-level semantics and can potentially be expensive +## to generate. It should only be used if one really needs the specific +## information passed into the handler via the ``pkt`` argument. If not, +## handling one of the other ``connection_*`` events is typically the +## better approach. event connection_SYN_packet%(c: connection, pkt: SYN_packet%); ## Generated for the first ACK packet seen for a TCP connection from -## its *orginator*. +## its *originator*. ## ## c: The connection. ## @@ -350,10 +354,10 @@ event connection_SYN_packet%(c: connection, pkt: SYN_packet%); ## This event has quite low-level semantics and should be used only rarely. event connection_first_ACK%(c: connection%); -## Generated when a TCP connection timed out. This event is raised when no activity -## was seen for an interval of at least :bro:id:`tcp_connection_linger`, and -## either one endpoint has already closed the connection or one side never -## never became active. +## Generated when a TCP connection timed out. This event is raised when +## no activity was seen for an interval of at least +## :bro:id:`tcp_connection_linger`, and either one endpoint has already +## closed the connection or one side never became active. ## ## c: The connection. ## @@ -366,17 +370,17 @@ event connection_first_ACK%(c: connection%); ## ## .. note:: ## -## The precise semantics of this event can be unintuitive as it only +## The precise semantics of this event can be unintuitive as it only ## covers a subset of cases where a connection times out. Often, handling ## :bro:id:`connection_state_remove` is the better option. That one will be -## generated reliably when an interval of ``tcp_inactivity_timeout`` has passed -## with out any activity seen (but also for all other ways a connection may -## terminate). +## generated reliably when an interval of ``tcp_inactivity_timeout`` has +## passed without any activity seen (but also for all other ways a +## connection may terminate). event connection_timeout%(c: connection%); -## Generated when a connection 4-tuple is reused. The event is raised when Bro -## sees a new TCP session or UDP flow using a 4-tuple matching that of an earlier -## connection it still consideres active. +## Generated when a connection 4-tuple is reused. This event is raised when Bro +## sees a new TCP session or UDP flow using a 4-tuple matching that of an +## earlier connection it still considers active. ## ## c: The connection. 
## @@ -388,8 +392,8 @@ event connection_timeout%(c: connection%); ## new_connection new_connection_contents partial_connection event connection_reused%(c: connection%); -## Generated in regular intervals during the life time of a connection. The -## events is raised each ``connection_status_update_interval`` seconds +## Generated in regular intervals during the lifetime of a connection. The +## event is raised each ``connection_status_update_interval`` seconds ## and can be used to check conditions on a regular basis. ## ## c: The connection. @@ -432,13 +436,17 @@ event connection_flow_label_changed%(c: connection, is_orig: bool, old_label: co ## new_connection new_connection_contents partial_connection event connection_EOF%(c: connection, is_orig: bool%); -## Generated for a new connection received from the communication subsystem. Remote -## peers can inject packets into Bro's packet loop, for example via :doc:`Broccoli -## `. The communication systems raises this event -## with the first packet of a connection coming in this way. +## Generated for a new connection received from the communication subsystem. +## Remote peers can inject packets into Bro's packet loop, for example via +## :doc:`Broccoli `. The communication system +## raises this event with the first packet of a connection coming in this way. +## +## c: The connection. +## +## tag: TODO. event connection_external%(c: connection, tag: string%); -## Generated when a connected is seen that has previously marked as being expected. +## Generated when a connection is seen that is marked as being expected. ## The function :bro:id:`expect_connection` tells Bro to expect a particular ## connection to come up, and which analyzer to associate with it. Once the ## first packet of such a connection is indeed seen, this event is raised. @@ -457,12 +465,12 @@ event connection_external%(c: connection, tag: string%); ## connection_state_remove connection_status_update connection_timeout ## new_connection new_connection_contents partial_connection ## -## .. todo: We don't have a good way to document the automatically generated +## .. todo:: We don't have a good way to document the automatically generated ## ``ANALYZER_*`` constants right now. event expected_connection_seen%(c: connection, a: count%); ## Generated for every packet Bro sees. This is a very low-level and expensive -## event that should be avoided when at all possible. Is's usually infeasible to +## event that should be avoided when at all possible. It's usually infeasible to ## handle when processing even medium volumes of traffic in real-time. That ## said, if you work from a trace and want to do some packet-level analysis, ## it may come in handy. @@ -475,7 +483,7 @@ event expected_connection_seen%(c: connection, a: count%); event new_packet%(c: connection, p: pkt_hdr%); ## Generated for every IPv6 packet that contains extension headers. -## This is potentially an expensive event to handle if analysiing IPv6 traffic +## This is potentially an expensive event to handle if analysing IPv6 traffic ## that happens to utilize extension headers frequently. ## ## c: The connection the packet is part of. @@ -500,26 +508,26 @@ event esp_packet%(p: pkt_hdr%); ## .. bro:see:: new_packet tcp_packet ipv6_ext_headers event mobile_ipv6_message%(p: pkt_hdr%); -## Generated for every packet that has non-empty transport-layer payload. This is a -## very low-level and expensive event that should be avoided when at all possible. 
-## It's usually infeasible to handle when processing even medium volumes of -## traffic in real-time. It's even worse than :bro:id:`new_packet`. That said, if -## you work from a trace and want to do some packet-level analysis, it may come in -## handy. +## Generated for every packet that has a non-empty transport-layer payload. +## This is a very low-level and expensive event that should be avoided when +## at all possible. It's usually infeasible to handle when processing even +## medium volumes of traffic in real-time. It's even worse than +## :bro:id:`new_packet`. That said, if you work from a trace and want to +## do some packet-level analysis, it may come in handy. ## ## c: The connection the packet is part of. ## -## contants: The raw transport-layer payload. +## contents: The raw transport-layer payload. ## ## .. bro:see:: new_packet tcp_packet event packet_contents%(c: connection, contents: string%); ## Generated for every TCP packet. This is a very low-level and expensive event -## that should be avoided when at all possible. It's usually infeasible to handle -## when processing even medium volumes of traffic in real-time. It's slightly -## better than :bro:id:`new_packet` because it affects only TCP, but not much. That -## said, if you work from a trace and want to do some packet-level analysis, it may -## come in handy. +## that should be avoided when at all possible. It's usually infeasible to +## handle when processing even medium volumes of traffic in real-time. It's +## slightly better than :bro:id:`new_packet` because it affects only TCP, but +## not much. That said, if you work from a trace and want to do some +## packet-level analysis, it may come in handy. ## ## c: The connection the packet is part of. ## @@ -535,8 +543,8 @@ event packet_contents%(c: connection, contents: string%); ## ## len: The length of the TCP payload, as specified in the packet header. ## -## payload: The raw TCP payload. Note that this may less than *len* if the packet -## was not fully captured. +## payload: The raw TCP payload. Note that this may be shorter than *len* if +## the packet was not fully captured. ## ## .. bro:see:: new_packet packet_contents tcp_option tcp_contents tcp_rexmit event tcp_packet%(c: connection, is_orig: bool, flags: string, seq: count, ack: count, len: count, payload: string%); @@ -563,9 +571,9 @@ event tcp_option%(c: connection, is_orig: bool, opt: count, optlen: count%); ## :bro:id:`tcp_content_delivery_ports_resp`, ## :bro:id:`tcp_content_deliver_all_orig`, ## :bro:id:`tcp_content_deliver_all_resp`), this event is raised for each chunk -## of in-order payload reconstructed from the packet stream. Note that this event -## is potentially expensive if many connections carry signficant amounts of data as -## then all that needs to be passed on to the scripting layer. +## of in-order payload reconstructed from the packet stream. Note that this +## event is potentially expensive if many connections carry significant amounts +## of data as then all that data needs to be passed on to the scripting layer. ## ## c: The connection the payload is part of. ## @@ -574,7 +582,7 @@ event tcp_option%(c: connection, is_orig: bool, opt: count, optlen: count%); ## seq: The sequence number corresponding to the first byte of the payload ## chunk. ## -## payload: The raw payload, which will be non-empty. +## contents: The raw payload, which will be non-empty. ## ## .. 
bro:see:: tcp_packet tcp_option tcp_rexmit ## tcp_content_delivery_ports_orig tcp_content_delivery_ports_resp @@ -590,13 +598,14 @@ event tcp_option%(c: connection, is_orig: bool, opt: count, optlen: count%); ## network-level effects such as latency, acknowledgements, reordering, etc. event tcp_contents%(c: connection, is_orig: bool, seq: count, contents: string%); -## Generated +## TODO. event tcp_rexmit%(c: connection, is_orig: bool, seq: count, len: count, data_in_flight: count, window: count%); ## Generated when Bro detects a TCP retransmission inconsistency. When -## reassemling TCP stream, Bro buffers all payload until it seens the responder -## acking it. If during time, the sender resends a chunk of payload but with -## content than originally, this event will be raised. +## reassembling a TCP stream, Bro buffers all payload until it sees the +## responder acking it. If during that time, the sender resends a chunk of +## payload but with different content than originally, this event will be +## raised. ## ## c: The connection showing the inconsistency. ## @@ -607,7 +616,7 @@ event tcp_rexmit%(c: connection, is_orig: bool, seq: count, len: count, data_in_ ## .. bro:see:: tcp_rexmit tcp_contents event rexmit_inconsistency%(c: connection, t1: string, t2: string%); -## Generated when a TCP endpoint acknowledges payload that Bro did never see. +## Generated when a TCP endpoint acknowledges payload that Bro never saw. ## ## c: The connection. ## @@ -621,19 +630,19 @@ event rexmit_inconsistency%(c: connection, t1: string, t2: string%); ## (which isn't unheard of). In practice, one will always see a few of these ## events in any larger volume of network traffic. If there are lots of them, ## however, that typically means that there is a problem with the monitoring -## infrastructure such as a tap dropping packets, split routing on the path, or -## reordering at the tap. +## infrastructure such as a tap dropping packets, split routing on the path, +## or reordering at the tap. ## -## This event reports similar situations as :bro:id:`content_gap`, though their -## specifics differ slightly. Often, however, both will be raised for the same -## connection if some of its data is missing. We should eventually merge -## the two. +## This event reports similar situations as :bro:id:`content_gap`, though +## their specifics differ slightly. Often, however, both will be raised for +## the same connection if some of its data is missing. We should eventually +## merge the two. event ack_above_hole%(c: connection%); -## Generated when Bro detects a gap in a reassembled TCP payload stream. This event -## is raised when Bro, while reassemling a payload stream, determines that a chunk -## of payload is missing (e.g., because the responder has already acknowledged it, -## even though Bro didn't see it). +## Generated when Bro detects a gap in a reassembled TCP payload stream. This +## event is raised when Bro, while reassembling a payload stream, determines +## that a chunk of payload is missing (e.g., because the responder has already +## acknowledged it, even though Bro didn't see it). ## ## c: The connection. ## @@ -647,25 +656,26 @@ event ack_above_hole%(c: connection%); ## ## .. note:: ## -## Content gaps tend to occur occasionally for various reasons, including broken -## TCP stacks. If, however, one finds lots of them, that typically means that -## there is a problem with the monitoring infrastructure such as a tap dropping -## packets, split routing on the path, or reordering at the tap. 
+## Content gaps tend to occur occasionally for various reasons, including +## broken TCP stacks. If, however, one finds lots of them, that typically +## means that there is a problem with the monitoring infrastructure such as +## a tap dropping packets, split routing on the path, or reordering at the +## tap. ## ## This event reports similar situations as :bro:id:`ack_above_hole`, though ## their specifics differ slightly. Often, however, both will be raised for -## connection if some of its data is missing. We should eventually merge the -## two. +## a connection if some of its data is missing. We should eventually merge +## the two. event content_gap%(c: connection, is_orig: bool, seq: count, length: count%); -## Summarizes the amount of missing TCP payload at regular intervals. Internally, -## Bro tracks (1) the number of :bro:id:`ack_above_hole` events, including the -## numer of bytes missing; and (2) the total number of TCP acks seen, with the -## total volume of bytes that have been acked. This event reports these statistics -## in :bro:id:`gap_report_freq` intervals for the purpose of determining packet -## loss. +## Summarizes the amount of missing TCP payload at regular intervals. +## Internally, Bro tracks (1) the number of :bro:id:`ack_above_hole` events, +## including the number of bytes missing; and (2) the total number of TCP +## acks seen, with the total volume of bytes that have been acked. This event +## reports these statistics in :bro:id:`gap_report_freq` intervals for the +## purpose of determining packet loss. ## -## dt: The time that has past since the last ``gap_report`` interval. +## dt: The time that has passed since the last ``gap_report`` interval. ## ## info: The gap statistics. ## @@ -673,17 +683,17 @@ event content_gap%(c: connection, is_orig: bool, seq: count, length: count%); ## ## .. note:: ## -## Bro comes with a script :doc:`/scripts/policy/misc/capture-loss` that uses -## this event to estimate packet loss and report when a predefined threshold is -## exceeded. +## Bro comes with a script :doc:`/scripts/policy/misc/capture-loss` that uses +## this event to estimate packet loss and report when a predefined threshold +## is exceeded. event gap_report%(dt: interval, info: gap_info%); ## Generated when a protocol analyzer confirms that a connection is indeed ## using that protocol. Bro's dynamic protocol detection heuristically activates -## analyzers as soon as it believe a connection *could* be using a particular -## protocol. It is then left to the corresponding analyzer to verify whether that -## is indeed the case; if so, this event will be generated. +## analyzers as soon as it believes a connection *could* be using a particular +## protocol. It is then left to the corresponding analyzer to verify whether +## that is indeed the case; if so, this event will be generated. ## ## c: The connection. ## @@ -694,24 +704,24 @@ event gap_report%(dt: interval, info: gap_info%); ## ## aid: A unique integer ID identifying the specific *instance* of the ## analyzer *atype* that is analyzing the connection ``c``. The ID can -## be used to reference the analyzer when using builtin functions like +## be used to reference the analyzer when using builtin functions like ## :bro:id:`disable_analyzer`. ## ## .. bro:see:: protocol_violation ## ## .. note:: ## -## Bro's default scripts use this event to determine the ``service`` column of -## :bro:type:`Conn::Info`: once confirmed, the protocol will be listed there -## (and thus in ``conn.log``). 
+## Bro's default scripts use this event to determine the ``service`` column +## of :bro:type:`Conn::Info`: once confirmed, the protocol will be listed +## there (and thus in ``conn.log``). event protocol_confirmation%(c: connection, atype: count, aid: count%); ## Generated when a protocol analyzer determines that a connection it is parsing -## is not conforming to the protocol it expects. Bro's dynamic protocol detection -## heuristically activates analyzers as soon as it believe a connection *could* be -## using a particular protocol. It is then left to the corresponding analyzer to -## verify whether that is indeed the case; if not, the analyzer will trigger this -## event. +## is not conforming to the protocol it expects. Bro's dynamic protocol +## detection heuristically activates analyzers as soon as it believes a +## connection *could* be using a particular protocol. It is then left to the +## corresponding analyzer to verify whether that is indeed the case; if not, +## the analyzer will trigger this event. ## ## c: The connection. ## @@ -722,20 +732,24 @@ event protocol_confirmation%(c: connection, atype: count, aid: count%); ## ## aid: A unique integer ID identifying the specific *instance* of the ## analyzer *atype* that is analyzing the connection ``c``. The ID can -## be used to reference the analyzer when using builtin functions like +## be used to reference the analyzer when using builtin functions like ## :bro:id:`disable_analyzer`. ## +## reason: TODO. +## ## .. bro:see:: protocol_confirmation ## ## .. note:: ## ## Bro's default scripts use this event to disable an analyzer via -## :bro:id:`disable_analyzer` if it's parsing the wrong protocol. That's however -## a script-level decision and not done automatically by the event eninge. +## :bro:id:`disable_analyzer` if it's parsing the wrong protocol. That's +## however a script-level decision and not done automatically by the event +## engine. event protocol_violation%(c: connection, atype: count, aid: count, reason: string%); ## Generated for each packet sent by a UDP flow's originator. This a potentially -## expsensive event due to the volume of UDP traffic and should be used with care. +## expensive event due to the volume of UDP traffic and should be used with +## care. ## ## u: The connection record for the corresponding UDP flow. ## @@ -743,7 +757,8 @@ event protocol_violation%(c: connection, atype: count, aid: count, reason: strin event udp_request%(u: connection%); ## Generated for each packet sent by a UDP flow's responder. This a potentially -## expsensive event due to the volume of UDP traffic and should be used with care. +## expensive event due to the volume of UDP traffic and should be used with +## care. ## ## u: The connection record for the corresponding UDP flow. ## @@ -751,35 +766,40 @@ event udp_request%(u: connection%); event udp_reply%(u: connection%); ## Generated for UDP packets to pass on their payload. As the number of UDP -## packets can be very large, this event is normally raised only for those on -## ports configured in :bro:id:`udp_content_delivery_ports_orig` (for packets sent -## by the flow's orgininator) or :bro:id:`udp_content_delivery_ports_resp` (for -## packets sent by the flow's responder). However, delivery can be enabled for all -## UDP request and reply packets by setting :bro:id:`udp_content_deliver_all_orig` -## or :bro:id:`udp_content_deliver_all_resp`, respectively. Note that this event is -## also raised for all matching UDP packets, including empty ones. 
+## packets can be very large, this event is normally raised only for those on +## ports configured in :bro:id:`udp_content_delivery_ports_orig` (for packets +## sent by the flow's originator) or :bro:id:`udp_content_delivery_ports_resp` +## (for packets sent by the flow's responder). However, delivery can be enabled +## for all UDP request and reply packets by setting +## :bro:id:`udp_content_deliver_all_orig` or +## :bro:id:`udp_content_deliver_all_resp`, respectively. Note that this +## event is also raised for all matching UDP packets, including empty ones. ## ## u: The connection record for the corresponding UDP flow. ## ## is_orig: True if the event is raised for the originator side. ## +## contents: TODO. +## ## .. bro:see:: udp_reply udp_request udp_session_done ## udp_content_deliver_all_orig udp_content_deliver_all_resp ## udp_content_delivery_ports_orig udp_content_delivery_ports_resp event udp_contents%(u: connection, is_orig: bool, contents: string%); ## Generated when a UDP session for a supported protocol has finished. Some of -## Bro's application-layer UDP analyzers flag the end of a session by raising this -## event. Currently, the analyzers for DNS, NTP, Netbios, and Syslog support this. +## Bro's application-layer UDP analyzers flag the end of a session by raising +## this event. Currently, the analyzers for DNS, NTP, Netbios, and Syslog +## support this. ## ## u: The connection record for the corresponding UDP flow. ## ## .. bro:see:: udp_contents udp_reply udp_request event udp_session_done%(u: connection%); -## Generated for all ICMP messages that are not handled separately with dedicated -## ICMP events. Bro's ICMP analyzer handles a number of ICMP messages directly -## with dedicated events. This event acts as a fallback for those it doesn't. +## Generated for all ICMP messages that are not handled separately with +## dedicated ICMP events. Bro's ICMP analyzer handles a number of ICMP messages +## directly with dedicated events. This event acts as a fallback for those it +## doesn't. ## ## See `Wikipedia ## `__ for more @@ -808,8 +828,8 @@ event icmp_sent%(c: connection, icmp: icmp_conn%); ## ## seq: The *echo request* sequence number. ## -## payload: The message-specific data of the packet payload, i.e., everything after -## the first 8 bytes of the ICMP header. +## payload: The message-specific data of the packet payload, i.e., everything +## after the first 8 bytes of the ICMP header. ## ## .. bro:see:: icmp_echo_reply event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); @@ -829,8 +849,8 @@ event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, ## ## seq: The *echo reply* sequence number. ## -## payload: The message-specific data of the packet payload, i.e., everything after -## the first 8 bytes of the ICMP header. +## payload: The message-specific data of the packet payload, i.e., everything +## after the first 8 bytes of the ICMP header. ## ## .. bro:see:: icmp_echo_request event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); @@ -851,8 +871,8 @@ event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, pa ## ## code: The ICMP code of the error message. ## -## context: A record with specifics of the original packet that the message refers -## to. +## context: A record with specifics of the original packet that the message +## refers to. ## ## .. 
bro:see:: icmp_unreachable icmp_packet_too_big ## icmp_time_exceeded icmp_parameter_problem @@ -871,10 +891,11 @@ event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: ## ## code: The ICMP code of the *unreachable* message. ## -## context: A record with specifics of the original packet that the message refers -## to. *Unreachable* messages should include the original IP header from the packet -## that triggered them, and Bro parses that into the *context* structure. Note -## that if the *unreachable* includes only a partial IP header for some reason, no +## context: A record with specifics of the original packet that the message +## refers to. *Unreachable* messages should include the original IP +## header from the packet that triggered them, and Bro parses that +## into the *context* structure. Note that if the *unreachable* +## includes only a partial IP header for some reason, no ## fields of *context* will be filled out. ## ## .. bro:see:: icmp_error_message icmp_packet_too_big @@ -894,11 +915,12 @@ event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: ic ## ## code: The ICMP code of the *too big* message. ## -## context: A record with specifics of the original packet that the message refers -## to. *Too big* messages should include the original IP header from the packet -## that triggered them, and Bro parses that into the *context* structure. Note -## that if the *too big* includes only a partial IP header for some reason, no -## fields of *context* will be filled out. +## context: A record with specifics of the original packet that the message +## refers to. *Too big* messages should include the original IP header +## from the packet that triggered them, and Bro parses that into +## the *context* structure. Note that if the *too big* includes only +## a partial IP header for some reason, no fields of *context* will +## be filled out. ## ## .. bro:see:: icmp_error_message icmp_unreachable ## icmp_time_exceeded icmp_parameter_problem @@ -917,11 +939,12 @@ event icmp_packet_too_big%(c: connection, icmp: icmp_conn, code: count, context: ## ## code: The ICMP code of the *exceeded* message. ## -## context: A record with specifics of the original packet that the message refers -## to. *Unreachable* messages should include the original IP header from the packet -## that triggered them, and Bro parses that into the *context* structure. Note that -## if the *exceeded* includes only a partial IP header for some reason, no fields -## of *context* will be filled out. +## context: A record with specifics of the original packet that the message +## refers to. *Unreachable* messages should include the original IP +## header from the packet that triggered them, and Bro parses that +## into the *context* structure. Note that if the *exceeded* includes +## only a partial IP header for some reason, no fields of *context* +## will be filled out. ## ## .. bro:see:: icmp_error_message icmp_unreachable icmp_packet_too_big ## icmp_parameter_problem @@ -940,10 +963,11 @@ event icmp_time_exceeded%(c: connection, icmp: icmp_conn, code: count, context: ## ## code: The ICMP code of the *parameter problem* message. ## -## context: A record with specifics of the original packet that the message refers -## to. *Parameter problem* messages should include the original IP header from the packet -## that triggered them, and Bro parses that into the *context* structure. 
Note that -## if the *parameter problem* includes only a partial IP header for some reason, no fields +## context: A record with specifics of the original packet that the message +## refers to. *Parameter problem* messages should include the original +## IP header from the packet that triggered them, and Bro parses that +## into the *context* structure. Note that if the *parameter problem* +## includes only a partial IP header for some reason, no fields ## of *context* will be filled out. ## ## .. bro:see:: icmp_error_message icmp_unreachable icmp_packet_too_big @@ -1076,13 +1100,14 @@ event icmp_redirect%(c: connection, icmp: icmp_conn, tgt: addr, dest: addr%); event conn_stats%(c: connection, os: endpoint_stats, rs: endpoint_stats%); ## Generated for unexpected activity related to a specific connection. When -## Bro's packet analysis encounters activity that does not conform to a protocol's -## specification, it raises one of the ``*_weird`` events to report that. This -## event is raised if the activity is tied directly to a specific connection. +## Bro's packet analysis encounters activity that does not conform to a +## protocol's specification, it raises one of the ``*_weird`` events to report +## that. This event is raised if the activity is tied directly to a specific +## connection. ## ## name: A unique name for the specific type of "weird" situation. Bro's default -## scripts use this name in filtering policies that specify which "weirds" are -## worth reporting. +## scripts use this name in filtering policies that specify which +## "weirds" are worth reporting. ## ## c: The corresponding connection. ## @@ -1091,20 +1116,21 @@ event conn_stats%(c: connection, os: endpoint_stats, rs: endpoint_stats%); ## .. bro:see:: flow_weird net_weird ## ## .. note:: "Weird" activity is much more common in real-world network traffic -## than one would intuitively expect. While in principle, any protocol violation -## could be an attack attempt, it's much more likely that an endpoint's -## implementation interprets an RFC quite liberally. +## than one would intuitively expect. While in principle, any protocol +## violation could be an attack attempt, it's much more likely that an +## endpoint's implementation interprets an RFC quite liberally. event conn_weird%(name: string, c: connection, addl: string%); ## Generated for unexpected activity related to a pair of hosts, but independent -## of a specific connection. When Bro's packet analysis encounters activity that -## does not conform to a protocol's specification, it raises one of the ``*_weird`` -## event to report that. This event is raised if the activity is related to a -## pair of hosts, yet not to a specific connection between them. +## of a specific connection. When Bro's packet analysis encounters activity +## that does not conform to a protocol's specification, it raises one of +## the ``*_weird`` events to report that. This event is raised if the activity +## is related to a pair of hosts, yet not to a specific connection between +## them. ## ## name: A unique name for the specific type of "weird" situation. Bro's default -## scripts use this name in filtering policies that specify which "weirds" are -## worth reporting. +## scripts use this name in filtering policies that specify which +## "weirds" are worth reporting. ## ## src: The source address corresponding to the activity. ## @@ -1113,47 +1139,47 @@ event conn_weird%(name: string, c: connection, addl: string%); ## .. bro:see:: conn_weird net_weird ## ## .. 
note:: "Weird" activity is much more common in real-world network traffic -## than one would intuitively expect. While in principle, any protocol violation -## could be an attack attempt, it's much more likely that an endpoint's -## implementation interprets an RFC quite liberally. +## than one would intuitively expect. While in principle, any protocol +## violation could be an attack attempt, it's much more likely that an +## endpoint's implementation interprets an RFC quite liberally. event flow_weird%(name: string, src: addr, dst: addr%); ## Generated for unexpected activity that is not tied to a specific connection ## or pair of hosts. When Bro's packet analysis encounters activity that ## does not conform to a protocol's specification, it raises one of the -## ``*_weird`` event to report that. This event is raised if the activity is +## ``*_weird`` events to report that. This event is raised if the activity is ## not tied directly to a specific connection or pair of hosts. ## ## name: A unique name for the specific type of "weird" situation. Bro's default -## scripts use this name in filtering policies that specify which "weirds" are -## worth reporting. +## scripts use this name in filtering policies that specify which +## "weirds" are worth reporting. ## ## .. bro:see:: flow_weird ## ## .. note:: "Weird" activity is much more common in real-world network traffic -## than one would intuitively expect. While in principle, any protocol violation -## could be an attack attempt, it's much more likely that an endpoint's -## implementation interprets an RFC quite liberally. +## than one would intuitively expect. While in principle, any protocol +## violation could be an attack attempt, it's much more likely that an +## endpoint's implementation interprets an RFC quite liberally. event net_weird%(name: string%); ## Generated regularly for the purpose of profiling Bro's processing. This event ## is raised for every :bro:id:`load_sample_freq` packet. For these packets, -## Bro records script-level functions executed during their processing as well as -## further internal locations. By sampling the processing in this form, one can -## understand where Bro spends its time. +## Bro records script-level functions executed during their processing as well +## as further internal locations. By sampling the processing in this form, one +## can understand where Bro spends its time. ## -## samples: A set with functions and locations seens during the processing of +## samples: A set with functions and locations seen during the processing of ## the sampled packet. ## -## CPU: The CPU time spent on processing the sampled. +## CPU: The CPU time spent on processing the sampled packet. ## ## dmem: The difference in memory usage caused by processing the sampled packet. event load_sample%(samples: load_sample_info, CPU: interval, dmem: int%); ## Generated for ARP requests. ## -## See `Wikipedia `__ for -## more information about the ARP protocol. +## See `Wikipedia `__ +## for more information about the ARP protocol. ## ## mac_src: The request's source MAC address. ## @@ -1173,12 +1199,12 @@ event arp_request%(mac_src: string, mac_dst: string, SPA: addr, SHA: string, ## Generated for ARP replies. ## -## See `Wikipedia `__ for -## more information about the ARP protocol. +## See `Wikipedia `__ +## for more information about the ARP protocol. ## -## mac_src: The replies's source MAC address. +## mac_src: The reply's source MAC address. ## -## mac_dst: The replies's destination MAC address. 
+## mac_dst: The reply's destination MAC address. ## ## SPA: The sender protocol address. ## @@ -1192,9 +1218,9 @@ event arp_request%(mac_src: string, mac_dst: string, SPA: addr, SHA: string, event arp_reply%(mac_src: string, mac_dst: string, SPA: addr, SHA: string, TPA: addr, THA: string%); -## Generated for ARP packets that Bro cannot interpret. Examples are packets with -## non-standard hardware address formats or hardware addresses that not match the -## originator of the packet. +## Generated for ARP packets that Bro cannot interpret. Examples are packets +## with non-standard hardware address formats or hardware addresses that do not +## match the originator of the packet. ## ## SPA: The sender protocol address. ## @@ -1216,8 +1242,8 @@ event bad_arp%(SPA: addr, SHA: string, TPA: addr, THA: string, explanation: stri ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_have bittorrent_peer_interested bittorrent_peer_keep_alive @@ -1229,8 +1255,8 @@ event bittorrent_peer_handshake%(c: connection, is_orig: bool, ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1241,8 +1267,8 @@ event bittorrent_peer_keep_alive%(c: connection, is_orig: bool%); ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1253,8 +1279,8 @@ event bittorrent_peer_choke%(c: connection, is_orig: bool%); ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1265,8 +1291,8 @@ event bittorrent_peer_unchoke%(c: connection, is_orig: bool%); ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_keep_alive @@ -1277,8 +1303,8 @@ event bittorrent_peer_interested%(c: connection, is_orig: bool%); ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1289,8 +1315,8 @@ event bittorrent_peer_not_interested%(c: connection, is_orig: bool%); ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. 
bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_interested bittorrent_peer_keep_alive @@ -1301,8 +1327,8 @@ event bittorrent_peer_have%(c: connection, is_orig: bool, piece_index: count%); ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_cancel bittorrent_peer_choke bittorrent_peer_handshake ## bittorrent_peer_have bittorrent_peer_interested bittorrent_peer_keep_alive @@ -1313,8 +1339,8 @@ event bittorrent_peer_bitfield%(c: connection, is_orig: bool, bitfield: string%) ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1326,8 +1352,8 @@ event bittorrent_peer_request%(c: connection, is_orig: bool, index: count, ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1339,8 +1365,8 @@ event bittorrent_peer_piece%(c: connection, is_orig: bool, index: count, ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1352,8 +1378,8 @@ event bittorrent_peer_cancel%(c: connection, is_orig: bool, index: count, ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1364,8 +1390,8 @@ event bittorrent_peer_port%(c: connection, is_orig: bool, listen_port: port%); ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1377,8 +1403,8 @@ event bittorrent_peer_unknown%(c: connection, is_orig: bool, message_id: count, ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1389,8 +1415,8 @@ event bittorrent_peer_weird%(c: connection, is_orig: bool, msg: string%); ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. 
bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1402,8 +1428,8 @@ event bt_tracker_request%(c: connection, uri: string, ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1417,8 +1443,8 @@ event bt_tracker_response%(c: connection, status: count, ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1430,8 +1456,8 @@ event bt_tracker_response_not_ok%(c: connection, status: count, ## TODO. ## -## See `Wikipedia `__ for more -## information about the BitTorrent protocol. +## See `Wikipedia `__ for +## more information about the BitTorrent protocol. ## ## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested @@ -1570,7 +1596,7 @@ event gnutella_http_notify%(c: connection%); ## Generated for Ident requests. ## -## See `Wikipedia `__ for more +## See `Wikipedia `__ for more ## information about the Ident protocol. ## ## c: The connection. @@ -1589,7 +1615,7 @@ event ident_request%(c: connection, lport: port, rport: port%); ## Generated for Ident replies. ## -## See `Wikipedia `__ for more +## See `Wikipedia `__ for more ## information about the Ident protocol. ## ## c: The connection. @@ -1612,7 +1638,7 @@ event ident_reply%(c: connection, lport: port, rport: port, user_id: string, sys ## Generated for Ident error replies. ## -## See `Wikipedia `__ for more +## See `Wikipedia `__ for more ## information about the Ident protocol. ## ## c: The connection. @@ -1646,7 +1672,7 @@ event ident_error%(c: connection, lport: port, rport: port, line: string%); ## ## password: The password tried. ## -## line: line is the line of text that led the analyzer to conclude that the +## line: The line of text that led the analyzer to conclude that the ## authentication had failed. ## ## .. bro:see:: login_confused login_confused_text login_display login_input_line @@ -1655,14 +1681,9 @@ event ident_error%(c: connection, lport: port, rport: port, line: string%); ## login_timeouts set_login_state ## ## .. note:: The login analyzer depends on a set of script-level variables that -## need to configured with patterns identifying login attempts. This configuration -## has not yet been ported over from Bro 1.5 to Bro 2.x, and the analyzer is -## therefore not directly usable at the moment. -## -## .. todo: Bro's current default configuration does not activate the protocol -## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. +## need to be configured with patterns identifying login attempts. This +## configuration has not yet been ported over from Bro 1.5 to Bro 2.x, and +## the analyzer is therefore not directly usable at the moment. ## ## .. 
todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -1685,7 +1706,7 @@ event login_failure%(c: connection, user: string, client_user: string, password: ## ## password: The password used. ## -## line: line is the line of text that led the analyzer to conclude that the +## line: The line of text that led the analyzer to conclude that the ## authentication had succeeded. ## ## .. bro:see:: login_confused login_confused_text login_display login_failure @@ -1694,9 +1715,9 @@ event login_failure%(c: connection, user: string, client_user: string, password: ## login_prompts login_success_msgs login_timeouts set_login_state ## ## .. note:: The login analyzer depends on a set of script-level variables that -## need to configured with patterns identifying login attempts. This configuration -## has not yet been ported over from Bro 1.5 to Bro 2.x, and the analyzer is -## therefore not directly usable at the moment. +## need to be configured with patterns identifying login attempts. This +## configuration has not yet been ported over from Bro 1.5 to Bro 2.x, and +## the analyzer is therefore not directly usable at the moment. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -1736,17 +1757,17 @@ event login_input_line%(c: connection, line: string%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event login_output_line%(c: connection, line: string%); -## Generated when tracking of Telnet/Rlogin authentication failed. As Bro's *login* -## analyzer uses a number of heuristics to extract authentication information, it -## may become confused. If it can no longer correctly track the authentication -## dialog, it raised this event. +## Generated when tracking of Telnet/Rlogin authentication failed. As Bro's +## *login* analyzer uses a number of heuristics to extract authentication +## information, it may become confused. If it can no longer correctly track +## the authentication dialog, it raises this event. ## ## c: The connection. ## ## msg: Gives the particular problem the heuristics detected (for example, -## ``multiple_login_prompts`` means that the engine saw several login prompts in -## a row, without the type-ahead from the client side presumed necessary to cause -## them) +## ``multiple_login_prompts`` means that the engine saw several login +## prompts in a row, without the type-ahead from the client side presumed +## necessary to cause them) ## ## line: The line of text that caused the heuristics to conclude they were ## confused. @@ -1762,9 +1783,10 @@ event login_output_line%(c: connection, line: string%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event login_confused%(c: connection, msg: string, line: string%); -## Generated after getting confused while tracking a Telnet/Rlogin authentication -## dialog. The *login* analyzer generates this even for every line of user input -## after it has reported :bro:id:`login_confused` for a connection. +## Generated after getting confused while tracking a Telnet/Rlogin +## authentication dialog. The *login* analyzer generates this event for every +## line of user input after it has reported :bro:id:`login_confused` for a +## connection. ## ## c: The connection. 
## @@ -1781,7 +1803,7 @@ event login_confused%(c: connection, msg: string, line: string%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event login_confused_text%(c: connection, line: string%); -## Generated for clients transmitting a terminal type in an Telnet session. This +## Generated for clients transmitting a terminal type in a Telnet session. This ## information is extracted out of environment variables sent as Telnet options. ## ## c: The connection. @@ -1797,12 +1819,12 @@ event login_confused_text%(c: connection, line: string%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event login_terminal%(c: connection, terminal: string%); -## Generated for clients transmitting a X11 DISPLAY in a Telnet session. This +## Generated for clients transmitting an X11 DISPLAY in a Telnet session. This ## information is extracted out of environment variables sent as Telnet options. ## ## c: The connection. ## -## terminal: The DISPLAY transmitted. +## display: The DISPLAY transmitted. ## ## .. bro:see:: login_confused login_confused_text login_failure login_input_line ## login_output_line login_prompt login_success login_terminal @@ -1813,10 +1835,10 @@ event login_terminal%(c: connection, terminal: string%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event login_display%(c: connection, display: string%); -## Generated when a Telnet authentication has been successful. The Telnet protocol -## includes options for negotiating authentication. When such an option is sent -## from client to server and the server replies that it accepts the authentication, -## then the event engine generates this event. +## Generated when a Telnet authentication has been successful. The Telnet +## protocol includes options for negotiating authentication. When such an +## option is sent from client to server and the server replies that it accepts +## the authentication, then the event engine generates this event. ## ## See `Wikipedia `__ for more information ## about the Telnet protocol. @@ -1827,8 +1849,9 @@ event login_display%(c: connection, display: string%); ## ## .. bro:see:: authentication_rejected authentication_skipped login_success ## -## .. note:: This event inspects the corresponding Telnet option while :bro:id:`login_success` -## heuristically determines success by watching session data. +## .. note:: This event inspects the corresponding Telnet option +## while :bro:id:`login_success` heuristically determines success by watching +## session data. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -1838,8 +1861,8 @@ event authentication_accepted%(name: string, c: connection%); ## Generated when a Telnet authentication has been unsuccessful. The Telnet ## protocol includes options for negotiating authentication. When such an option -## is sent from client to server and the server replies that it did not accept the -## authentication, then the event engine generates this event. +## is sent from client to server and the server replies that it did not accept +## the authentication, then the event engine generates this event. ## ## See `Wikipedia `__ for more information ## about the Telnet protocol. @@ -1850,9 +1873,9 @@ event authentication_accepted%(name: string, c: connection%); ## ## .. bro:see:: authentication_accepted authentication_skipped login_failure ## -## .. 
note:: This event inspects the corresponding Telnet option while :bro:id:`login_success` -## heuristically determines failure by watching session -## data. +## .. note:: This event inspects the corresponding Telnet option +## while :bro:id:`login_success` heuristically determines failure by watching +## session data. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -1860,7 +1883,7 @@ event authentication_accepted%(name: string, c: connection%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event authentication_rejected%(name: string, c: connection%); -## Generated when for Telnet/Rlogin sessions when a pattern match indicates +## Generated for Telnet/Rlogin sessions when a pattern match indicates ## that no authentication is performed. ## ## See `Wikipedia `__ for more information @@ -1873,9 +1896,9 @@ event authentication_rejected%(name: string, c: connection%); ## login_success_msgs login_timeouts set_login_state ## ## .. note:: The login analyzer depends on a set of script-level variables that -## need to be configured with patterns identifying actvity. This configuration has -## not yet been ported over from Bro 1.5 to Bro 2.x, and the analyzer is therefore -## not directly usable at the moment. +## need to be configured with patterns identifying activity. This +## configuration has not yet been ported over from Bro 1.5 to Bro 2.x, and +## the analyzer is therefore not directly usable at the moment. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -1883,15 +1906,16 @@ event authentication_rejected%(name: string, c: connection%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event authentication_skipped%(c: connection%); -## Generated for clients transmitting a terminal prompt in a Telnet session. This -## information is extracted out of environment variables sent as Telnet options. +## Generated for clients transmitting a terminal prompt in a Telnet session. +## This information is extracted out of environment variables sent as Telnet +## options. ## ## See `Wikipedia `__ for more information ## about the Telnet protocol. ## ## c: The connection. ## -## terminal: The TTYPROMPT transmitted. +## prompt: The TTYPROMPT transmitted. ## ## .. bro:see:: login_confused login_confused_text login_display login_failure ## login_input_line login_output_line login_success login_terminal @@ -1902,9 +1926,9 @@ event authentication_skipped%(c: connection%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event login_prompt%(c: connection, prompt: string%); -## Generated for Telnet sessions when encryption is activated. The Telnet protoco; -## includes options for negotiating encryption. When such a series of options is -## successfully negotiated, the event engine generates this event. +## Generated for Telnet sessions when encryption is activated. The Telnet +## protocol includes options for negotiating encryption. When such a series of +## options is successfully negotiated, the event engine generates this event. ## ## See `Wikipedia `__ for more information ## about the Telnet protocol. 
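To make the Telnet authentication events documented above a bit more concrete, here is a minimal script-level sketch that relies only on the signatures given in this file (``authentication_accepted`` and ``authentication_rejected``). The handler bodies and the plain ``print`` output are purely illustrative, and, as the notes above point out, the login analyzer first needs to be activated (e.g., via :bro:see:`dpd_config` or a DPD payload signature) before these events are generated at all.

# Sketch only: report Telnet authentication outcomes. Assumes the login
# analyzer has been enabled; Bro's current default configuration does not
# activate it.
event authentication_accepted(name: string, c: connection)
	{
	print fmt("Telnet authentication accepted for %s on %s", name, c$id$resp_h);
	}

event authentication_rejected(name: string, c: connection)
	{
	print fmt("Telnet authentication rejected for %s on %s", name, c$id$resp_h);
	}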
@@ -1916,13 +1940,13 @@ event login_prompt%(c: connection, prompt: string%); ## login_output_line login_prompt login_success login_terminal event activating_encryption%(c: connection%); -## Generated for inconsistent Telnet options observed. Telnet options are specified -## by the client and server stating which options they are willing to support -## vs. which they are not, and then instructing one another which in fact they -## should or should not use for the current connection. If the event engine sees -## a peer violate either what the other peer has instructed it to do, or what it -## itself offered in terms of options in the past, then the engine generates an -## inconsistent_option event. +## Generated for an inconsistent Telnet option. Telnet options are specified +## by the client and server stating which options they are willing to +## support vs. which they are not, and then instructing one another which in +## fact they should or should not use for the current connection. If the event +## engine sees a peer violate either what the other peer has instructed it to +## do, or what it itself offered in terms of options in the past, then the +## engine generates this event. ## ## See `Wikipedia `__ for more information ## about the Telnet protocol. @@ -1958,6 +1982,8 @@ event bad_option%(c: connection%); ## See `Wikipedia `__ for more information ## about the Telnet protocol. ## +## c: The connection. +## ## .. bro:see:: inconsistent_option bad_option authentication_accepted ## authentication_rejected authentication_skipped login_confused ## login_confused_text login_display login_failure login_input_line @@ -1971,15 +1997,15 @@ event bad_option_termination%(c: connection%); ## Generated for client side commands on an RSH connection. ## -## See `RFC 1258 `__ for more information about -## the Rlogin/Rsh protocol. +## See `RFC 1258 `__ for more information +## about the Rlogin/Rsh protocol. ## ## c: The connection. ## ## client_user: The client-side user name as sent in the initial protocol ## handshake. ## -## client_user: The server-side user name as sent in the initial protocol +## server_user: The server-side user name as sent in the initial protocol ## handshake. ## ## line: The command line sent in the request. @@ -1990,8 +2016,8 @@ event bad_option_termination%(c: connection%); ## login_failure login_input_line login_output_line login_prompt login_success ## login_terminal ## -## .. note: For historical reasons, these events are separate from the ``login_`` -## events. Ideally, they would all be handled uniquely. +## .. note:: For historical reasons, these events are separate from the +## ``login_`` events. Ideally, they would all be handled uniquely. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -2001,27 +2027,25 @@ event rsh_request%(c: connection, client_user: string, server_user: string, line ## Generated for client side commands on an RSH connection. ## -## See `RFC 1258 `__ for more information about -## the Rlogin/Rsh protocol. +## See `RFC 1258 `__ for more information +## about the Rlogin/Rsh protocol. ## ## c: The connection. ## ## client_user: The client-side user name as sent in the initial protocol ## handshake. ## -## client_user: The server-side user name as sent in the initial protocol +## server_user: The server-side user name as sent in the initial protocol ## handshake. ## ## line: The command line sent in the request. 
## -## new_session: True if this is the first command of the Rsh session. -## ## .. bro:see:: rsh_request login_confused login_confused_text login_display ## login_failure login_input_line login_output_line login_prompt login_success ## login_terminal ## -## .. note: For historical reasons, these events are separate from the ``login_`` -## events. Ideally, they would all be handled uniquely. +## .. note:: For historical reasons, these events are separate from the +## ``login_`` events. Ideally, they would all be handled uniquely. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -2031,8 +2055,8 @@ event rsh_reply%(c: connection, client_user: string, server_user: string, line: ## Generated for client-side FTP commands. ## -## See `Wikipedia `__ for more -## information about the FTP protocol. +## See `Wikipedia `__ for +## more information about the FTP protocol. ## ## c: The connection. ## @@ -2046,8 +2070,8 @@ event ftp_request%(c: connection, command: string, arg: string%) &group="ftp"; ## Generated for server-side FTP replies. ## -## See `Wikipedia `__ for more -## information about the FTP protocol. +## See `Wikipedia `__ for +## more information about the FTP protocol. ## ## c: The connection. ## @@ -2055,9 +2079,10 @@ event ftp_request%(c: connection, command: string, arg: string%) &group="ftp"; ## ## msg: The textual message of the response. ## -## cont_resp: True if the reply line is tagged as being continued to the next line. -## If so, further events will be raised and a handler may want to reassemle the -## pieces before processing the response any further. +## cont_resp: True if the reply line is tagged as being continued to the next +## line. If so, further events will be raised and a handler may want +## to reassemble the pieces before processing the response any +## further. ## ## .. bro:see:: ftp_request fmt_ftp_port parse_eftp_port ## parse_ftp_epsv parse_ftp_pasv parse_ftp_port @@ -2071,10 +2096,11 @@ event ftp_reply%(c: connection, code: count, msg: string, cont_resp: bool%) &gro ## c: The connection. ## ## is_orig: True if the sender of the command is the originator of the TCP -## connection. Note that this is not redundant: the SMTP ``TURN`` command allows -## client and server to flip roles on established SMTP sessions, and hence a -## "request" might still come from the TCP-level responder. In practice, however, -## that will rarely happen as TURN is considered insecure and rarely used. +## connection. Note that this is not redundant: the SMTP ``TURN`` command +## allows client and server to flip roles on established SMTP sessions, +## and hence a "request" might still come from the TCP-level responder. +## In practice, however, that will rarely happen as TURN is considered +## insecure and rarely used. ## ## command: The request's command, without any arguments. ## @@ -2098,16 +2124,18 @@ event smtp_request%(c: connection, is_orig: bool, command: string, arg: string%) ## connection. Note that this is not redundant: the SMTP ``TURN`` command ## allows client and server to flip roles on established SMTP sessions, ## and hence a "reply" might still come from the TCP-level originator. In -## practice, however, that will rarely happen as TURN is considered insecure -## and rarely used. +## practice, however, that will rarely happen as TURN is considered +## insecure and rarely used. ## ## code: The reply's numerical code. ## +## cmd: TODO. 
+## ## msg: The reply's textual description. ## -## cont_resp: True if the reply line is tagged as being continued to the next line. -## If so, further events will be raised and a handler may want to reassemle the -## pieces before processing the response any further. +## cont_resp: True if the reply line is tagged as being continued to the next +## line. If so, further events will be raised and a handler may want to +## reassemble the pieces before processing the response any further. ## ## .. bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash ## mime_end_entity mime_entity_data mime_event mime_one_header mime_segment_data @@ -2118,7 +2146,7 @@ event smtp_reply%(c: connection, is_orig: bool, code: count, cmd: string, msg: s ## Generated for DATA transmitted on SMTP sessions. This event is raised for ## subsequent chunks of raw data following the ``DATA`` SMTP command until the -## corresponding end marker ``.`` is seen. A handler may want to reassembly +## corresponding end marker ``.`` is seen. A handler may want to reassemble ## the pieces as they come in if stream-analysis is required. ## ## See `Wikipedia `__ @@ -2136,23 +2164,23 @@ event smtp_reply%(c: connection, is_orig: bool, code: count, cmd: string, msg: s ## mime_end_entity mime_entity_data mime_event mime_one_header mime_segment_data ## smtp_reply smtp_request skip_smtp_data ## -## .. note:: This event received the unprocessed raw data. There is a separate -## set ``mime_*`` events that strip out the outer MIME-layer of emails and provide -## structured access to their content. +## .. note:: This event receives the unprocessed raw data. There is a separate +## set of ``mime_*`` events that strip out the outer MIME-layer of emails and +## provide structured access to their content. event smtp_data%(c: connection, is_orig: bool, data: string%) &group="smtp"; -## Generated for unexpected activity on SMTP sessions. The SMTP analyzer tracks the -## state of SMTP sessions and reports commands and other activity with this event -## that it sees even though it would not expect so at the current point of the -## communication. +## Generated for unexpected activity on SMTP sessions. The SMTP analyzer tracks +## the state of SMTP sessions and reports commands and other activity with this +## event that it sees even though it would not expect so at the current point +## of the communication. ## ## See `Wikipedia `__ ## for more information about the SMTP protocol. ## ## c: The connection. ## -## is_orig: True if the sender of the unexpected activity is the originator of the -## TCP connection. +## is_orig: True if the sender of the unexpected activity is the originator of +## the TCP connection. ## ## msg: A descriptive message of what was unexpected. ## @@ -2161,14 +2189,14 @@ event smtp_data%(c: connection, is_orig: bool, data: string%) &group="smtp"; ## .. bro:see:: smtp_data smtp_request smtp_reply event smtp_unexpected%(c: connection, is_orig: bool, msg: string, detail: string%) &group="smtp"; -## Generated when starting to parse a email MIME entity. MIME is a +## Generated when starting to parse an email MIME entity. MIME is a ## protocol-independent data format for encoding text and files, along with -## corresponding meta-data, for transmission. Bro raises this event when it begin -## parsing a MIME entity extracted from an email protocol. +## corresponding metadata, for transmission. Bro raises this event when it +## begins parsing a MIME entity extracted from an email protocol. 
## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See `Wikipedia -## `__ for more information about the ARP -## protocol. +## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## `Wikipedia `__ for more information +## about MIME. ## ## c: The connection. ## @@ -2176,18 +2204,18 @@ event smtp_unexpected%(c: connection, is_orig: bool, msg: string, detail: string ## mime_entity_data mime_event mime_one_header mime_segment_data smtp_data ## http_begin_entity ## -## .. note:: Bro also extracts MIME entities from HTTP session. For those, however, -## it raises :bro:id:`http_begin_entity` instead. +## .. note:: Bro also extracts MIME entities from HTTP sessions. For those, +## however, it raises :bro:id:`http_begin_entity` instead. event mime_begin_entity%(c: connection%); -## Generated when finishing parsing an email MIME entity. MIME is a +## Generated when finishing parsing an email MIME entity. MIME is a ## protocol-independent data format for encoding text and files, along with -## corresponding meta-data, for transmission. Bro raises this event when it +## corresponding metadata, for transmission. Bro raises this event when it ## finished parsing a MIME entity extracted from an email protocol. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See `Wikipedia -## `__ for more information about the ARP -## protocol. +## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## `Wikipedia `__ for more information +## about MIME. ## ## c: The connection. ## @@ -2195,17 +2223,17 @@ event mime_begin_entity%(c: connection%); ## mime_entity_data mime_event mime_one_header mime_segment_data smtp_data ## http_end_entity ## -## .. note:: Bro also extracts MIME entities from HTTP session. For those, however, -## it raises :bro:id:`http_end_entity` instead. +## .. note:: Bro also extracts MIME entities from HTTP sessions. For those, +## however, it raises :bro:id:`http_end_entity` instead. event mime_end_entity%(c: connection%); ## Generated for individual MIME headers extracted from email MIME ## entities. MIME is a protocol-independent data format for encoding text and -## files, along with corresponding meta-data, for transmission. +## files, along with corresponding metadata, for transmission. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See `Wikipedia -## `__ for more information about the ARP -## protocol. +## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## `Wikipedia `__ for more information +## about MIME. ## ## c: The connection. ## @@ -2215,44 +2243,45 @@ event mime_end_entity%(c: connection%); ## mime_end_entity mime_entity_data mime_event mime_segment_data ## http_header http_all_headers ## -## .. note:: Bro also extracts MIME headers from HTTP sessions. For those, however, -## it raises :bro:id:`http_header` instead. +## .. note:: Bro also extracts MIME headers from HTTP sessions. For those, +## however, it raises :bro:id:`http_header` instead. event mime_one_header%(c: connection, h: mime_header_rec%); ## Generated for MIME headers extracted from email MIME entities, passing all -## headers at once. MIME is a protocol-independent data format for encoding text -## and files, along with corresponding meta-data, for transmission. +## headers at once. MIME is a protocol-independent data format for encoding +## text and files, along with corresponding metadata, for transmission. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. 
See `Wikipedia -## `__ for more information about the ARP -## protocol. +## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## `Wikipedia `__ for more information +## about MIME. ## ## c: The connection. ## ## hlist: A *table* containing all headers extracted from the current entity. -## The table is indexed by the position of the header (1 for the first, 2 for the -## second, etc.). +## The table is indexed by the position of the header (1 for the first, +## 2 for the second, etc.). ## ## .. bro:see:: mime_all_data mime_begin_entity mime_content_hash mime_end_entity ## mime_entity_data mime_event mime_one_header mime_segment_data ## http_header http_all_headers ## -## .. note:: Bro also extracts MIME headers from HTTP sessions. For those, however, -## it raises :bro:id:`http_header` instead. +## .. note:: Bro also extracts MIME headers from HTTP sessions. For those, +## however, it raises :bro:id:`http_header` instead. event mime_all_headers%(c: connection, hlist: mime_header_list%); -## Generated for chunks of decoded MIME data from email MIME entities. MIME +## Generated for chunks of decoded MIME data from email MIME entities. MIME ## is a protocol-independent data format for encoding text and files, along with -## corresponding meta-data, for transmission. As Bro parses the data of an entity, -## it raises a sequence of these events, each coming as soon as a new chunk of -## data is available. In contrast, there is also :bro:id:`mime_entity_data`, which -## passes all of an entities data at once in a single block. While the latter is -## more convinient to handle, ``mime_segment_data`` is more efficient as Bro does -## not need to buffer the data. Thus, if possible, this event should be prefered. +## corresponding metadata, for transmission. As Bro parses the data of an +## entity, it raises a sequence of these events, each coming as soon as a new +## chunk of data is available. In contrast, there is also +## :bro:id:`mime_entity_data`, which passes all of an entities data at once +## in a single block. While the latter is more convenient to handle, +## ``mime_segment_data`` is more efficient as Bro does not need to buffer +## the data. Thus, if possible, this event should be preferred. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See `Wikipedia -## `__ for more information about the ARP -## protocol. +## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## `Wikipedia `__ for more information +## about MIME. ## ## c: The connection. ## @@ -2264,20 +2293,20 @@ event mime_all_headers%(c: connection, hlist: mime_header_list%); ## mime_end_entity mime_entity_data mime_event mime_one_header http_entity_data ## mime_segment_length mime_segment_overlap_length ## -## .. note:: Bro also extracts MIME data from HTTP sessions. For those, however, it -## raises :bro:id:`http_entity_data` (sic!) instead. +## .. note:: Bro also extracts MIME data from HTTP sessions. For those, +## however, it raises :bro:id:`http_entity_data` (sic!) instead. event mime_segment_data%(c: connection, length: count, data: string%); ## Generated for data decoded from an email MIME entity. This event delivers ## the complete content of a single MIME entity. In contrast, there is also ## :bro:id:`mime_segment_data`, which passes on a sequence of data chunks as -## they. come in. While ``mime_entity_data`` is more convinient to handle, -## ``mime_segment_data`` is more efficient as Bro does not need to buffer the data. 
-## Thus, if possible, the latter should be prefered. +## they come in. While ``mime_entity_data`` is more convenient to handle, +## ``mime_segment_data`` is more efficient as Bro does not need to buffer the +## data. Thus, if possible, the latter should be preferred. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See `Wikipedia -## `__ for more information about the ARP -## protocol. +## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## `Wikipedia `__ for more information +## about MIME. ## ## c: The connection. ## @@ -2292,15 +2321,15 @@ event mime_segment_data%(c: connection, length: count, data: string%); ## sessions, there's no corresponding event for that currently. event mime_entity_data%(c: connection, length: count, data: string%); -## Generated for passing on all data decoded from an single email MIME +## Generated for passing on all data decoded from a single email MIME ## message. If an email message has more than one MIME entity, this event ## combines all their data into a single value for analysis. Note that because ## of the potentially significant buffering necessary, using this event can be ## expensive. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See `Wikipedia -## `__ for more information about the ARP -## protocol. +## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## `Wikipedia `__ for more information +## about MIME. ## ## c: The connection. ## @@ -2315,35 +2344,37 @@ event mime_entity_data%(c: connection, length: count, data: string%); ## sessions, there's no corresponding event for that currently. event mime_all_data%(c: connection, length: count, data: string%); -## Generated for errors found when decoding email MIME entities. +## Generated for errors found when decoding email MIME entities. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See `Wikipedia -## `__ for more information about the ARP -## protocol. +## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## `Wikipedia `__ for more information +## about MIME. ## -## event_type: A string describing the general category of the problem found (e.g., -## ``illegal format``). +## c: The connection. +## +## event_type: A string describing the general category of the problem found +## (e.g., ``illegal format``). ## ## detail: Further more detailed description of the error. ## ## .. bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash ## mime_end_entity mime_entity_data mime_one_header mime_segment_data http_event ## -## .. note:: Bro also extracts MIME headers from HTTP sessions. For those, however, -## it raises :bro:id:`http_event` instead. +## .. note:: Bro also extracts MIME headers from HTTP sessions. For those, +## however, it raises :bro:id:`http_event` instead. event mime_event%(c: connection, event_type: string, detail: string%); -## Generated for decoded MIME entities extracted from email meessage, passing on +## Generated for decoded MIME entities extracted from email messages, passing on ## their MD5 checksums. Bro computes the MD5 over the complete decoded data of ## each MIME entity. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See `Wikipedia -## `__ for more information about the ARP -## protocol. +## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## `Wikipedia `__ for more information +## about MIME. ## ## c: The connection. ## -## content_len: The length of entity being hashed. 
+## content_len: The length of the entity being hashed. ## ## hash_value: The MD5 hash. ## @@ -2355,15 +2386,15 @@ event mime_event%(c: connection, event_type: string, detail: string%); event mime_content_hash%(c: connection, content_len: count, hash_value: string%); ## Generated for RPC request/reply *pairs*. The RPC analyzer associates request -## and reply by their transactions identifiers and raise this event once both -## have been seen. If there's not reply, the will still be generated eventually -## on timeout. In that case, *status* will be set to :bro:enum:`RPC_TIMEOUT`. +## and reply by their transaction identifiers and raises this event once both +## have been seen. If there's not a reply, this event will still be generated +## eventually on timeout. In that case, *status* will be set to +## :bro:enum:`RPC_TIMEOUT`. ## ## See `Wikipedia `__ for more information ## about the ONC RPC protocol. -## c: The connection. ## -## xid: The transaction identifier allowing to match requests with replies. +## c: The connection. ## ## prog: The remote program to call. ## @@ -2374,7 +2405,7 @@ event mime_content_hash%(c: connection, content_len: count, hash_value: string%) ## status: The status of the reply, which should be one of the index values of ## :bro:id:`RPC_status`. ## -## start_time: Then time when the *call* was seen. +## start_time: The time when the *call* was seen. ## ## call_len: The size of the *call_body* PDU. ## @@ -2441,7 +2472,8 @@ event rpc_reply%(c: connection, xid: count, status: rpc_status, reply_len: count ## Generated for Portmapper requests of type *null*. ## ## Portmapper is a service running on top of RPC. See `Wikipedia -## `__ for more information about the service. +## `__ for more information about the +## service. ## ## r: The RPC connection. ## @@ -2459,14 +2491,16 @@ event pm_request_null%(r: connection%); ## Generated for Portmapper request/reply dialogues of type *set*. ## ## Portmapper is a service running on top of RPC. See `Wikipedia -## `__ for more information about the service. +## `__ for more information about the +## service. ## ## r: The RPC connection. ## ## m: The argument to the request. ## ## success: True if the request was successful, according to the corresponding -## reply. If no reply was seen, this will be false once the request times out. +## reply. If no reply was seen, this will be false once the request +## times out. ## ## .. bro:see:: epm_map_response pm_attempt_callit pm_attempt_dump pm_attempt_getport ## pm_attempt_null pm_attempt_set pm_attempt_unset pm_bad_port pm_request_callit @@ -2482,14 +2516,16 @@ event pm_request_set%(r: connection, m: pm_mapping, success: bool%); ## Generated for Portmapper request/reply dialogues of type *unset*. ## ## Portmapper is a service running on top of RPC. See `Wikipedia -## `__ for more information about the service. +## `__ for more information about the +## service. ## ## r: The RPC connection. ## ## m: The argument to the request. ## ## success: True if the request was successful, according to the corresponding -## reply. If no reply was seen, this will be false once the request times out. +## reply. If no reply was seen, this will be false once the request +## times out. ## ## .. 
bro:see:: epm_map_response pm_attempt_callit pm_attempt_dump pm_attempt_getport ## pm_attempt_null pm_attempt_set pm_attempt_unset pm_bad_port pm_request_callit @@ -2505,7 +2541,8 @@ event pm_request_unset%(r: connection, m: pm_mapping, success: bool%); ## Generated for Portmapper request/reply dialogues of type *getport*. ## ## Portmapper is a service running on top of RPC. See `Wikipedia -## `__ for more information about the service. +## `__ for more information about the +## service. ## ## r: The RPC connection. ## @@ -2513,9 +2550,6 @@ event pm_request_unset%(r: connection, m: pm_mapping, success: bool%); ## ## p: The port returned by the server. ## -## success: True if the request was successful, according to the corresponding -## reply. If no reply was seen, this will be false once the request times out. -## ## .. bro:see:: epm_map_response pm_attempt_callit pm_attempt_dump pm_attempt_getport ## pm_attempt_null pm_attempt_set pm_attempt_unset pm_bad_port pm_request_callit ## pm_request_dump pm_request_null pm_request_set pm_request_unset rpc_call @@ -2530,7 +2564,8 @@ event pm_request_getport%(r: connection, pr: pm_port_request, p: port%); ## Generated for Portmapper request/reply dialogues of type *dump*. ## ## Portmapper is a service running on top of RPC. See `Wikipedia -## `__ for more information about the service. +## `__ for more information about the +## service. ## ## r: The RPC connection. ## @@ -2555,7 +2590,7 @@ event pm_request_dump%(r: connection, m: pm_mappings%); ## ## r: The RPC connection. ## -## m: The argument to the request. +## call: The argument to the request. ## ## p: The port value returned by the call. ## @@ -2711,9 +2746,9 @@ event pm_attempt_dump%(r: connection, status: rpc_status%); event pm_attempt_callit%(r: connection, status: rpc_status, call: pm_callit_request%); ## Generated for Portmapper requests or replies that include an invalid port -## number. Since ports are represented by unsigned 4-byte integers, they can stray -## outside the allowed range of 0--65535 by being >= 65536. If so, this event is -## generated. +## number. Since ports are represented by unsigned 4-byte integers, they can +## stray outside the allowed range of 0--65535 by being >= 65536. If so, this +## event is generated. ## ## Portmapper is a service running on top of RPC. See `Wikipedia ## `__ for more information about the @@ -2735,8 +2770,8 @@ event pm_attempt_callit%(r: connection, status: rpc_status, call: pm_callit_requ event pm_bad_port%(r: connection, bad_p: count%); ## Generated for NFSv3 request/reply dialogues of type *null*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. See `Wikipedia ## `__ for more @@ -2758,8 +2793,8 @@ event pm_bad_port%(r: connection, bad_p: count%); event nfs_proc_null%(c: connection, info: NFS3::info_t%); ## Generated for NFSv3 request/reply dialogues of type *getattr*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. 
See `Wikipedia ## `__ for more @@ -2769,8 +2804,10 @@ event nfs_proc_null%(c: connection, info: NFS3::info_t%); ## ## info: Reports the status of the dialogue, along with some meta information. ## -## attr: The attributes returned in the reply. The values may not be valid if the -## request was unsuccessful. +## fh: TODO. +## +## attrs: The attributes returned in the reply. The values may not be valid if +## the request was unsuccessful. ## ## .. bro:see:: nfs_proc_create nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir @@ -2784,8 +2821,8 @@ event nfs_proc_null%(c: connection, info: NFS3::info_t%); event nfs_proc_getattr%(c: connection, info: NFS3::info_t, fh: string, attrs: NFS3::fattr_t%); ## Generated for NFSv3 request/reply dialogues of type *lookup*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. See `Wikipedia ## `__ for more @@ -2812,8 +2849,8 @@ event nfs_proc_getattr%(c: connection, info: NFS3::info_t, fh: string, attrs: NF event nfs_proc_lookup%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, rep: NFS3::lookup_reply_t%); ## Generated for NFSv3 request/reply dialogues of type *read*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. See `Wikipedia ## `__ for more @@ -2840,8 +2877,8 @@ event nfs_proc_lookup%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t event nfs_proc_read%(c: connection, info: NFS3::info_t, req: NFS3::readargs_t, rep: NFS3::read_reply_t%); ## Generated for NFSv3 request/reply dialogues of type *readlink*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. See `Wikipedia ## `__ for more @@ -2868,8 +2905,8 @@ event nfs_proc_read%(c: connection, info: NFS3::info_t, req: NFS3::readargs_t, r event nfs_proc_readlink%(c: connection, info: NFS3::info_t, fh: string, rep: NFS3::readlink_reply_t%); ## Generated for NFSv3 request/reply dialogues of type *write*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. See `Wikipedia ## `__ for more @@ -2879,7 +2916,7 @@ event nfs_proc_readlink%(c: connection, info: NFS3::info_t, fh: string, rep: NFS ## ## info: Reports the status of the dialogue, along with some meta information. ## -## fh: The file handle passed in the request. +## req: TODO. ## ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. 
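As an illustration of how the NFSv3 dialogue events above can be consumed, the following is a minimal sketch of a handler for ``nfs_proc_getattr``, using only the signature shown in this documentation. The body is illustrative: it prints the raw file handle and deliberately leaves the ``NFS3::info_t`` and ``NFS3::fattr_t`` records untouched, since their fields are not spelled out here. As the surrounding notes state, the NFS analyzer also has to be activated (e.g., via :bro:see:`dpd_config` or a DPD payload signature) before this event is generated.

# Sketch only: log NFSv3 getattr dialogues. Assumes the NFS analyzer has been
# enabled; reply attributes may not be valid if the request was unsuccessful.
event nfs_proc_getattr(c: connection, info: NFS3::info_t, fh: string, attrs: NFS3::fattr_t)
	{
	print fmt("NFSv3 getattr from %s for file handle %s", c$id$orig_h, fh);
	}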
@@ -2897,8 +2934,8 @@ event nfs_proc_readlink%(c: connection, info: NFS3::info_t, fh: string, rep: NFS event nfs_proc_write%(c: connection, info: NFS3::info_t, req: NFS3::writeargs_t, rep: NFS3::write_reply_t%); ## Generated for NFSv3 request/reply dialogues of type *create*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. See `Wikipedia ## `__ for more @@ -2908,7 +2945,7 @@ event nfs_proc_write%(c: connection, info: NFS3::info_t, req: NFS3::writeargs_t, ## ## info: Reports the status of the dialogue, along with some meta information. ## -## fh: The file handle passed in the request. +## req: TODO. ## ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. @@ -2925,8 +2962,8 @@ event nfs_proc_write%(c: connection, info: NFS3::info_t, req: NFS3::writeargs_t, event nfs_proc_create%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, rep: NFS3::newobj_reply_t%); ## Generated for NFSv3 request/reply dialogues of type *mkdir*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. See `Wikipedia ## `__ for more @@ -2936,7 +2973,7 @@ event nfs_proc_create%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t ## ## info: Reports the status of the dialogue, along with some meta information. ## -## fh: The file handle passed in the request. +## req: TODO. ## ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. @@ -2953,8 +2990,8 @@ event nfs_proc_create%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t event nfs_proc_mkdir%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, rep: NFS3::newobj_reply_t%); ## Generated for NFSv3 request/reply dialogues of type *remove*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. See `Wikipedia ## `__ for more @@ -2964,7 +3001,7 @@ event nfs_proc_mkdir%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, ## ## info: Reports the status of the dialogue, along with some meta information. ## -## fh: The file handle passed in the request. +## req: TODO. ## ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. @@ -2981,8 +3018,8 @@ event nfs_proc_mkdir%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, event nfs_proc_remove%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, rep: NFS3::delobj_reply_t%); ## Generated for NFSv3 request/reply dialogues of type *rmdir*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. 
See `Wikipedia ## `__ for more @@ -2992,7 +3029,7 @@ event nfs_proc_remove%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t ## ## info: Reports the status of the dialogue, along with some meta information. ## -## fh: The file handle passed in the request. +## req: TODO. ## ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. @@ -3009,8 +3046,8 @@ event nfs_proc_remove%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t event nfs_proc_rmdir%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, rep: NFS3::delobj_reply_t%); ## Generated for NFSv3 request/reply dialogues of type *readdir*. The event is -## generated once we have either seen both the request and its corresponding reply, -## or an unanswered request has timed out. +## generated once we have either seen both the request and its corresponding +## reply, or an unanswered request has timed out. ## ## NFS is a service running on top of RPC. See `Wikipedia ## `__ for more @@ -3020,7 +3057,7 @@ event nfs_proc_rmdir%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, ## ## info: Reports the status of the dialogue, along with some meta information. ## -## fh: The file handle passed in the request. +## req: TODO. ## ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. @@ -3036,8 +3073,8 @@ event nfs_proc_rmdir%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event nfs_proc_readdir%(c: connection, info: NFS3::info_t, req: NFS3::readdirargs_t, rep: NFS3::readdir_reply_t%); -## Generated for NFS3 request/reply dialogues of a type that Bro's NFS3 analyzer -## does not implement. +## Generated for NFSv3 request/reply dialogues of a type that Bro's NFSv3 +## analyzer does not implement. ## ## NFS is a service running on top of RPC. See `Wikipedia ## `__ for more @@ -3059,9 +3096,11 @@ event nfs_proc_readdir%(c: connection, info: NFS3::info_t, req: NFS3::readdirarg ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event nfs_proc_not_implemented%(c: connection, info: NFS3::info_t, proc: NFS3::proc_t%); -## Generated for each NFS3 reply message received, reporting just the +## Generated for each NFSv3 reply message received, reporting just the ## status included. ## +## n: The connection. +## ## info: Reports the status included in the reply. ## ## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir @@ -3075,11 +3114,11 @@ event nfs_proc_not_implemented%(c: connection, info: NFS3::info_t, proc: NFS3::p ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event nfs_reply_status%(n: connection, info: NFS3::info_t%); -## Generated for all NTP messages. Different from many other of Bro's events, this -## one is generated for both client-side and server-side messages. +## Generated for all NTP messages. Different from many other of Bro's events, +## this one is generated for both client-side and server-side messages. ## -## See `Wikipedia `__ for more -## information about the NTP protocol. +## See `Wikipedia `__ for +## more information about the NTP protocol. ## ## u: The connection record describing the corresponding UDP flow. ## @@ -3096,21 +3135,21 @@ event nfs_reply_status%(n: connection, info: NFS3::info_t%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. 
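A corresponding sketch for the status-only nfs_reply_status event documented in this hunk; it merely reports which endpoint sent the reply.

event nfs_reply_status(n: connection, info: NFS3::info_t)
	{
	# NFS3::info_t carries the status details; only the connection endpoints are used here.
	print fmt("NFSv3 reply status from %s to %s", n$id$resp_h, n$id$orig_h);
	}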
event ntp_message%(u: connection, msg: ntp_msg, excess: string%); -## Generated for all NetBIOS SSN and DGM messages. Bro's NetBIOS analyzer processes -## the NetBIOS session service running on TCP port 139, and (despite its name!) the -## NetBIOS datagram service on UDP port 138. +## Generated for all NetBIOS SSN and DGM messages. Bro's NetBIOS analyzer +## processes the NetBIOS session service running on TCP port 139, and (despite +## its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information ## about NetBIOS. `RFC 1002 `__ describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## -## c: The connection, which may be a TCP or UDP, depending on the type of the +## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. ## ## is_orig: True if the message was sent by the originator of the connection. ## -## msg_type: The general type of message, as defined in Section 4.3.1 of `RFC 1002 -## `__. +## msg_type: The general type of message, as defined in Section 4.3.1 of +## `RFC 1002 `__. ## ## data_len: The length of the message's payload. ## @@ -3119,8 +3158,8 @@ event ntp_message%(u: connection, msg: ntp_msg, excess: string%); ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's SMB -## anlyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. +## `SMB/CIFS `__. Bro's +## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -3128,15 +3167,15 @@ event ntp_message%(u: connection, msg: ntp_msg, excess: string%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event netbios_session_message%(c: connection, is_orig: bool, msg_type: count, data_len: count%); -## Generated for NetBIOS messages of type *session request*. Bro's NetBIOS analyzer -## processes the NetBIOS session service running on TCP port 139, and (despite its -## name!) the NetBIOS datagram service on UDP port 138. +## Generated for NetBIOS messages of type *session request*. Bro's NetBIOS +## analyzer processes the NetBIOS session service running on TCP port 139, and +## (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information ## about NetBIOS. `RFC 1002 `__ describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## -## c: The connection, which may be a TCP or UDP, depending on the type of the +## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. ## ## msg: The raw payload of the message sent, excluding the common NetBIOS @@ -3147,8 +3186,8 @@ event netbios_session_message%(c: connection, is_orig: bool, msg_type: count, da ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's SMB -## anlyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. +## `SMB/CIFS `__. Bro's +## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## ## .. 
todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -3157,14 +3196,14 @@ event netbios_session_message%(c: connection, is_orig: bool, msg_type: count, da event netbios_session_request%(c: connection, msg: string%); ## Generated for NetBIOS messages of type *positive session response*. Bro's -## NetBIOS analyzer processes the NetBIOS session service running on TCP port 139, -## and (despite its name!) the NetBIOS datagram service on UDP port 138. +## NetBIOS analyzer processes the NetBIOS session service running on TCP port +## 139, and (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information ## about NetBIOS. `RFC 1002 `__ describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## -## c: The connection, which may be a TCP or UDP, depending on the type of the +## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. ## ## msg: The raw payload of the message sent, excluding the common NetBIOS @@ -3175,8 +3214,8 @@ event netbios_session_request%(c: connection, msg: string%); ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's SMB -## anlyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. +## `SMB/CIFS `__. Bro's +## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -3185,14 +3224,14 @@ event netbios_session_request%(c: connection, msg: string%); event netbios_session_accepted%(c: connection, msg: string%); ## Generated for NetBIOS messages of type *negative session response*. Bro's -## NetBIOS analyzer processes the NetBIOS session service running on TCP port 139, -## and (despite its name!) the NetBIOS datagram service on UDP port 138. +## NetBIOS analyzer processes the NetBIOS session service running on TCP port +## 139, and (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information ## about NetBIOS. `RFC 1002 `__ describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## -## c: The connection, which may be a TCP or UDP, depending on the type of the +## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. ## ## msg: The raw payload of the message sent, excluding the common NetBIOS @@ -3203,8 +3242,8 @@ event netbios_session_accepted%(c: connection, msg: string%); ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's SMB -## anlyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. +## `SMB/CIFS `__. Bro's +## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -3212,17 +3251,17 @@ event netbios_session_accepted%(c: connection, msg: string%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. 
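For the NetBIOS session events documented here, a minimal handler of the per-message event might simply report the message type and size, for example:

event netbios_session_message(c: connection, is_orig: bool, msg_type: count, data_len: count)
	{
	print fmt("NetBIOS message type %d, %d bytes, %s %s", msg_type, data_len,
		is_orig ? "from" : "to", c$id$orig_h);
	}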
event netbios_session_rejected%(c: connection, msg: string%); -## Generated for NetBIOS message of type *session message* that are not carrying -## SMB payload. +## Generated for NetBIOS messages of type *session message* that are not +## carrying an SMB payload. ## -## NetBIOS analyzer processes the NetBIOS session service running on TCP port 139, -## and (despite its name!) the NetBIOS datagram service on UDP port 138. +## NetBIOS analyzer processes the NetBIOS session service running on TCP port +## 139, and (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information ## about NetBIOS. `RFC 1002 `__ describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## -## c: The connection, which may be a TCP or UDP, depending on the type of the +## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. ## ## is_orig: True if the message was sent by the originator of the connection. @@ -3235,11 +3274,11 @@ event netbios_session_rejected%(c: connection, msg: string%); ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's SMB -## anlyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. +## `SMB/CIFS `__. Bro's +## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## -## .. todo:: This is an oddly named event. In fact, it's probably an odd event to -## have to begin with. +## .. todo:: This is an oddly named event. In fact, it's probably an odd event +## to have to begin with. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -3255,20 +3294,21 @@ event netbios_session_raw_message%(c: connection, is_orig: bool, msg: string%); ## about NetBIOS. `RFC 1002 `__ describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## -## c: The connection, which may be a TCP or UDP, depending on the type of the +## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. ## -## msg: The raw payload of the message sent, excluding the common NetBIOS header. +## msg: The raw payload of the message sent, excluding the common NetBIOS +## header. ## ## .. bro:see:: netbios_session_accepted netbios_session_keepalive ## netbios_session_message netbios_session_raw_message netbios_session_rejected ## netbios_session_request decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's SMB -## anlyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. +## `SMB/CIFS `__. Bro's +## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## -## .. todo: This is an oddly named event. +## .. todo:: This is an oddly named event. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -3277,25 +3317,26 @@ event netbios_session_raw_message%(c: connection, is_orig: bool, msg: string%); event netbios_session_ret_arg_resp%(c: connection, msg: string%); ## Generated for NetBIOS messages of type *keep-alive*. Bro's NetBIOS analyzer -## processes the NetBIOS session service running on TCP port 139, and (despite its -## name!) the NetBIOS datagram service on UDP port 138. 
+## processes the NetBIOS session service running on TCP port 139, and (despite +## its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information ## about NetBIOS. `RFC 1002 `__ describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## -## c: The connection, which may be a TCP or UDP, depending on the type of the +## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. ## -## msg: The raw payload of the message sent, excluding the common NetBIOS header. +## msg: The raw payload of the message sent, excluding the common NetBIOS +## header. ## ## .. bro:see:: netbios_session_accepted netbios_session_message ## netbios_session_raw_message netbios_session_rejected netbios_session_request ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's SMB -## anlyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. +## `SMB/CIFS `__. Bro's +## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -3305,9 +3346,9 @@ event netbios_session_keepalive%(c: connection, msg: string%); ## Generated for all SMB/CIFS messages. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3316,7 +3357,7 @@ event netbios_session_keepalive%(c: connection, msg: string%); ## is_orig: True if the message was sent by the originator of the underlying ## transport-level connection. ## -## cmd: A string mmenonic of the SMB command code. +## cmd: A string mnemonic of the SMB command code. ## ## body_length: The length of the SMB message body, i.e. the data starting after ## the SMB header. @@ -3338,15 +3379,15 @@ event smb_message%(c: connection, hdr: smb_hdr, is_orig: bool, cmd: string, body ## Generated for SMB/CIFS messages of type *tree connect andx*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## ## hdr: The parsed header of the SMB message. ## -## path: The ``path`` attribute specified in the message. +## path: The ``path`` attribute specified in the message. ## ## service: The ``service`` attribute specified in the message. ## @@ -3365,16 +3406,14 @@ event smb_com_tree_connect_andx%(c: connection, hdr: smb_hdr, path: string, serv ## Generated for SMB/CIFS messages of type *tree disconnect*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. 
Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## ## hdr: The parsed header of the SMB message. ## -## path: The ``path`` attribute specified in the message. -## ## .. bro:see:: smb_com_close smb_com_generic_andx smb_com_logoff_andx ## smb_com_negotiate smb_com_negotiate_response smb_com_nt_create_andx ## smb_com_read_andx smb_com_setup_andx smb_com_trans_mailslot @@ -3390,15 +3429,15 @@ event smb_com_tree_disconnect%(c: connection, hdr: smb_hdr%); ## Generated for SMB/CIFS messages of type *nt create andx*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## ## hdr: The parsed header of the SMB message. ## -## name: The ``name`` attribute specified in the message. +## name: The ``name`` attribute specified in the message. ## ## .. bro:see:: smb_com_close smb_com_generic_andx smb_com_logoff_andx ## smb_com_negotiate smb_com_negotiate_response smb_com_read_andx @@ -3415,9 +3454,9 @@ event smb_com_nt_create_andx%(c: connection, hdr: smb_hdr, name: string%); ## Generated for SMB/CIFS messages of type *nt transaction*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3444,9 +3483,9 @@ event smb_com_transaction%(c: connection, hdr: smb_hdr, trans: smb_trans, data: ## Generated for SMB/CIFS messages of type *nt transaction 2*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3473,9 +3512,9 @@ event smb_com_transaction2%(c: connection, hdr: smb_hdr, trans: smb_trans, data: ## Generated for SMB/CIFS messages of type *transaction mailslot*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3502,9 +3541,9 @@ event smb_com_trans_mailslot%(c: connection, hdr: smb_hdr, trans: smb_trans, dat ## Generated for SMB/CIFS messages of type *transaction rap*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. 
## @@ -3531,9 +3570,9 @@ event smb_com_trans_rap%(c: connection, hdr: smb_hdr, trans: smb_trans, data: sm ## Generated for SMB/CIFS messages of type *transaction pipe*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3560,9 +3599,9 @@ event smb_com_trans_pipe%(c: connection, hdr: smb_hdr, trans: smb_trans, data: s ## Generated for SMB/CIFS messages of type *read andx*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3570,8 +3609,6 @@ event smb_com_trans_pipe%(c: connection, hdr: smb_hdr, trans: smb_trans, data: s ## ## data: Always empty. ## -## is_orig: True if the message was sent by the originator of the connection. -## ## .. bro:see:: smb_com_close smb_com_generic_andx smb_com_logoff_andx ## smb_com_negotiate smb_com_negotiate_response smb_com_nt_create_andx ## smb_com_setup_andx smb_com_trans_mailslot smb_com_trans_pipe smb_com_trans_rap @@ -3587,9 +3624,9 @@ event smb_com_read_andx%(c: connection, hdr: smb_hdr, data: string%); ## Generated for SMB/CIFS messages of type *read andx*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3597,8 +3634,6 @@ event smb_com_read_andx%(c: connection, hdr: smb_hdr, data: string%); ## ## data: Always empty. ## -## is_orig: True if the message was sent by the originator of the connection. -## ## .. bro:see:: smb_com_close smb_com_generic_andx smb_com_logoff_andx ## smb_com_negotiate smb_com_negotiate_response smb_com_nt_create_andx ## smb_com_read_andx smb_com_setup_andx smb_com_trans_mailslot @@ -3614,9 +3649,9 @@ event smb_com_write_andx%(c: connection, hdr: smb_hdr, data: string%); ## Generated for SMB/CIFS messages of type *get dfs referral*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3642,9 +3677,9 @@ event smb_get_dfs_referral%(c: connection, hdr: smb_hdr, max_referral_level: cou ## Generated for SMB/CIFS messages of type *negotiate*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. 
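As a usage sketch for the SMB events in these hunks, a handler for *nt create andx* (whose declaration appears above) could log the name attribute together with the connection endpoints:

event smb_com_nt_create_andx(c: connection, hdr: smb_hdr, name: string)
	{
	# The smb_hdr record is not examined here; only the name attribute is reported.
	print fmt("SMB nt create andx for %s on %s -> %s", name, c$id$orig_h, c$id$resp_h);
	}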
Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3664,9 +3699,9 @@ event smb_com_negotiate%(c: connection, hdr: smb_hdr%); ## Generated for SMB/CIFS messages of type *negotiate response*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3688,9 +3723,9 @@ event smb_com_negotiate_response%(c: connection, hdr: smb_hdr, dialect_index: co ## Generated for SMB/CIFS messages of type *setup andx*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3711,9 +3746,9 @@ event smb_com_setup_andx%(c: connection, hdr: smb_hdr%); ## Generated for SMB/CIFS messages of type *generic andx*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3734,9 +3769,9 @@ event smb_com_generic_andx%(c: connection, hdr: smb_hdr%); ## Generated for SMB/CIFS messages of type *close*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3757,9 +3792,9 @@ event smb_com_close%(c: connection, hdr: smb_hdr%); ## Generated for SMB/CIFS messages of type *logoff andx*. ## -## See `Wikipedia `__ for more -## information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses both -## SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. +## See `Wikipedia `__ for +## more information about the SMB/CIFS protocol. Bro's SMB/CIFS analyzer parses +## both SMB-over-NetBIOS on ports 138/139 and SMB-over-TCP on port 445. ## ## c: The connection. ## @@ -3778,8 +3813,8 @@ event smb_com_close%(c: connection, hdr: smb_hdr%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event smb_com_logoff_andx%(c: connection, hdr: smb_hdr%); -## Generated for SMB/CIFS messages that indicate an error. This event is triggered -## by an SMB header including a status that signals an error. +## Generated for SMB/CIFS messages that indicate an error. This event is +## triggered by an SMB header including a status that signals an error. ## ## c: The connection. ## @@ -3787,9 +3822,9 @@ event smb_com_logoff_andx%(c: connection, hdr: smb_hdr%); ## ## cmd: The SMB command code. ## -## cmd_str: A string mmenonic of the SMB command code. 
+## cmd_str: A string mnemonic of the SMB command code. ## -## body: The raw SMB message body, i.e., the data starting after the SMB header. +## data: The raw SMB message body, i.e., the data starting after the SMB header. ## ## .. bro:see:: smb_com_close smb_com_generic_andx smb_com_logoff_andx ## smb_com_negotiate smb_com_negotiate_response smb_com_nt_create_andx @@ -3807,7 +3842,8 @@ event smb_error%(c: connection, hdr: smb_hdr, cmd: count, cmd_str: string, data: ## Generated for all DNS messages. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -3816,7 +3852,7 @@ event smb_error%(c: connection, hdr: smb_hdr, cmd: count, cmd_str: string, data: ## ## msg: The parsed DNS message header. ## -## len: The length of the message's raw representation (i.e, the DNS payload). +## len: The length of the message's raw representation (i.e., the DNS payload). ## ## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply @@ -3831,7 +3867,8 @@ event dns_message%(c: connection, is_orig: bool, msg: dns_msg, len: count%) &gro ## is raised once for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -3855,11 +3892,12 @@ event dns_request%(c: connection, msg: dns_msg, query: string, qtype: count, qcl ## Generated for DNS replies that reject a query. This event is raised if a DNS ## reply either indicates failure via its status code or does not pass on any -## answers to a query. Note that all of the event's paramaters are parsed out of +## answers to a query. Note that all of the event's parameters are parsed out of ## the reply; there's no stateful correlation with the query. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -3884,7 +3922,8 @@ event dns_rejected%(c: connection, msg: dns_msg, query: string, qtype: count, qc ## Generated for DNS replies with an *ok* status code but no question section. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -3907,7 +3946,7 @@ event dns_rejected%(c: connection, msg: dns_msg, query: string, qtype: count, qc event dns_query_reply%(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count%) &group="dns"; -## Generated when the DNS analyzer processes what seems to be a non-DNS packets. +## Generated when the DNS analyzer processes what seems to be a non-DNS packet. 
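A sketch of a handler for the dns_request event documented above. The declaration is truncated in this hunk, so the final parameter is assumed to be qclass: count, matching the usual query/qtype/qclass triple; treat the signature as an assumption rather than a quotation.

event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count)
	{
	# Log the queried name together with its numeric type and class.
	print fmt("DNS query %s (qtype %d, qclass %d) from %s", query, qtype, qclass, c$id$orig_h);
	}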
## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -3922,7 +3961,8 @@ event non_dns_request%(c: connection, msg: string%) &group="dns"; ## individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -3942,11 +3982,12 @@ event non_dns_request%(c: connection, msg: string%) &group="dns"; ## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth event dns_A_reply%(c: connection, msg: dns_msg, ans: dns_answer, a: addr%) &group="dns"; -## Generated for DNS replies of type *AAAA*. For replies with multiple answers, an -## individual event of the corresponding type is raised for each. +## Generated for DNS replies of type *AAAA*. For replies with multiple answers, +## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -3970,7 +4011,8 @@ event dns_AAAA_reply%(c: connection, msg: dns_msg, ans: dns_answer, a: addr%) &g ## individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -3994,7 +4036,8 @@ event dns_A6_reply%(c: connection, msg: dns_msg, ans: dns_answer, a: addr%) &gro ## individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4018,7 +4061,8 @@ event dns_NS_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string%) ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4042,7 +4086,8 @@ event dns_CNAME_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: strin ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. 
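For the address-record replies above, a minimal dns_A_reply handler (the dns_answer record is left untouched) might be:

event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr)
	{
	print fmt("A record answer %s seen on connection to resolver %s", a, c$id$resp_h);
	}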
@@ -4066,7 +4111,8 @@ event dns_PTR_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string% ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4075,7 +4121,7 @@ event dns_PTR_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string% ## ## ans: The type-independent part of the parsed answer record. ## -## soa: The parsed SOA value +## soa: The parsed SOA value. ## ## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SRV_reply @@ -4086,11 +4132,12 @@ event dns_PTR_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string% ## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth event dns_SOA_reply%(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa%) &group="dns"; -## Generated for DNS replies of type *WKS*. For replies with multiple answers, an -## individual event of the corresponding type is raised for each. +## Generated for DNS replies of type *WKS*. For replies with multiple answers, +## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4108,11 +4155,12 @@ event dns_SOA_reply%(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa% ## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth event dns_WKS_reply%(c: connection, msg: dns_msg, ans: dns_answer%) &group="dns"; -## Generated for DNS replies of type *HINFO*. For replies with multiple answers, an -## individual event of the corresponding type is raised for each. +## Generated for DNS replies of type *HINFO*. For replies with multiple answers, +## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4121,8 +4169,6 @@ event dns_WKS_reply%(c: connection, msg: dns_msg, ans: dns_answer%) &group="dns" ## ## ans: The type-independent part of the parsed answer record. ## -## name: The name returned by the reply. -## ## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl dns_MX_reply ## dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply dns_TSIG_addl ## dns_TXT_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered @@ -4136,7 +4182,8 @@ event dns_HINFO_reply%(c: connection, msg: dns_msg, ans: dns_answer%) &group="dn ## individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. 
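Similarly for the name-carrying replies; a sketch for dns_PTR_reply, assuming the declaration closes right after the name: string parameter shown above:

event dns_PTR_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string)
	{
	print fmt("PTR answer %s from resolver %s", name, c$id$resp_h);
	}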
## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4147,7 +4194,7 @@ event dns_HINFO_reply%(c: connection, msg: dns_msg, ans: dns_answer%) &group="dn ## ## name: The name returned by the reply. ## -## preference: The preference for *name* specificed by the reply. +## preference: The preference for *name* specified by the reply. ## ## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply @@ -4158,11 +4205,12 @@ event dns_HINFO_reply%(c: connection, msg: dns_msg, ans: dns_answer%) &group="dn ## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth event dns_MX_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string, preference: count%) &group="dns"; -## Generated for DNS replies of type *TXT*. For replies with multiple answers, an -## individual event of the corresponding type is raised for each. +## Generated for DNS replies of type *TXT*. For replies with multiple answers, +## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4182,11 +4230,12 @@ event dns_MX_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string, ## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth event dns_TXT_reply%(c: connection, msg: dns_msg, ans: dns_answer, str: string%) &group="dns"; -## Generated for DNS replies of type *SRV*. For replies with multiple answers, an -## individual event of the corresponding type is raised for each. +## Generated for DNS replies of type *SRV*. For replies with multiple answers, +## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4204,11 +4253,12 @@ event dns_TXT_reply%(c: connection, msg: dns_msg, ans: dns_answer, str: string%) ## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth event dns_SRV_reply%(c: connection, msg: dns_msg, ans: dns_answer%) &group="dns"; -## Generated for DNS replies of type *EDNS*. For replies with multiple answers, an -## individual event of the corresponding type is raised for each. +## Generated for DNS replies of type *EDNS*. For replies with multiple answers, +## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4226,11 +4276,12 @@ event dns_SRV_reply%(c: connection, msg: dns_msg, ans: dns_answer%) &group="dns" ## dns_skip_all_addl dns_skip_all_auth dns_skip_auth event dns_EDNS_addl%(c: connection, msg: dns_msg, ans: dns_edns_additional%) &group="dns"; -## Generated for DNS replies of type *TSIG*. 
For replies with multiple answers, an -## individual event of the corresponding type is raised for each. +## Generated for DNS replies of type *TSIG*. For replies with multiple answers, +## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4253,7 +4304,8 @@ event dns_TSIG_addl%(c: connection, msg: dns_msg, ans: dns_tsig_additional%) &gr ## all resource records have been passed on. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS sessions. +## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -4272,12 +4324,12 @@ event dns_end%(c: connection, msg: dns_msg%) &group="dns"; ## Generated for DHCP messages of type *discover*. ## ## See `Wikipedia -## `__ for more -## information about the DHCP protocol. +## `__ for +## more information about the DHCP protocol. ## -## c: The connection record describing the underlying UDP flow.. +## c: The connection record describing the underlying UDP flow. ## -## msg: The parsed type-indepedent part of the DHCP message. +## msg: The parsed type-independent part of the DHCP message. ## ## req_addr: The specific address requested by the client. ## @@ -4289,9 +4341,9 @@ event dns_end%(c: connection, msg: dns_msg%) &group="dns"; ## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout ## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth ## -## .. note: Bro does not support broadcast packets (as used by the DHCP protocol). -## It treats broadcast addresses just like any other and associates packets into -## transport-level flows in the same way as usual. +## .. note:: Bro does not support broadcast packets (as used by the DHCP +## protocol). It treats broadcast addresses just like any other and +## associates packets into transport-level flows in the same way as usual. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -4302,16 +4354,18 @@ event dhcp_discover%(c: connection, msg: dhcp_msg, req_addr: addr%); ## Generated for DHCP messages of type *offer*. ## ## See `Wikipedia -## `__ for more -## information about the DHCP protocol. +## `__ for +## more information about the DHCP protocol. ## -## c: The connection record describing the underlying UDP flow.. +## c: The connection record describing the underlying UDP flow. ## -## mask: The subnet mask specified by the mesage. +## msg: TODO. +## +## mask: The subnet mask specified by the message. ## ## router: The list of routers specified by the message. ## -## lease: The least interval specificed by the message. +## lease: The least interval specified by the message. ## ## serv_addr: The server address specified by the message. ## @@ -4322,9 +4376,9 @@ event dhcp_discover%(c: connection, msg: dhcp_msg, req_addr: addr%); ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request ## -## .. note: Bro does not support broadcast packets (as used by the DHCP protocol). 
-## It treats broadcast addresses just like any other and associates packets into -## transport-level flows in the same way as usual. +## .. note:: Bro does not support broadcast packets (as used by the DHCP +## protocol). It treats broadcast addresses just like any other and +## associates packets into transport-level flows in the same way as usual. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -4335,12 +4389,12 @@ event dhcp_offer%(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_ ## Generated for DHCP messages of type *request*. ## ## See `Wikipedia -## `__ for more -## information about the DHCP protocol. +## `__ for +## more information about the DHCP protocol. ## -## c: The connection record describing the underlying UDP flow.. +## c: The connection record describing the underlying UDP flow. ## -## msg: The parsed type-indepedent part of the DHCP message. +## msg: The parsed type-independent part of the DHCP message. ## ## req_addr: The client address specified by the message. ## @@ -4353,9 +4407,9 @@ event dhcp_offer%(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_ ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request ## -## .. note: Bro does not support broadcast packets (as used by the DHCP protocol). -## It treats broadcast addresses just like any other and associates packets into -## transport-level flows in the same way as usual. +## .. note:: Bro does not support broadcast packets (as used by the DHCP +## protocol). It treats broadcast addresses just like any other and +## associates packets into transport-level flows in the same way as usual. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -4366,12 +4420,12 @@ event dhcp_request%(c: connection, msg: dhcp_msg, req_addr: addr, serv_addr: add ## Generated for DHCP messages of type *decline*. ## ## See `Wikipedia -## `__ for more -## information about the DHCP protocol. +## `__ for +## more information about the DHCP protocol. ## -## c: The connection record describing the underlying UDP flow.. +## c: The connection record describing the underlying UDP flow. ## -## msg: The parsed type-indepedent part of the DHCP message. +## msg: The parsed type-independent part of the DHCP message. ## ## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply @@ -4380,9 +4434,9 @@ event dhcp_request%(c: connection, msg: dhcp_msg, req_addr: addr, serv_addr: add ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request ## -## .. note: Bro does not support broadcast packets (as used by the DHCP protocol). -## It treats broadcast addresses just like any other and associates packets into -## transport-level flows in the same way as usual. +## .. note:: Bro does not support broadcast packets (as used by the DHCP +## protocol). It treats broadcast addresses just like any other and +## associates packets into transport-level flows in the same way as usual. ## ## .. 
todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -4393,18 +4447,18 @@ event dhcp_decline%(c: connection, msg: dhcp_msg%); ## Generated for DHCP messages of type *acknowledgment*. ## ## See `Wikipedia -## `__ for more -## information about the DHCP protocol. +## `__ for +## more information about the DHCP protocol. ## -## c: The connection record describing the underlying UDP flow.. +## c: The connection record describing the underlying UDP flow. ## -## msg: The parsed type-indepedent part of the DHCP message. +## msg: The parsed type-independent part of the DHCP message. ## -## mask: The subnet mask specified by the mesage. +## mask: The subnet mask specified by the message. ## ## router: The list of routers specified by the message. ## -## lease: The least interval specificed by the message. +## lease: The least interval specified by the message. ## ## serv_addr: The server address specified by the message. ## @@ -4415,9 +4469,9 @@ event dhcp_decline%(c: connection, msg: dhcp_msg%); ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request ## -## .. note: Bro does not support broadcast packets (as used by the DHCP protocol). -## It treats broadcast addresses just like any other and associates packets into -## transport-level flows in the same way as usual. +## .. note:: Bro does not support broadcast packets (as used by the DHCP +## protocol). It treats broadcast addresses just like any other and +## associates packets into transport-level flows in the same way as usual. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -4428,12 +4482,12 @@ event dhcp_ack%(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_li ## Generated for DHCP messages of type *negative acknowledgment*. ## ## See `Wikipedia -## `__ for more -## information about the DHCP protocol. +## `__ for +## more information about the DHCP protocol. ## -## c: The connection record describing the underlying UDP flow.. +## c: The connection record describing the underlying UDP flow. ## -## msg: The parsed type-indepedent part of the DHCP message. +## msg: The parsed type-independent part of the DHCP message. ## ## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply @@ -4442,9 +4496,9 @@ event dhcp_ack%(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_li ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request ## -## .. note: Bro does not support broadcast packets (as used by the DHCP protocol). -## It treats broadcast addresses just like any other and associates packets into -## transport-level flows in the same way as usual. +## .. note:: Bro does not support broadcast packets (as used by the DHCP +## protocol). It treats broadcast addresses just like any other and +## associates packets into transport-level flows in the same way as usual. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -4455,12 +4509,12 @@ event dhcp_nak%(c: connection, msg: dhcp_msg%); ## Generated for DHCP messages of type *release*. 
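A sketch for the dhcp_ack event documented above. Its declaration is truncated in this hunk, so the trailing parameters are inferred from the parameter descriptions (lease as an interval, serv_addr as an address, router as a dhcp_router_list); treat the signature as an assumption rather than a quotation.

event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list,
	       lease: interval, serv_addr: addr)
	{
	# Report the acknowledging server and the offered lease parameters.
	print fmt("DHCP ACK from %s: mask %s, lease %s", serv_addr, mask, lease);
	}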
## ## See `Wikipedia -## `__ for more -## information about the DHCP protocol. +## `__ for +## more information about the DHCP protocol. ## -## c: The connection record describing the underlying UDP flow.. +## c: The connection record describing the underlying UDP flow. ## -## msg: The parsed type-indepedent part of the DHCP message. +## msg: The parsed type-independent part of the DHCP message. ## ## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply @@ -4469,9 +4523,9 @@ event dhcp_nak%(c: connection, msg: dhcp_msg%); ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request ## -## .. note: Bro does not support broadcast packets (as used by the DHCP protocol). -## It treats broadcast addresses just like any other and associates packets into -## transport-level flows in the same way as usual. +## .. note:: Bro does not support broadcast packets (as used by the DHCP +## protocol). It treats broadcast addresses just like any other and +## associates packets into transport-level flows in the same way as usual. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -4482,12 +4536,12 @@ event dhcp_release%(c: connection, msg: dhcp_msg%); ## Generated for DHCP messages of type *inform*. ## ## See `Wikipedia -## `__ for more -## information about the DHCP protocol. +## `__ for +## more information about the DHCP protocol. ## -## c: The connection record describing the underlying UDP flow.. +## c: The connection record describing the underlying UDP flow. ## -## msg: The parsed type-indepedent part of the DHCP message. +## msg: The parsed type-independent part of the DHCP message. ## ## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply @@ -4496,9 +4550,9 @@ event dhcp_release%(c: connection, msg: dhcp_msg%); ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request ## -## .. note: Bro does not support broadcast packets (as used by the DHCP protocol). -## It treats broadcast addresses just like any other and associates packets into -## transport-level flows in the same way as usual. +## .. note:: Bro does not support broadcast packets (as used by the DHCP +## protocol). It treats broadcast addresses just like any other and +## associates packets into transport-level flows in the same way as usual. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -4506,13 +4560,13 @@ event dhcp_release%(c: connection, msg: dhcp_msg%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event dhcp_inform%(c: connection, msg: dhcp_msg%); -## Generated for HTTP requests. Bro supports persistent and pipelined HTTP sessions -## and raises corresponding events as it parses client/server dialogues. This event -## is generated as soon as a request's initial line has been parsed, and before any -## :bro:id:`http_header` events are raised. +## Generated for HTTP requests. Bro supports persistent and pipelined HTTP +## sessions and raises corresponding events as it parses client/server +## dialogues. 
This event is generated as soon as a request's initial line has +## been parsed, and before any :bro:id:`http_header` events are raised. ## -## See `Wikipedia `__ for -## more information about the HTTP protocol. +## See `Wikipedia `__ +## for more information about the HTTP protocol. ## ## c: The connection. ## @@ -4529,13 +4583,13 @@ event dhcp_inform%(c: connection, msg: dhcp_msg%); ## truncate_http_URI event http_request%(c: connection, method: string, original_URI: string, unescaped_URI: string, version: string%) &group="http-request"; -## Generated for HTTP replies. Bro supports persistent and pipelined HTTP sessions -## and raises corresponding events as it parses client/server dialogues. This event -## is generated as soon as a reply's initial line has been parsed, and before any -## :bro:id:`http_header` events are raised. +## Generated for HTTP replies. Bro supports persistent and pipelined HTTP +## sessions and raises corresponding events as it parses client/server +## dialogues. This event is generated as soon as a reply's initial line has +## been parsed, and before any :bro:id:`http_header` events are raised. ## -## See `Wikipedia `__ for -## more information about the HTTP protocol. +## See `Wikipedia `__ +## for more information about the HTTP protocol. ## ## c: The connection. ## @@ -4550,11 +4604,12 @@ event http_request%(c: connection, method: string, original_URI: string, unescap ## http_stats event http_reply%(c: connection, version: string, code: count, reason: string%) &group="http-reply"; -## Generated for HTTP headers. Bro supports persistent and pipelined HTTP sessions -## and raises corresponding events as it parses client/server dialogues. +## Generated for HTTP headers. Bro supports persistent and pipelined HTTP +## sessions and raises corresponding events as it parses client/server +## dialogues. ## -## See `Wikipedia `__ for -## more information about the HTTP protocol. +## See `Wikipedia `__ +## for more information about the HTTP protocol. ## ## c: The connection. ## @@ -4568,41 +4623,44 @@ event http_reply%(c: connection, version: string, code: count, reason: string%) ## http_entity_data http_event http_message_done http_reply http_request ## http_stats ## -## .. note:: This event is also raised for headers found in nested body entities. +## .. note:: This event is also raised for headers found in nested body +## entities. event http_header%(c: connection, is_orig: bool, name: string, value: string%) &group="http-header"; -## Generated for HTTP headers, passing on all headers of an HTTP message at once. -## Bro supports persistent and pipelined HTTP sessions and raises corresponding -## events as it parses client/server dialogues. +## Generated for HTTP headers, passing on all headers of an HTTP message at +## once. Bro supports persistent and pipelined HTTP sessions and raises +## corresponding events as it parses client/server dialogues. ## -## See `Wikipedia `__ for -## more information about the HTTP protocol. +## See `Wikipedia `__ +## for more information about the HTTP protocol. ## ## c: The connection. ## ## is_orig: True if the header was sent by the originator of the TCP connection. ## ## hlist: A *table* containing all headers extracted from the current entity. -## The table is indexed by the position of the header (1 for the first, 2 for the -## second, etc.). +## The table is indexed by the position of the header (1 for the first, +## 2 for the second, etc.). ## ## .. 
bro:see:: http_begin_entity http_content_type http_end_entity http_entity_data ## http_event http_header http_message_done http_reply http_request http_stats ## -## .. note:: This event is also raised for headers found in nested body entities. +## .. note:: This event is also raised for headers found in nested body +## entities. event http_all_headers%(c: connection, is_orig: bool, hlist: mime_header_list%) &group="http-header"; -## Generated when starting to parse an HTTP body entity. This event is generated -## at least once for each non-empty (client or server) HTTP body; and potentially -## more than once if the body contains further nested MIME entities. Bro raises -## this event just before it starts parsing each entity's content. +## Generated when starting to parse an HTTP body entity. This event is generated +## at least once for each non-empty (client or server) HTTP body; and +## potentially more than once if the body contains further nested MIME +## entities. Bro raises this event just before it starts parsing each entity's +## content. ## -## See `Wikipedia `__ for -## more information about the HTTP protocol. +## See `Wikipedia `__ +## for more information about the HTTP protocol. ## ## c: The connection. ## -## is_orig: True if the entity was was sent by the originator of the TCP +## is_orig: True if the entity was sent by the originator of the TCP ## connection. ## ## .. bro:see:: http_all_headers http_content_type http_end_entity http_entity_data @@ -4610,17 +4668,18 @@ event http_all_headers%(c: connection, is_orig: bool, hlist: mime_header_list%) ## mime_begin_entity event http_begin_entity%(c: connection, is_orig: bool%) &group="http-body"; -## Generated when finishing parsing an HTTP body entity. This event is generated -## at least once for each non-empty (client or server) HTTP body; and potentially -## more than once if the body contains further nested MIME entities. Bro raises -## this event at the point when it has finished parsing an entity's content. +## Generated when finishing parsing an HTTP body entity. This event is generated +## at least once for each non-empty (client or server) HTTP body; and +## potentially more than once if the body contains further nested MIME +## entities. Bro raises this event at the point when it has finished parsing an +## entity's content. ## -## See `Wikipedia `__ for -## more information about the HTTP protocol. +## See `Wikipedia `__ +## for more information about the HTTP protocol. ## ## c: The connection. ## -## is_orig: True if the entity was was sent by the originator of the TCP +## is_orig: True if the entity was sent by the originator of the TCP ## connection. ## ## .. bro:see:: http_all_headers http_begin_entity http_content_type http_entity_data @@ -4633,18 +4692,18 @@ event http_end_entity%(c: connection, is_orig: bool%) &group="http-body"; ## chunk of the data of not further defined size. ## ## A common idiom for using this event is to first *reassemble* the data -## at the scripting layer by concatening it to a successvily growing +## at the scripting layer by concatenating it to a successively growing ## string; and only perform further content analysis once the corresponding -## :bro:id:`http_end_entity` event has been raised. Note, however, that doing so +## :bro:id:`http_end_entity` event has been raised. Note, however, that doing so ## can be quite expensive for HTTP tranders. At the very least, one should ## impose an upper size limit on how much data is being buffered. 
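A minimal Bro script sketch of the buffering idiom described above, assuming an illustrative table name, a 4096-byte cap, and placeholder analysis in http_end_entity (these names and limits are assumptions for the example, not taken from the event definitions; state expiration is omitted for brevity):

    # Sketch only: buffer entity data per connection and analyze it once the
    # entity is complete, with an upper bound on how much is kept in memory.
    global entity_bodies: table[conn_id] of string &default="";
    const max_entity_buffer = 4096;  # assumed upper size limit

    event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
        {
        if ( byte_len(entity_bodies[c$id]) < max_entity_buffer )
            entity_bodies[c$id] = string_cat(entity_bodies[c$id], data);
        }

    event http_end_entity(c: connection, is_orig: bool)
        {
        if ( c$id in entity_bodies )
            {
            # Perform the actual content analysis on entity_bodies[c$id] here,
            # then discard the buffered data.
            delete entity_bodies[c$id];
            }
        }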
## -## See `Wikipedia `__ for -## more information about the HTTP protocol. +## See `Wikipedia `__ +## for more information about the HTTP protocol. ## ## c: The connection. ## -## is_orig: True if the entity was was sent by the originator of the TCP +## is_orig: True if the entity was sent by the originator of the TCP ## connection. ## ## length: The length of *data*. @@ -4656,17 +4715,17 @@ event http_end_entity%(c: connection, is_orig: bool%) &group="http-body"; ## mime_entity_data http_entity_data_delivery_size skip_http_data event http_entity_data%(c: connection, is_orig: bool, length: count, data: string%) &group="http-body"; -## Generated for reporting an HTTP bodie's content type. This event is +## Generated for reporting an HTTP body's content type. This event is ## generated at the end of parsing an HTTP header, passing on the MIME ## type as specified by the ``Content-Type`` header. If that header is ## missing, this event is still raised with a default value of ``text/plain``. ## -## See `Wikipedia `__ for -## more information about the HTTP protocol. +## See `Wikipedia `__ +## for more information about the HTTP protocol. ## ## c: The connection. ## -## is_orig: True if the entity was was sent by the originator of the TCP +## is_orig: True if the entity was sent by the originator of the TCP ## connection. ## ## ty: The main type. @@ -4688,12 +4747,12 @@ event http_content_type%(c: connection, is_orig: bool, ty: string, subty: string ## message have been processed (and their corresponding ``http_entity_*`` events ## generated). ## -## See `Wikipedia `__ for -## more information about the HTTP protocol. +## See `Wikipedia `__ +## for more information about the HTTP protocol. ## ## c: The connection. ## -## is_orig: True if the entity was was sent by the originator of the TCP +## is_orig: True if the entity was sent by the originator of the TCP ## connection. ## ## stat: Further meta information about the message. @@ -4704,13 +4763,13 @@ event http_message_done%(c: connection, is_orig: bool, stat: http_message_stat%) ## Generated for errors found when decoding HTTP requests or replies. ## -## See `Wikipedia `__ for -## more information about the HTTP protocol. +## See `Wikipedia `__ +## for more information about the HTTP protocol. ## ## c: The connection. ## -## event_type: A string describing the general category of the problem found (e.g., -## ``illegal format``). +## event_type: A string describing the general category of the problem found +## (e.g., ``illegal format``). ## ## detail: Further more detailed description of the error. ## @@ -4725,17 +4784,18 @@ event http_event%(c: connection, event_type: string, detail: string%); ## ## c: The connection. ## -## stats: Statistics summarizing HTTP-level properties of the finished connection. +## stats: Statistics summarizing HTTP-level properties of the finished +## connection. ## ## .. bro:see:: http_all_headers http_begin_entity http_content_type http_end_entity ## http_entity_data http_event http_header http_message_done http_reply ## http_request event http_stats%(c: connection, stats: http_stats_rec%); -## Generated when seeing an SSH client's version identification. The SSH protocol -## starts with a clear-test handshake message that reports client and server -## protocol/software versions. This event provides access to what the client -## sent. +## Generated when seeing an SSH client's version identification. 
The SSH +## protocol starts with a clear-text handshake message that reports client and +## server protocol/software versions. This event provides access to what the +## client sent. ## ## ## See `Wikipedia `__ for more @@ -4747,14 +4807,14 @@ event http_stats%(c: connection, stats: http_stats_rec%); ## ## .. bro:see:: ssh_server_version ## -## .. note:: As everything after the initial version handshake proceeds encrypted, -## Bro cannot further analyze SSH sessions. +## .. note:: As everything after the initial version handshake proceeds +## encrypted, Bro cannot further analyze SSH sessions. event ssh_client_version%(c: connection, version: string%); -## Generated when seeing an SSH server's version identification. The SSH protocol -## starts with a clear-test handshake message that reports client and server -## protocol/software versions. This event provides access to what the server -## sent. +## Generated when seeing an SSH server's version identification. The SSH +## protocol starts with a clear-text handshake message that reports client and +## server protocol/software versions. This event provides access to what the +## server sent. ## ## See `Wikipedia `__ for more ## information about the SSH protocol. @@ -4772,53 +4832,53 @@ event ssh_server_version%(c: connection, version: string%); ## Generated for an SSL/TLS client's initial *hello* message. SSL/TLS sessions ## start with an unencrypted handshake, and Bro extracts as much information out -## that it as it can. This event provides access to the initial information sent by -## the client. +## of that as it can. This event provides access to the initial information +## sent by the client. ## ## See `Wikipedia `__ for ## more information about the SSL/TLS protocol. ## ## c: The connection. ## -## version: The protocol version as extracted from the client's -## message. The values are standardized as part of the SSL/TLS protocol. The +## version: The protocol version as extracted from the client's message. The +## values are standardized as part of the SSL/TLS protocol. The ## :bro:id:`SSL::version_strings` table maps them to descriptive names. ## -## possible_ts: The current time as sent by the client. Note that SSL/TLS does not -## require clocks to be set correctly, so treat with care. +## possible_ts: The current time as sent by the client. Note that SSL/TLS does +## not require clocks to be set correctly, so treat with care. ## ## session_id: The session ID sent by the client (if any). ## ## ciphers: The list of ciphers the client offered to use. The values are -## standardized as part of the SSL/TLS protocol. The :bro:id:`SSL::cipher_desc` table -## maps them to descriptive names. +## standardized as part of the SSL/TLS protocol. The +## :bro:id:`SSL::cipher_desc` table maps them to descriptive names. ## ## .. bro:see:: ssl_alert ssl_established ssl_extension ssl_server_hello ## ssl_session_ticket_handshake x509_certificate x509_error x509_extension event ssl_client_hello%(c: connection, version: count, possible_ts: time, session_id: string, ciphers: count_set%); -## Generated for an SSL/TLS servers's initial *hello* message. SSL/TLS sessions +## Generated for an SSL/TLS server's initial *hello* message. SSL/TLS sessions ## start with an unencrypted handshake, and Bro extracts as much information out -## of that as it can. This event provides access to the initial information sent by -## the client. +## of that as it can. This event provides access to the initial information +## sent by the client. 
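As a rough illustration of consuming the client-side hello documented above, the handler below simply prints the offered version and the number of ciphers; the output format is an assumption made for this example, and :bro:id:`SSL::version_strings` could be used to turn the numeric version into a descriptive name.

    # Sketch only: report what the client offered in its hello message.
    event ssl_client_hello(c: connection, version: count, possible_ts: time,
                           session_id: string, ciphers: count_set)
        {
        print fmt("%s offered SSL/TLS version %d and %d ciphers to %s",
                  c$id$orig_h, version, |ciphers|, c$id$resp_h);
        }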
## ## See `Wikipedia `__ for ## more information about the SSL/TLS protocol. ## ## c: The connection. ## -## version: The protocol version as extracted from the servers's message. +## version: The protocol version as extracted from the server's message. ## The values are standardized as part of the SSL/TLS protocol. The ## :bro:id:`SSL::version_strings` table maps them to descriptive names. ## -## possible_ts: The current time as sent by the server. Note that SSL/TLS does not -## require clocks to be set correctly, so treat with care. +## possible_ts: The current time as sent by the server. Note that SSL/TLS does +## not require clocks to be set correctly, so treat with care. ## ## session_id: The session ID as sent back by the server (if any). ## ## cipher: The cipher chosen by the server. The values are standardized as part -## of the SSL/TLS protocol. The :bro:id:`SSL::cipher_desc` table maps them to -## descriptive names. +## of the SSL/TLS protocol. The :bro:id:`SSL::cipher_desc` table maps +## them to descriptive names. ## ## comp_method: The compression method chosen by the client. The values are ## standardized as part of the SSL/TLS protocol. @@ -4827,18 +4887,18 @@ event ssl_client_hello%(c: connection, version: count, possible_ts: time, sessio ## ssl_session_ticket_handshake x509_certificate x509_error x509_extension event ssl_server_hello%(c: connection, version: count, possible_ts: time, session_id: string, cipher: count, comp_method: count%); -## Generated for SSL/TLS extensions seen in an initial handshake. SSL/TLS sessions -## start with an unencrypted handshake, and Bro extracts as much information out of -## that as it can. This event provides access to any extensions either side sents -## as part of extended *hello* message. +## Generated for SSL/TLS extensions seen in an initial handshake. SSL/TLS +## sessions start with an unencrypted handshake, and Bro extracts as much +## information out of that as it can. This event provides access to any +## extensions either side sends as part of an extended *hello* message. ## ## c: The connection. ## ## is_orig: True if event is raised for originator side of the connection. ## ## code: The numerical code of the extension. The values are standardized as -## part of the SSL/TLS protocol. The :bro:id:`SSL::extensions` table maps them to -## descriptive names. +## part of the SSL/TLS protocol. The :bro:id:`SSL::extensions` table maps +## them to descriptive names. ## ## val: The raw extension value that was sent in the message. ## @@ -4847,10 +4907,10 @@ event ssl_server_hello%(c: connection, version: count, possible_ts: time, sessio event ssl_extension%(c: connection, is_orig: bool, code: count, val: string%); ## Generated at the end of an SSL/TLS handshake. SSL/TLS sessions start with -## an unencrypted handshake, and Bro extracts as much information out of that as -## it can. This event signals the time when an SSL/TLS has finished the handshake -## and its endpoints consider it as fully established. Typically, everything from -## now on will be encrypted. +## an unencrypted handshake, and Bro extracts as much information out of that +## as it can. This event signals the time when an SSL/TLS has finished the +## handshake and its endpoints consider it as fully established. Typically, +## everything from now on will be encrypted. ## ## See `Wikipedia `__ for ## more information about the SSL/TLS protocol. 
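A minimal sketch of reacting to the extension and establishment events above; the printed messages are illustrative assumptions, and a real script would more likely record this state in the connection rather than print it.

    # Sketch only: note extensions seen during the handshake and the point at
    # which the session becomes fully established (and hence encrypted).
    event ssl_extension(c: connection, is_orig: bool, code: count, val: string)
        {
        print fmt("SSL/TLS extension %d (%d bytes) from %s",
                  code, byte_len(val), is_orig ? "client" : "server");
        }

    event ssl_established(c: connection)
        {
        print fmt("SSL/TLS session established: %s -> %s",
                  c$id$orig_h, c$id$resp_h);
        }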
@@ -4861,11 +4921,11 @@ event ssl_extension%(c: connection, is_orig: bool, code: count, val: string%); ## ssl_session_ticket_handshake x509_certificate x509_error x509_extension event ssl_established%(c: connection%); -## Generated for SSL/TLS alert records. SSL/TLS sessions start with an unencrypted -## handshake, and Bro extracts as much information out of that as it can. If during -## that handshake, an endpoint encounteres a fatal error, it sends an *alert* -## record, that it turns triggers this event. After an *alert*, any endpoint -## may close the connection immediately. +## Generated for SSL/TLS alert records. SSL/TLS sessions start with an +## unencrypted handshake, and Bro extracts as much information out of that as +## it can. If during that handshake, an endpoint encounters a fatal error, it +## sends an *alert* record, that in turn triggers this event. After an *alert*, +## any endpoint may close the connection immediately. ## ## See `Wikipedia `__ for ## more information about the SSL/TLS protocol. @@ -4884,12 +4944,12 @@ event ssl_established%(c: connection%); ## ssl_session_ticket_handshake x509_certificate x509_error x509_extension event ssl_alert%(c: connection, is_orig: bool, level: count, desc: count%); -## Generated for SSL/TLS handshake messages that are a part of the stateless-server -## session resumption mechanism. SSL/TLS sessions start with an unencrypted -## handshake, and Bro extracts as much information out of that as it can. This -## event is raised when an SSL/TLS server passes session ticket to the client that -## can later be used for resuming the session. The mechanism is described in -## :rfc:`4507` +## Generated for SSL/TLS handshake messages that are a part of the +## stateless-server session resumption mechanism. SSL/TLS sessions start with +## an unencrypted handshake, and Bro extracts as much information out of that +## as it can. This event is raised when an SSL/TLS server passes a session +## ticket to the client that can later be used for resuming the session. The +## mechanism is described in :rfc:`4507` ## ## See `Wikipedia `__ for ## more information about the SSL/TLS protocol. @@ -4905,13 +4965,13 @@ event ssl_alert%(c: connection, is_orig: bool, level: count, desc: count%); ## x509_certificate x509_error x509_extension ssl_alert event ssl_session_ticket_handshake%(c: connection, ticket_lifetime_hint: count, ticket: string%); -## Generated for x509 certificates seen in SSL/TLS connections. During the initial -## SSL/TLS handshake, certificates are exchanged in the clear. Bro raises this -## event for each certificate seen (including both a site's primary cert, and -## further certs sent as part of the validation chain). +## Generated for X509 certificates seen in SSL/TLS connections. During the +## initial SSL/TLS handshake, certificates are exchanged in the clear. Bro +## raises this event for each certificate seen (including both a site's primary +## cert, and further certs sent as part of the validation chain). ## -## See `Wikipedia `__ for more information about -## the X.509 format. +## See `Wikipedia `__ for more information +## about the X.509 format. ## ## c: The connection. ## @@ -4920,7 +4980,7 @@ event ssl_session_ticket_handshake%(c: connection, ticket_lifetime_hint: count, ## cert: The parsed certificate. ## ## chain_idx: The index in the validation chain that this cert has. 
Index zero -## indicates an endpoints primary cert, while higher indices +## indicates an endpoint's primary cert, while higher indices ## indicate the place in the validation chain (which has length ## *chain_len*). ## @@ -4928,16 +4988,17 @@ event ssl_session_ticket_handshake%(c: connection, ticket_lifetime_hint: count, ## of. ## ## der_cert: The complete cert encoded in `DER -## `__ format. +## `__ +## format. ## ## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_extension ## ssl_server_hello x509_error x509_extension x509_verify event x509_certificate%(c: connection, is_orig: bool, cert: X509, chain_idx: count, chain_len: count, der_cert: string%); -## Generated for X.509 extensions seen in a certificate. +## Generated for X509 extensions seen in a certificate. ## -## See `Wikipedia `__ for more information about -## the X.509 format. +## See `Wikipedia `__ for more information +## about the X.509 format. ## ## c: The connection. ## @@ -4949,17 +5010,17 @@ event x509_certificate%(c: connection, is_orig: bool, cert: X509, chain_idx: cou ## ssl_server_hello x509_certificate x509_error x509_verify event x509_extension%(c: connection, is_orig: bool, data: string%); -## Generated when errors occur during parsing an X.509 certificate. +## Generated when errors occur during parsing an X509 certificate. ## -## See `Wikipedia `__ for more information about -## the X.509 format. +## See `Wikipedia `__ for more information +## about the X.509 format. ## ## c: The connection. ## ## is_orig: True if event is raised for originator side of the connection. ## -## err: An error code describing what went wrong. :bro:id:`SSL::x509_errors` maps -## error codes to a textual description. +## err: An error code describing what went wrong. :bro:id:`SSL::x509_errors` +## maps error codes to a textual description. ## ## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_extension ## ssl_server_hello x509_certificate x509_extension x509_err2str x509_verify @@ -5022,16 +5083,16 @@ event epm_map_response%(c: connection, uuid: string, p: port, h: addr%); ## Generated for NCP requests (Netware Core Protocol). ## -## See `Wikipedia `__ for more -## information about the NCP protocol. +## See `Wikipedia `__ for +## more information about the NCP protocol. ## ## c: The connection. ## ## frame_type: The frame type, as specified by the protocol. ## -## length: The length of the request body, excluding the frame header, +## length: The length of the request body, excluding the frame header. ## -## func: The requested function, as specified by the protocol. +## func: The requested function, as specified by the protocol. ## ## .. bro:see:: ncp_reply ## @@ -5043,20 +5104,20 @@ event ncp_request%(c: connection, frame_type: count, length: count, func: count% ## Generated for NCP replies (Netware Core Protocol). ## -## See `Wikipedia `__ for more -## information about the NCP protocol. +## See `Wikipedia `__ for +## more information about the NCP protocol. ## ## c: The connection. ## ## frame_type: The frame type, as specified by the protocol. ## -## length: The length of the request body, excluding the frame header, +## length: The length of the request body, excluding the frame header. ## ## req_frame: The frame type from the corresponding request. ## -## req_frame: The function code from the corresponding request. +## req_func: The function code from the corresponding request. ## -## completion_code: The replie's completion code, as specified by the protocol. 
+## completion_code: The reply's completion code, as specified by the protocol. ## ## .. bro:see:: ncp_request ## @@ -5068,12 +5129,13 @@ event ncp_reply%(c: connection, frame_type: count, length: count, req_frame: cou ## Generated for client-side commands on POP3 connections. ## -## See `Wikipedia `__ for more information about -## the POP3 protocol. +## See `Wikipedia `__ for more information +## about the POP3 protocol. ## ## c: The connection. ## -## is_orig: True if the command was sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## command: The command sent. ## @@ -5091,24 +5153,23 @@ event pop3_request%(c: connection, is_orig: bool, ## Generated for server-side replies to commands on POP3 connections. ## -## See `Wikipedia `__ for more information about -## the POP3 protocol. +## See `Wikipedia `__ for more information +## about the POP3 protocol. ## ## c: The connection. ## -## is_orig: True if the command was sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## cmd: The success indicator sent by the server. This corresponds to the ## first token on the line sent, and should be either ``OK`` or ``ERR``. ## ## msg: The textual description the server sent along with *cmd*. ## -## arg: The argument to the command. -## ## .. bro:see:: pop3_data pop3_login_failure pop3_login_success pop3_request ## pop3_terminate pop3_unexpected ## -## .. todo: This event is receiving odd parameters, should unify. +## .. todo:: This event is receiving odd parameters, should unify. ## ## .. todo:: Bro's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet @@ -5116,13 +5177,13 @@ event pop3_request%(c: connection, is_orig: bool, ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event pop3_reply%(c: connection, is_orig: bool, cmd: string, msg: string%); -## Generated for server-side multi-lines responses on POP3 connections. POP3 -## connection use multi-line responses to send buld data, such as the actual +## Generated for server-side multi-line responses on POP3 connections. POP3 +## connections use multi-line responses to send bulk data, such as the actual ## mails. This event is generated once for each line that's part of such a ## response. ## -## See `Wikipedia `__ for more information about -## the POP3 protocol. +## See `Wikipedia `__ for more information +## about the POP3 protocol. ## ## c: The connection. ## @@ -5139,12 +5200,12 @@ event pop3_reply%(c: connection, is_orig: bool, cmd: string, msg: string%); ## corresponding entry to :bro:see:`dpd_config` or a DPD payload signature. event pop3_data%(c: connection, is_orig: bool, data: string%); -## Generated for errors encountered on POP3 sessions. If the POP3 analyzers finds -## state transition that do not confirm to the protocol specification, or other -## situations it can't handle, it raises this event. +## Generated for errors encountered on POP3 sessions. If the POP3 analyzer +## finds state transitions that do not conform to the protocol specification, +## or other situations it can't handle, it raises this event. ## -## See `Wikipedia `__ for more information about -## the POP3 protocol. +## See `Wikipedia `__ for more information +## about the POP3 protocol. ## ## c: The connection. 
## @@ -5164,13 +5225,13 @@ event pop3_data%(c: connection, is_orig: bool, data: string%); event pop3_unexpected%(c: connection, is_orig: bool, msg: string, detail: string%); -## Generated when POP3 connection go encrypted. While POP3 is by default a +## Generated when a POP3 connection goes encrypted. While POP3 is by default a ## clear-text protocol, extensions exist to switch to encryption. This event is -## generated if that happens and the analyzers then stops processing the +## generated if that happens and the analyzer then stops processing the ## connection. ## -## See `Wikipedia `__ for more information about -## the POP3 protocol. +## See `Wikipedia `__ for more information +## about the POP3 protocol. ## ## c: The connection. ## @@ -5192,8 +5253,8 @@ event pop3_terminate%(c: connection, is_orig: bool, msg: string%); ## Generated for successful authentications on POP3 connections. ## -## See `Wikipedia `__ for more information about -## the POP3 protocol. +## See `Wikipedia `__ for more information +## about the POP3 protocol. ## ## c: The connection. ## @@ -5216,15 +5277,15 @@ event pop3_login_success%(c: connection, is_orig: bool, ## Generated for unsuccessful authentications on POP3 connections. ## -## See `Wikipedia `__ for more information about -## the POP3 protocol. +## See `Wikipedia `__ for more information +## about the POP3 protocol. ## ## c: The connection. ## ## is_orig: Always false. ## -## user: The user name attempted for authentication. The event is only generated if -## a non-empty user name was used. +## user: The user name attempted for authentication. The event is only +## generated if a non-empty user name was used. ## ## password: The password attempted for authentication. ## @@ -5261,9 +5322,9 @@ event pop3_login_failure%(c: connection, is_orig: bool, ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response ## irc_part_message irc_password_message ## -## .. note:: This event is generated only for message that originate at the -## clients-side. Commands coming in from remote trigger the ge:bro:id:`irc_message` -## event instead. +## .. note:: This event is generated only for messages that originate +## at the client-side. Commands coming in from remote trigger +## the :bro:id:`irc_message` event instead. event irc_request%(c: connection, is_orig: bool, prefix: string, command: string, arguments: string%); @@ -5275,9 +5336,10 @@ event irc_request%(c: connection, is_orig: bool, prefix: string, ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## -## prefix: The optional prefix comming with the reply. IRC uses the prefix to +## prefix: The optional prefix coming with the reply. IRC uses the prefix to ## indicate the true origin of a message. ## ## code: The reply code, as specified by the protocol. @@ -5306,7 +5368,7 @@ event irc_reply%(c: connection, is_orig: bool, prefix: string, ## ## command: The command. ## -## arguments: The arguments for the command. +## message: TODO. ## ## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message @@ -5317,22 +5379,23 @@ event irc_reply%(c: connection, is_orig: bool, prefix: string, ## .. note:: ## ## This event is generated only for messages that are forwarded by the server -## to the client. 
Commands coming from client trigger the :bro:id:`irc_request` -## event instead. +## to the client. Commands coming from client trigger the +## :bro:id:`irc_request` event instead. event irc_message%(c: connection, is_orig: bool, prefix: string, command: string, message: string%); -## Generated for IRC messages of type *quit*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *quit*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## -## nick: The nick name coming with the message. +## nick: The nickname coming with the message. ## ## message: The text included with the message. ## @@ -5343,15 +5406,16 @@ event irc_message%(c: connection, is_orig: bool, prefix: string, ## irc_part_message irc_password_message event irc_quit_message%(c: connection, is_orig: bool, nick: string, message: string%); -## Generated for IRC messages of type *privmsg*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *privmsg*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## source: The source of the private communication. ## @@ -5375,7 +5439,8 @@ event irc_privmsg_message%(c: connection, is_orig: bool, source: string, ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## source: The source of the private communication. ## @@ -5399,7 +5464,8 @@ event irc_notice_message%(c: connection, is_orig: bool, source: string, ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## source: The source of the private communication. ## @@ -5423,12 +5489,11 @@ event irc_squery_message%(c: connection, is_orig: bool, source: string, ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## info_list: The user information coming with the command. ## -## message: The text of communication. -## ## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_kick_message ## irc_message irc_mode_message irc_names_info irc_network_info irc_nick_message @@ -5436,15 +5501,16 @@ event irc_squery_message%(c: connection, is_orig: bool, source: string, ## irc_password_message event irc_join_message%(c: connection, is_orig: bool, info_list: irc_join_list%); -## Generated for IRC messages of type *part*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *part*. 
This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## nick: The nickname coming with the message. ## @@ -5460,15 +5526,16 @@ event irc_join_message%(c: connection, is_orig: bool, info_list: irc_join_list%) event irc_part_message%(c: connection, is_orig: bool, nick: string, chans: string_set, message: string%); -## Generated for IRC messages of type *nick*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *nick*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## who: The user changing its nickname. ## @@ -5488,7 +5555,8 @@ event irc_nick_message%(c: connection, is_orig: bool, who: string, newnick: stri ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invite_message irc_join_message irc_kick_message @@ -5504,7 +5572,8 @@ event irc_invalid_nick%(c: connection, is_orig: bool%); ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## users: The number of users as returned in the reply. ## @@ -5527,7 +5596,8 @@ event irc_network_info%(c: connection, is_orig: bool, users: count, ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## users: The number of users as returned in the reply. ## @@ -5550,7 +5620,8 @@ event irc_server_info%(c: connection, is_orig: bool, users: count, ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## chans: The number of channels as returned in the reply. ## @@ -5568,9 +5639,10 @@ event irc_channel_info%(c: connection, is_orig: bool, chans: count%); ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## -## target_nick: The target nick name. +## target_nick: The target nickname. ## ## channel: The channel. ## @@ -5580,7 +5652,7 @@ event irc_channel_info%(c: connection, is_orig: bool, chans: count%); ## ## server: The server. ## -## nick: The nick name. +## nick: The nickname. ## ## params: The parameters. ## @@ -5606,7 +5678,8 @@ event irc_who_line%(c: connection, is_orig: bool, target_nick: string, ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. 
+## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## c_type: The channel type. ## @@ -5629,9 +5702,10 @@ event irc_names_info%(c: connection, is_orig: bool, c_type: string, ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## -## nick: The nick name specified in the reply. +## nick: The nickname specified in the reply. ## ## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message @@ -5647,9 +5721,10 @@ event irc_whois_operator_line%(c: connection, is_orig: bool, nick: string%); ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## -## nick: The nick name specified in the reply. +## nick: The nickname specified in the reply. ## ## chans: The set of channels returned. ## @@ -5668,16 +5743,15 @@ event irc_whois_channel_line%(c: connection, is_orig: bool, nick: string, ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## -## nick: The nick name specified in the reply. +## nick: The nickname specified in the reply. ## ## user: The user name specified in the reply. ## ## host: The host name specified in the reply. ## -## user: The user name specified in the reply. -## ## real_name: The real name specified in the reply. ## ## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message @@ -5695,7 +5769,8 @@ event irc_whois_user_line%(c: connection, is_orig: bool, nick: string, ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## got_oper: True if the *oper* command was executed successfully ## (*youreport*) and false otherwise (*nooperhost*). @@ -5714,7 +5789,8 @@ event irc_oper_response%(c: connection, is_orig: bool, got_oper: bool%); ## ## c: The connection. ## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## prefix: The optional prefix coming with the command. IRC uses the prefix to ## indicate the true origin of a message. @@ -5735,6 +5811,9 @@ event irc_global_users%(c: connection, is_orig: bool, prefix: string, msg: strin ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. +## ## channel: The channel name specified in the reply. ## ## topic: The topic specified in the reply. @@ -5746,15 +5825,16 @@ event irc_global_users%(c: connection, is_orig: bool, prefix: string, msg: strin ## irc_password_message event irc_channel_topic%(c: connection, is_orig: bool, channel: string, topic: string%); -## Generated for IRC messages of type *who*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *who*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. 
## -## is_orig: True if the command what sent by the originator of the TCP connection. +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## ## mask: The mask specified in the message. ## @@ -5767,15 +5847,20 @@ event irc_channel_topic%(c: connection, is_orig: bool, channel: string, topic: s ## irc_part_message irc_password_message event irc_who_message%(c: connection, is_orig: bool, mask: string, oper: bool%); -## Generated for IRC messages of type *whois*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *whois*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. ## +## server: TODO. +## +## users: TODO. ## ## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message @@ -5784,14 +5869,17 @@ event irc_who_message%(c: connection, is_orig: bool, mask: string, oper: bool%); ## irc_part_message irc_password_message event irc_whois_message%(c: connection, is_orig: bool, server: string, users: string%); -## Generated for IRC messages of type *oper*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *oper*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. +## ## user: The user specified in the message. ## ## password: The password specified in the message. @@ -5803,14 +5891,17 @@ event irc_whois_message%(c: connection, is_orig: bool, server: string, users: st ## irc_password_message event irc_oper_message%(c: connection, is_orig: bool, user: string, password: string%); -## Generated for IRC messages of type *kick*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *kick*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. +## ## prefix: The optional prefix coming with the command. IRC uses the prefix to ## indicate the true origin of a message. ## @@ -5828,14 +5919,17 @@ event irc_oper_message%(c: connection, is_orig: bool, user: string, password: st event irc_kick_message%(c: connection, is_orig: bool, prefix: string, chans: string, users: string, comment: string%); -## Generated for IRC messages of type *error*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *error*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. +## ## prefix: The optional prefix coming with the command. IRC uses the prefix to ## indicate the true origin of a message. 
## @@ -5856,10 +5950,13 @@ event irc_error_message%(c: connection, is_orig: bool, prefix: string, message: ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. +## ## prefix: The optional prefix coming with the command. IRC uses the prefix to ## indicate the true origin of a message. ## -## nickname: The nick name specified in the message. +## nickname: The nickname specified in the message. ## ## channel: The channel specified in the message. ## @@ -5871,14 +5968,17 @@ event irc_error_message%(c: connection, is_orig: bool, prefix: string, message: event irc_invite_message%(c: connection, is_orig: bool, prefix: string, nickname: string, channel: string%); -## Generated for IRC messages of type *mode*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *mode*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. +## ## prefix: The optional prefix coming with the command. IRC uses the prefix to ## indicate the true origin of a message. ## @@ -5891,20 +5991,23 @@ event irc_invite_message%(c: connection, is_orig: bool, prefix: string, ## irc_password_message event irc_mode_message%(c: connection, is_orig: bool, prefix: string, params: string%); -## Generated for IRC messages of type *squit*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *squit*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. +## ## prefix: The optional prefix coming with the command. IRC uses the prefix to ## indicate the true origin of a message. ## ## server: The server specified in the message. ## -## messate: The textual description specified in the message. +## message: The textual description specified in the message. ## ## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message @@ -5914,14 +6017,17 @@ event irc_mode_message%(c: connection, is_orig: bool, prefix: string, params: st event irc_squit_message%(c: connection, is_orig: bool, prefix: string, server: string, message: string%); -## Generated for IRC messages of type *dcc*. This event is generated for messages -## coming from both the client and the server. +## Generated for IRC messages of type *dcc*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. +## ## prefix: The optional prefix coming with the command. IRC uses the prefix to ## indicate the true origin of a message. ## @@ -5947,14 +6053,17 @@ event irc_dcc_message%(c: connection, is_orig: bool, dcc_type: string, argument: string, address: addr, dest_port: count, size: count%); -## Generated for IRC messages of type *user*. This event is generated for messages -## coming from both the client and the server. 
+## Generated for IRC messages of type *user*. This event is generated for +## messages coming from both the client and the server. ## ## See `Wikipedia `__ for more ## information about the IRC protocol. ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. +## ## user: The user specified in the message. ## ## host: The host name specified in the message. @@ -5978,6 +6087,9 @@ event irc_user_message%(c: connection, is_orig: bool, user: string, host: string ## ## c: The connection. ## +## is_orig: True if the command was sent by the originator of the TCP +## connection. +## ## password: The password specified in the message. ## ## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message @@ -5989,7 +6101,6 @@ event irc_password_message%(c: connection, is_orig: bool, password: string%); ## TODO. ## -## .. bro:see:: event file_transferred%(c: connection, prefix: string, descr: string, mime_type: string%); ## Generated for monitored Syslog messages. @@ -6005,13 +6116,14 @@ event file_transferred%(c: connection, prefix: string, descr: string, mime_type: ## ## msg: The message logged. ## -## .. note:: Bro currently parses only UDP syslog traffic. Support for TCP syslog -## will be added soon. +## .. note:: Bro currently parses only UDP syslog traffic. Support for TCP +## syslog will be added soon. event syslog_message%(c: connection, facility: count, severity: count, msg: string%); -## Generated when a signature matches. Bro's signature engine provide -## high-performance pattern matching separately from the normal script processing. -## If a signature with an ``event`` action matches, this event is raised. +## Generated when a signature matches. Bro's signature engine provides +## high-performance pattern matching separately from the normal script +## processing. If a signature with an ``event`` action matches, this event is +## raised. ## ## See the :doc:`user manual ` for more information about Bro's ## signature engine. @@ -6021,10 +6133,10 @@ event syslog_message%(c: connection, facility: count, severity: count, msg: stri ## ## msg: The message passed to the ``event`` signature action. ## -## data; The last chunk of input that triggered the match. Note that the specifics -## here are no well-defined as Bro does not buffer any input. If a match is split -## across packet boundaries, only the last chunk triggering the will be passed on -## to the event. +## data: The last chunk of input that triggered the match. Note that the +## specifics here are not well-defined as Bro does not buffer any input. +## If a match is split across packet boundaries, only the last chunk +## triggering the match will be passed on to the event. event signature_match%(state: signature_state, msg: string, data: string%); ## Generated when a protocol analyzer finds an identification of a software @@ -6039,25 +6151,26 @@ event signature_match%(state: signature_state, msg: string, data: string%); ## ## s: A description of the software found. ## -## descr: The raw (unparsed) software identification string as extracted from the -## protocol. +## descr: The raw (unparsed) software identification string as extracted from +## the protocol. ## ## .. bro:see:: software_parse_error software_unparsed_version_found OS_version_found event software_version_found%(c: connection, host: addr, s: software, descr: string%); -## Generated when a protocol analyzer finds an identification of a software used on -## a system but cannot parse it. 
This is a protocol-independent event that is fed -## by different analyzers. For example, the HTTP analyzer reports user-agent and -## server software by raising this event if it cannot parse them directly (if canit -## :bro:id:`software_version_found` will be generated instead). +## Generated when a protocol analyzer finds an identification of a software +## used on a system but cannot parse it. This is a protocol-independent event +## that is fed by different analyzers. For example, the HTTP analyzer reports +## user-agent and server software by raising this event if it cannot parse them +## directly (if it can :bro:id:`software_version_found` will be generated +## instead). ## ## c: The connection. ## ## host: The host running the reported software. ## -## descr: The raw (unparsed) software identification string as extracted from the -## protocol. +## descr: The raw (unparsed) software identification string as extracted from +## the protocol. ## ## .. bro:see:: software_version_found software_unparsed_version_found ## OS_version_found @@ -6075,7 +6188,7 @@ event software_parse_error%(c: connection, host: addr, descr: string%); ## ## host: The host running the reported software. ## -## descr: The software identification string as extracted from the protocol. +## str: The software identification string as extracted from the protocol. ## ## .. bro:see:: software_parse_error software_version_found OS_version_found event software_unparsed_version_found%(c: connection, host: addr, str: string%); @@ -6085,14 +6198,16 @@ event software_unparsed_version_found%(c: connection, host: addr, str: string%); ## and it raises this event for each system identified. The p0f fingerprints are ## defined by :bro:id:`passive_fingerprint_file`. ## +## TODO. +## ## .. bro:see:: passive_fingerprint_file software_parse_error ## software_version_found software_unparsed_version_found ## generate_OS_version_event event OS_version_found%(c: connection, host: addr, OS: OS_version%); ## Generated when a connection to a remote Bro has been established. This event -## is intended primarily for use by Bro's communication framework, but it can also -## trigger additional code if helpful. +## is intended primarily for use by Bro's communication framework, but it can +## also trigger additional code if helpful. ## ## p: A record describing the peer. ## @@ -6113,9 +6228,9 @@ event remote_connection_established%(p: event_peer%); ## remote_state_inconsistency print_hook event remote_connection_closed%(p: event_peer%); -## Generated when a remote connection's initial handshake has been completed. This -## event is intended primarily for use by Bro's communication framework, but it can -## also trigger additional code if helpful. +## Generated when a remote connection's initial handshake has been completed. +## This event is intended primarily for use by Bro's communication framework, +## but it can also trigger additional code if helpful. ## ## p: A record describing the peer. ## @@ -6130,6 +6245,8 @@ event remote_connection_handshake_done%(p: event_peer%); ## ## p: A record describing the peer. ## +## name: TODO. +## ## .. bro:see:: remote_capture_filter remote_connection_closed ## remote_connection_error remote_connection_established ## remote_connection_handshake_done remote_log remote_pong @@ -6137,8 +6254,8 @@ event remote_connection_handshake_done%(p: event_peer%); event remote_event_registered%(p: event_peer, name: string%); ## Generated when a connection to a remote Bro encountered an error. 
This event -## is intended primarily for use by Bro's communication framework, but it can also -## trigger additional code if helpful. +## is intended primarily for use by Bro's communication framework, but it can +## also trigger additional code if helpful. ## ## p: A record describing the peer. ## @@ -6153,8 +6270,8 @@ event remote_connection_error%(p: event_peer, reason: string%); ## Generated when a remote peer sent us a capture filter. While this event is -## intended primarily for use by Bro's communication framework, it can also trigger -## additional code if helpful. +## intended primarily for use by Bro's communication framework, it can also +## trigger additional code if helpful. ## ## p: A record describing the peer. ## @@ -6168,8 +6285,8 @@ event remote_capture_filter%(p: event_peer, filter: string%); ## Generated after a call to :bro:id:`send_state` when all data has been ## successfully sent to the remote side. While this event is -## intended primarily for use by Bro's communication framework, it can also trigger -## additional code if helpful. +## intended primarily for use by Bro's communication framework, it can also +## trigger additional code if helpful. ## ## p: A record describing the remote peer. ## @@ -6179,20 +6296,20 @@ event remote_capture_filter%(p: event_peer, filter: string%); ## remote_state_access_performed remote_state_inconsistency print_hook event finished_send_state%(p: event_peer%); -## Generated if state synchronization detects an inconsistency. While this event -## is intended primarily for use by Bro's communication framework, it can also -## trigger additional code if helpful. This event is only raised if +## Generated if state synchronization detects an inconsistency. While this +## event is intended primarily for use by Bro's communication framework, it can +## also trigger additional code if helpful. This event is only raised if ## :bro:id:`remote_check_sync_consistency` is false. ## ## operation: The textual description of the state operation performed. ## ## id: The name of the Bro script identifier that was operated on. ## -## expected_old: A textual representation of the value of *id* that was expected to -## be found before the operation was carried out. +## expected_old: A textual representation of the value of *id* that was +## expected to be found before the operation was carried out. ## -## real_old: A textual representation of the value of *id* that was actually found -## before the operation was carried out. The difference between +## real_old: A textual representation of the value of *id* that was actually +## found before the operation was carried out. The difference between ## *real_old* and *expected_old* is the inconsistency being reported. ## ## .. bro:see:: remote_capture_filter remote_connection_closed @@ -6203,13 +6320,13 @@ event remote_state_inconsistency%(operation: string, id: string, expected_old: string, real_old: string%); ## Generated for communication log messages. While this event is -## intended primarily for use by Bro's communication framework, it can also trigger -## additional code if helpful. +## intended primarily for use by Bro's communication framework, it can also +## trigger additional code if helpful. ## ## level: The log level, which is either :bro:id:`REMOTE_LOG_INFO` or ## :bro:id:`REMOTE_LOG_ERROR`. ## -## src: The component of the comminication system that logged the message. +## src: The component of the communication system that logged the message. 
## Currently, this will be one of :bro:id:`REMOTE_SRC_CHILD` (Bro's ## child process), :bro:id:`REMOTE_SRC_PARENT` (Bro's main process), or ## :bro:id:`REMOTE_SRC_SCRIPT` (the script level). @@ -6223,8 +6340,8 @@ event remote_state_inconsistency%(operation: string, id: string, event remote_log%(level: count, src: count, msg: string%); ## Generated for communication log messages. While this event is -## intended primarily for use by Bro's communication framework, it can also trigger -## additional code if helpful. This event is equivalent to +## intended primarily for use by Bro's communication framework, it can also +## trigger additional code if helpful. This event is equivalent to ## :bro:see:`remote_log` except the message is with respect to a certain peer. ## ## p: A record describing the remote peer. @@ -6232,7 +6349,7 @@ event remote_log%(level: count, src: count, msg: string%); ## level: The log level, which is either :bro:id:`REMOTE_LOG_INFO` or ## :bro:id:`REMOTE_LOG_ERROR`. ## -## src: The component of the comminication system that logged the message. +## src: The component of the communication system that logged the message. ## Currently, this will be one of :bro:id:`REMOTE_SRC_CHILD` (Bro's ## child process), :bro:id:`REMOTE_SRC_PARENT` (Bro's main process), or ## :bro:id:`REMOTE_SRC_SCRIPT` (the script level). @@ -6247,12 +6364,12 @@ event remote_log_peer%(p: event_peer, level: count, src: count, msg: string%); ## Generated when a remote peer has answered to our ping. This event is part of ## Bro's infrastructure for measuring communication latency. One can send a ping -## by calling :bro:id:`send_ping` and when a corresponding reply is received, this -## event will be raised. +## by calling :bro:id:`send_ping` and when a corresponding reply is received, +## this event will be raised. ## ## p: The peer sending us the pong. ## -## seq: The sequence number passed to the original :bro:id:`send_ping` call. +## seq: The sequence number passed to the original :bro:id:`send_ping` call. ## The number is sent back by the peer in its response. ## ## d1: The time interval between sending the ping and receiving the pong. This @@ -6263,7 +6380,7 @@ event remote_log_peer%(p: event_peer, level: count, src: count, msg: string%); ## ## d3: The time interval between when the peer's child process received the ## ping and when its parent process sent the pong. This is the -## processing latency at the the peer. +## processing latency at the peer. ## ## .. bro:see:: remote_capture_filter remote_connection_closed remote_connection_error ## remote_connection_established remote_connection_handshake_done @@ -6272,8 +6389,8 @@ event remote_log_peer%(p: event_peer, level: count, src: count, msg: string%); event remote_pong%(p: event_peer, seq: count, d1: interval, d2: interval, d3: interval%); -## Generated each time a remote state access has been replayed locally. This event -## is primarily intended for debugging. measurments. +## Generated each time a remote state access has been replayed locally. This +## event is primarily intended for debugging. ## ## id: The name of the Bro script variable that's being operated on. ## @@ -6287,7 +6404,7 @@ event remote_state_access_performed%(id: string, v: any%); ## Generated each time Bro's internal profiling log is updated. The file is ## defined by :bro:id:`profiling_file`, and its update frequency by -## :bro:id:`profiling_interval` and :bro:id:`expensive_profiling_multiple`. +## :bro:id:`profiling_interval` and :bro:id:`expensive_profiling_multiple`. 
## ## f: The profiling file. ## @@ -6299,25 +6416,25 @@ event profiling_update%(f: file, expensive: bool%); ## Generated each time Bro's script interpreter opens a file. This event is ## triggered only for files opened via :bro:id:`open`, and in particular not for -## normal log files as created by a log writers. +## normal log files as created by log writers. ## ## f: The opened file. event file_opened%(f: file%); -## Generated for a received NetFlow v5 header. Bro's NetFlow processor raises this -## event whenever it either receives a NetFlow header on the port it's listening -## on, or reads one from a trace file. +## Generated for a received NetFlow v5 header. Bro's NetFlow processor raises +## this event whenever it either receives a NetFlow header on the port it's +## listening on, or reads one from a trace file. ## ## h: The parsed NetFlow header. ## ## .. bro:see:: netflow_v5_record event netflow_v5_header%(h: nf_v5_header%); -## Generated for a received NetFlow v5 record. Bro's NetFlow processor raises this -## event whenever it either receives a NetFlow record on the port it's listening -## on, or reads one from a trace file. +## Generated for a received NetFlow v5 record. Bro's NetFlow processor raises +## this event whenever it either receives a NetFlow record on the port it's +## listening on, or reads one from a trace file. ## -## h: The parsed NetFlow header. +## r: The parsed NetFlow record. ## ## .. bro:see:: netflow_v5_record event netflow_v5_record%(r: nf_v5_record%); @@ -6330,15 +6447,15 @@ event netflow_v5_record%(r: nf_v5_record%); ## ## msg: The message itself. ## -## location: A (potentially empty) string describing a location associated with the -## message. +## location: A (potentially empty) string describing a location associated with +## the message. ## ## .. bro:see:: reporter_warning reporter_error Reporter::info Reporter::warning ## Reporter::error ## -## .. note:: Bro will not call reporter events recursively. If the handler of any -## reporter event triggers a new reporter message itself, the output will go to -## ``stderr`` instead. +## .. note:: Bro will not call reporter events recursively. If the handler of +## any reporter event triggers a new reporter message itself, the output +## will go to ``stderr`` instead. event reporter_info%(t: time, msg: string, location: string%) &error_handler; ## Raised for warnings reported via Bro's reporter framework. Such messages may @@ -6349,15 +6466,15 @@ event reporter_info%(t: time, msg: string, location: string%) &error_handler; ## ## msg: The warning message. ## -## location: A (potentially empty) string describing a location associated with the -## warning. +## location: A (potentially empty) string describing a location associated with +## the warning. ## ## .. bro:see:: reporter_info reporter_error Reporter::info Reporter::warning ## Reporter::error ## -## .. note:: Bro will not call reporter events recursively. If the handler of any -## reporter event triggers a new reporter message itself, the output will go to -## ``stderr`` instead. +## .. note:: Bro will not call reporter events recursively. If the handler of +## any reporter event triggers a new reporter message itself, the output +## will go to ``stderr`` instead. event reporter_warning%(t: time, msg: string, location: string%) &error_handler; ## Raised for errors reported via Bro's reporter framework. 
Such messages may @@ -6368,15 +6485,15 @@ event reporter_warning%(t: time, msg: string, location: string%) &error_handler; ## ## msg: The error message. ## -## location: A (potentially empty) string describing a location associated with the -## error. +## location: A (potentially empty) string describing a location associated with +## the error. ## ## .. bro:see:: reporter_info reporter_warning Reporter::info Reporter::warning ## Reporter::error ## -## .. note:: Bro will not call reporter events recursively. If the handler of any -## reporter event triggers a new reporter message itself, the output will go to -## ``stderr`` instead. +## .. note:: Bro will not call reporter events recursively. If the handler of +## any reporter event triggers a new reporter message itself, the output +## will go to ``stderr`` instead. event reporter_error%(t: time, msg: string, location: string%) &error_handler; ## Raised for each policy script loaded by the script interpreter. From b66b022be0090b1327042953b49b3cbbdc5054f3 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 13 Jun 2012 12:21:16 -0500 Subject: [PATCH 388/651] Fix a warning message When building the docs, a WARNING message was visible. Reworded the doc comment to prevent this warning. --- scripts/base/frameworks/communication/main.bro | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/base/frameworks/communication/main.bro b/scripts/base/frameworks/communication/main.bro index b9b15bfd22..ceae357f78 100644 --- a/scripts/base/frameworks/communication/main.bro +++ b/scripts/base/frameworks/communication/main.bro @@ -11,7 +11,8 @@ export { ## The communication logging stream identifier. redef enum Log::ID += { LOG }; - ## Which interface to listen on (``0.0.0.0`` or ``[::]`` are wildcards). + ## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]`` + ## are wildcards. const listen_interface = 0.0.0.0 &redef; ## Which port to listen on. From 22fb039e8384448b94091407ef08c2403f75cfb5 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 13 Jun 2012 16:59:15 -0500 Subject: [PATCH 389/651] Improve an error message in ICMP analyzer. --- src/ICMP.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/ICMP.cc b/src/ICMP.cc index b06c6440e1..b8ddb8a292 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -64,7 +64,8 @@ void ICMP_Analyzer::DeliverPacket(int len, const u_char* data, break; default: - reporter->InternalError("unexpected IP proto in ICMP analyzer"); + reporter->InternalError("unexpected IP proto in ICMP analyzer: %d", + ip->NextProto()); break; } From 22efa452796fff41e3ebf91d0e241f21bb7ae8e7 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Fri, 15 Jun 2012 10:48:22 -0400 Subject: [PATCH 390/651] Merging in latest changes from Bro master. 
--- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- cmake | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aux/binpac b/aux/binpac index 6f43a8115d..b4094cb75e 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 6f43a8115d8e6483a50957c5d21c5d69270ab3aa +Subproject commit b4094cb75e0a7769123f7db1f5d73f3f9f1c3977 diff --git a/aux/bro-aux b/aux/bro-aux index c6391412e9..2038e3de04 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit c6391412e902e896836450ab98910309b2ca2d9b +Subproject commit 2038e3de042115c3caa706426e16c830c1fd1e9e diff --git a/aux/broccoli b/aux/broccoli index 0d139c09d5..4e17842743 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 0d139c09d5a9c8623ecc2a5f395178f0ddcd7e16 +Subproject commit 4e17842743fef8df6abf0588c7ca86c6937a2b6d diff --git a/aux/broctl b/aux/broctl index 880f3e48d3..892b60edb9 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 880f3e48d33bb28d17184656f858a4a0e2e1574c +Subproject commit 892b60edb967bb456872638f22ba994e84530137 diff --git a/cmake b/cmake index 2a72c5e08e..96f3d92aca 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 2a72c5e08e018cf632033af3920432d5f684e130 +Subproject commit 96f3d92acadbe1ae64f410e974c5ff503903394b From d3bb4617e96a8ec725e8d103b35813ff5d48f58a Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Fri, 15 Jun 2012 11:21:24 -0400 Subject: [PATCH 391/651] Configuration logic - if libcurl is found, enable elasticsearch plugin. --- CMakeLists.txt | 1 + configure | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 404cdfeeb5..4b1cccf8dc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -128,6 +128,7 @@ if (CURL_FOUND) set(USE_LIBCURL true) include_directories(BEFORE ${CURL_INCLUDE_DIR}) list(APPEND OPTLIBS ${CURL_LIBRARIES}) + set(INSTALL_ELASTICSEARCH true) endif() if (ENABLE_PERFTOOLS_DEBUG) diff --git a/configure b/configure index 7ea5613a6d..801fb1e801 100755 --- a/configure +++ b/configure @@ -35,7 +35,6 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --disable-auxtools don't build or install auxiliary tools --disable-python don't try to build python bindings for broccoli --disable-ruby don't try to build ruby bindings for broccoli - --enable-elasticsearch build the elasticsearch writer Required Packages in Non-Standard Locations: --with-openssl=PATH path to OpenSSL install root @@ -158,9 +157,6 @@ while [ $# -ne 0 ]; do --disable-auxtools) append_cache_entry INSTALL_AUX_TOOLS BOOL false ;; - --enable-elasticsearch) - append_cache_entry INSTALL_ELASTICSEARCH BOOL true - ;; --disable-python) append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true ;; From 350c93c136ea0d13273c68ecac31835279e4f1aa Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 15 Jun 2012 11:22:15 -0500 Subject: [PATCH 392/651] Remove an old, unused diff canonifier. --- testing/scripts/doc/example-diff-canonifier.py | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100755 testing/scripts/doc/example-diff-canonifier.py diff --git a/testing/scripts/doc/example-diff-canonifier.py b/testing/scripts/doc/example-diff-canonifier.py deleted file mode 100755 index e0b8c110cc..0000000000 --- a/testing/scripts/doc/example-diff-canonifier.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/python - -import sys -import re - -# MutableVal derivatives (e.g. 
sets/tables) don't always generate the same -# ordering in the reST documentation, so just don't bother diffing -# the places where example.bro uses them. - -RE1 = "\d*/tcp" -RE2 = "tcp port \d*" - -for line in sys.stdin.readlines(): - if re.search(RE1, line) is None and re.search(RE2, line) is None: - print line From 0bb8b69c95191b7e25296770010b201f67f9cc9c Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 15 Jun 2012 16:30:54 -0400 Subject: [PATCH 393/651] Reworked bulk operation string construction to use ODesc and added json escaping. --- src/logging/writers/ElasticSearch.cc | 410 ++++++++++++--------------- src/logging/writers/ElasticSearch.h | 20 +- 2 files changed, 187 insertions(+), 243 deletions(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 1817ce63ef..d8c4bee306 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -8,6 +8,7 @@ #include #include "util.h" +#include "BroString.h" #include "NetVar.h" #include "threading/SerialTypes.h" @@ -22,38 +23,16 @@ using namespace writer; using threading::Value; using threading::Field; -#define MAX_EVENT_SIZE 1024 - ElasticSearch::ElasticSearch(WriterFrontend* frontend) : WriterBackend(frontend) { cluster_name_len = BifConst::LogElasticSearch::cluster_name->Len(); cluster_name = new char[cluster_name_len + 1]; memcpy(cluster_name, BifConst::LogElasticSearch::cluster_name->Bytes(), cluster_name_len); cluster_name[cluster_name_len] = 0; - - server_host_len = BifConst::LogElasticSearch::server_host->Len(); - server_host = new char[server_host_len + 1]; - memcpy(server_host, BifConst::LogElasticSearch::server_host->Bytes(), server_host_len); - server_host[server_host_len] = 0; - - index_name_len = BifConst::LogElasticSearch::index_name->Len(); - index_name = new char[index_name_len + 1]; - memcpy(index_name, BifConst::LogElasticSearch::index_name->Bytes(), index_name_len); - index_name[index_name_len] = 0; - - type_prefix_len = BifConst::LogElasticSearch::type_prefix->Len(); - type_prefix = new char[type_prefix_len + 1]; - memcpy(type_prefix, BifConst::LogElasticSearch::type_prefix->Bytes(), type_prefix_len); - type_prefix[type_prefix_len] = 0; - - server_port = BifConst::LogElasticSearch::server_port; - batch_size = BifConst::LogElasticSearch::batch_size; - - buffer = (char *)safe_malloc(MAX_EVENT_SIZE * batch_size); - current_offset = 0; - buffer[current_offset] = 0; + + buffer.Clear(); counter = 0; - + curl_handle = HTTPSetup(); curl_result = new char[1024]; } @@ -61,21 +40,17 @@ ElasticSearch::ElasticSearch(WriterFrontend* frontend) : WriterBackend(frontend) ElasticSearch::~ElasticSearch() { delete [] cluster_name; - delete [] server_host; - delete [] index_name; - delete [] type_prefix; - delete [] buffer; } bool ElasticSearch::DoInit(string path, int num_fields, const Field* const * fields) { - //TODO: Determine what, if anything, needs to be done here. + //TODO: Determine what, if anything, needs to be done here. return true; } bool ElasticSearch::DoFlush() { - //TODO: Send flush command to ElasticSearch + //TODO: Send flush command to ElasticSearch return true; } @@ -84,174 +59,155 @@ bool ElasticSearch::DoFinish() return WriterBackend::DoFinish(); } -bool ElasticSearch::BatchIndex() -{ - return HTTPSend(); -} - -char* ElasticSearch::FieldToString(Value* val, const Field* field) -{ - char* result = new char[MAX_EVENT_SIZE]; - - switch ( val->type ) { - - // ElasticSearch defines bools as: 0 == false, everything else == true. 
So we treat it as an int. - case TYPE_BOOL: - case TYPE_INT: - sprintf(result, "%d", (int) val->val.int_val); return result; - - case TYPE_COUNT: - case TYPE_COUNTER: - sprintf(result, "%d", (int) val->val.uint_val); return result; - - case TYPE_PORT: - sprintf(result, "%d", (int) val->val.port_val.port); return result; - - case TYPE_SUBNET: - sprintf(result, "\"%s\"", Render(val->val.subnet_val).c_str()); return result; - - case TYPE_ADDR: - sprintf(result, "\"%s\"", Render(val->val.addr_val).c_str()); return result; - - case TYPE_INTERVAL: - case TYPE_TIME: - sprintf(result, "%"PRIu64"", (uint64) (val->val.double_val * 1000)); return result; - case TYPE_DOUBLE: - sprintf(result, "%s", Render(val->val.double_val).c_str()); return result; - - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_FUNC: - { - int size = val->val.string_val->size(); - const char* data = val->val.string_val->data(); - - if ( ! size ) - return 0; - sprintf(result, "\"%s\"", data); return result; - } - - case TYPE_TABLE: - { - char* tmp = new char[MAX_EVENT_SIZE]; - int tmp_offset = 0; - strcpy(tmp, "{"); - tmp_offset = 1; - bool result_seen = false; - for ( int j = 0; j < val->val.set_val.size; j++ ) +bool ElasticSearch::AddFieldValueToBuffer(Value* val, const Field* field) { - char* sub_field = FieldToString(val->val.set_val.vals[j], field); - if ( sub_field ){ - - if ( result_seen ){ - strcpy(tmp + tmp_offset, ","); - tmp_offset += 1; - } - else - result_seen = true; - - sprintf(tmp + tmp_offset, "\"%s\":%s", field->name.c_str(), sub_field); - tmp_offset = strlen(tmp); - } + switch ( val->type ) + { + // ElasticSearch defines bools as: 0 == false, everything else == true. So we treat it as an int. + case TYPE_BOOL: + case TYPE_INT: + buffer.Add(val->val.int_val); + break; + + case TYPE_COUNT: + case TYPE_COUNTER: + buffer.Add(val->val.uint_val); + break; + + case TYPE_PORT: + buffer.Add(val->val.port_val.port); + break; + + case TYPE_SUBNET: + buffer.AddRaw("\"", 1); + buffer.Add(Render(val->val.subnet_val)); + buffer.AddRaw("\"", 1); + break; + + case TYPE_ADDR: + buffer.AddRaw("\"", 1); + buffer.Add(Render(val->val.addr_val)); + buffer.AddRaw("\"", 1); + break; + + case TYPE_DOUBLE: + buffer.Add(val->val.double_val); + break; + + case TYPE_INTERVAL: + case TYPE_TIME: + // ElasticSearch uses milliseconds for timestamps + buffer.Add((uint64_t) (val->val.double_val * 1000)); + break; + + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_FUNC: + { + buffer.AddRaw("\"", 1); + for ( uint i = 0; i < val->val.string_val->size(); ++i ) + { + char c = val->val.string_val->data()[i]; + // HTML entity encode special characters. 
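+                       // Anything outside printable ASCII, as well as quotes, newlines,
+                       // and backslashes, is written as a decimal entity, e.g. '"' becomes &#34;.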
+ if ( c < 32 || c > 126 || c == '\n' || c == '"' || c == '\'' || c == '\\' ) + { + buffer.AddRaw("&#", 2); + buffer.Add((uint8_t) c); + buffer.AddRaw(";", 1); + } + else + buffer.AddRaw(&c, 1); + } + buffer.AddRaw("\"", 1); + break; + } + + case TYPE_TABLE: + { + buffer.AddRaw("[", 1); + for ( int j = 0; j < val->val.set_val.size; j++ ) + { + if ( j > 0 ) + buffer.AddRaw(",", 1); + AddFieldValueToBuffer(val->val.set_val.vals[j], field); + } + buffer.AddRaw("]", 1); + break; + } + + case TYPE_VECTOR: + { + buffer.AddRaw("[", 1); + for ( int j = 0; j < val->val.vector_val.size; j++ ) + { + if ( j > 0 ) + buffer.AddRaw(",", 1); + AddFieldValueToBuffer(val->val.vector_val.vals[j], field); + } + buffer.AddRaw("]", 1); + break; + } + + default: + return false; + } + return true; } - strcpy(tmp + tmp_offset, "}"); - tmp_offset += 1; - sprintf(result, "%s", tmp); - return result; - } - - case TYPE_VECTOR: - { - char* tmp = new char[MAX_EVENT_SIZE]; - int tmp_offset = 0; - strcpy(tmp, "{"); - tmp_offset = 1; - bool result_seen = false; - for ( int j = 0; j < val->val.vector_val.size; j++ ) + +bool ElasticSearch::AddFieldToBuffer(Value* val, const Field* field) { - char* sub_field = FieldToString(val->val.vector_val.vals[j], field); - if ( sub_field ){ - - if ( result_seen ){ - strcpy(tmp + tmp_offset, ","); - tmp_offset += 1; - } - else - result_seen = true; - - sprintf(tmp + tmp_offset, "\"%s\":%s", field->name.c_str(), sub_field); - tmp_offset = strlen(tmp); - } - } - strcpy(tmp + tmp_offset, "}"); - tmp_offset += 1; - sprintf(result, "%s", tmp); - return result; - } - - default: - { - return (char *)"{}"; - } - - } - -} - -char* ElasticSearch::AddFieldToBuffer(Value* val, const Field* field) - { - if ( ! val->present ) - return 0; + if ( ! val->present ) + return false; - char* result = new char[MAX_EVENT_SIZE]; - sprintf(result, "\"%s\":%s", field->name.c_str(), FieldToString(val, field)); - return result; - + buffer.AddRaw("\"", 1); + buffer.Add(field->name); + buffer.AddRaw("\":", 2); + AddFieldValueToBuffer(val, field); + return true; } bool ElasticSearch::DoWrite(int num_fields, const Field* const * fields, Value** vals) { - // Our action line looks like: - // {"index":{"_index":"$index_name","_type":"$type_prefix$path"}}\n{ - - bool resultSeen = false; - - for ( int i = 0; i < num_fields; i++ ) + // Our action line looks like: + // {"index":{"_index":"$index_name","_type":"$type_prefix$path"}}\n + if ( counter == 0 ) { - char* result = AddFieldToBuffer(vals[i], fields[i]); - if ( result ) { - if ( ! 
resultSeen ) { - current_offset += sprintf(buffer + current_offset, "{\"index\":{\"_index\":\"%s\",\"_type\":\"%s%s\"}\n{", index_name, type_prefix, Path().c_str()); - resultSeen = true; - } - else { - strcat(buffer, ","); - current_offset += 1; - } - strcat(buffer, result); - current_offset += strlen(result); + buffer.AddRaw("{\"index\":{\"_index\":\"", 20); + buffer.AddN((const char*) BifConst::LogElasticSearch::index_name->Bytes(), + BifConst::LogElasticSearch::index_name->Len()); + buffer.AddRaw("\",\"_type\":\"", 11); + buffer.AddN((const char*) BifConst::LogElasticSearch::type_prefix->Bytes(), + BifConst::LogElasticSearch::type_prefix->Len()); + buffer.Add(Path()); + buffer.AddRaw("\"}\n", 3); + } + + for ( int i = 0; i < num_fields; i++ ) + { + if ( i == 0 ) + buffer.AddRaw("{", 1); + else if ( buffer.Bytes()[buffer.Len()] != ',' && vals[i]->present ) + buffer.AddRaw(",", 1); + AddFieldToBuffer(vals[i], fields[i]); + } + + buffer.AddRaw("}\n", 2); + + counter++; + if ( counter >= BifConst::LogElasticSearch::batch_size ) + { + HTTPSend(); + buffer.Clear(); + counter = 0; } - } - - if ( resultSeen ) { - strcat(buffer, "}\n"); - current_offset += 2; - counter += 1; - if ( counter >= batch_size ){ - BatchIndex(); - current_offset = 0; - buffer[current_offset] = 0; - counter = 0; - } - } return true; } bool ElasticSearch::DoRotate(string rotated_path, double open, double close, bool terminating) { - //TODO: Determine what, if anything, needs to be done here. + //TODO: Determine what, if anything, needs to be done here. return true; } @@ -264,52 +220,54 @@ bool ElasticSearch::DoSetBuf(bool enabled) // HTTP Functions start here. CURL* ElasticSearch::HTTPSetup() -{ - char URL[2048]; - CURL* handle; - struct curl_slist *headers=NULL; - - handle = curl_easy_init(); - if ( ! handle ) - return handle; - - sprintf(URL, "http://%s:%d/_bulk", server_host, (int) server_port); - curl_easy_setopt(handle, CURLOPT_URL, URL); + { + const char *URL = fmt("http://%s:%d/_bulk", BifConst::LogElasticSearch::server_host->CheckString(), + (int) BifConst::LogElasticSearch::server_port);; + CURL* handle; + struct curl_slist *headers=NULL; + + handle = curl_easy_init(); + if ( ! handle ) + return handle; + + //sprintf(URL, "http://%s:%d/_bulk", BifConst::LogElasticSearch::server_host->CheckString(), (int) BifConst::LogElasticSearch::server_port); + curl_easy_setopt(handle, CURLOPT_URL, URL); + + headers = curl_slist_append(NULL, "Content-Type: text/json; charset=utf-8"); + curl_easy_setopt(handle, CURLOPT_HTTPHEADER, headers); + + curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, &logging::writer::ElasticSearch::HTTPReceive); // This gets called with the result. + curl_easy_setopt(handle, CURLOPT_POST, 1); // All requests are POSTs + + // HTTP 1.1 likes to use chunked encoded transfers, which aren't good for speed. The best (only?) way to disable that is to + // just use HTTP 1.0 + curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); + return handle; + } - headers = curl_slist_append(NULL, "Content-Type: text/json; charset=utf-8"); - curl_easy_setopt(handle, CURLOPT_HTTPHEADER, headers); - - curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, &logging::writer::ElasticSearch::HTTPReceive); // This gets called with the result. - curl_easy_setopt(handle, CURLOPT_POST, 1); // All requests are POSTs - - // HTTP 1.1 likes to use chunked encoded transfers, which aren't good for speed. The best (only?) 
way to disable that is to - // just use HTTP 1.0 - curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); - return handle; - -} - -bool ElasticSearch::HTTPReceive(void* ptr, int size, int nmemb, void* userdata){ - //TODO: Do some verification on the result? - return true; -} +bool ElasticSearch::HTTPReceive(void* ptr, int size, int nmemb, void* userdata) + { + //TODO: Do some verification on the result? + return true; + } bool ElasticSearch::HTTPSend(){ - CURLcode return_code; - - curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, curl_result); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, buffer); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE, current_offset); - - return_code = curl_easy_perform(curl_handle); - switch(return_code) { - case CURLE_COULDNT_CONNECT: - case CURLE_COULDNT_RESOLVE_HOST: - case CURLE_WRITE_ERROR: - return false; - default: - return true; - } + CURLcode return_code; + + curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, curl_result); + curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, buffer.Bytes()); + curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE, buffer.Len()); + + return_code = curl_easy_perform(curl_handle); + switch(return_code) { + case CURLE_COULDNT_CONNECT: + case CURLE_COULDNT_RESOLVE_HOST: + case CURLE_WRITE_ERROR: + return false; + + default: + return true; + } } #endif diff --git a/src/logging/writers/ElasticSearch.h b/src/logging/writers/ElasticSearch.h index ad3729f6da..21e9bdfe08 100644 --- a/src/logging/writers/ElasticSearch.h +++ b/src/logging/writers/ElasticSearch.h @@ -34,17 +34,15 @@ protected: virtual bool DoFinish(); private: - char* AddFieldToBuffer(threading::Value* val, const threading::Field* field); - char* FieldToString(threading::Value* val, const threading::Field* field); - bool BatchIndex(); + bool AddFieldToBuffer(threading::Value* val, const threading::Field* field); + bool AddFieldValueToBuffer(threading::Value* val, const threading::Field* field); CURL* HTTPSetup(); bool HTTPReceive(void* ptr, int size, int nmemb, void* userdata); bool HTTPSend(); // Buffers, etc. - char* buffer; - int current_offset; + ODesc buffer; uint64 counter; CURL* curl_handle; @@ -54,19 +52,7 @@ private: char* cluster_name; int cluster_name_len; - char* server_host; - int server_host_len; - - uint64 server_port; - - char* index_name; - int index_name_len; - - char* type_prefix; - int type_prefix_len; - uint64 batch_size; - }; } From 19cf93be69dc76cfcac5141c9a3f4b8deb6ef88d Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 15 Jun 2012 14:45:07 -0700 Subject: [PATCH 394/651] Moving my todos over to the tracker ticket. --- scripts/base/frameworks/tunnels/main.bro | 9 --------- src/Analyzer.h | 4 +--- src/Sessions.cc | 3 +-- src/TunnelEncapsulation.h | 8 -------- src/event.bif | 1 - 5 files changed, 2 insertions(+), 23 deletions(-) diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro index 0fd37e8e59..1f0258e0a3 100644 --- a/scripts/base/frameworks/tunnels/main.bro +++ b/scripts/base/frameworks/tunnels/main.bro @@ -18,8 +18,6 @@ export { ## A tunnel connection has closed. CLOSE, ## No new connections over a tunnel happened in the past day. - ## TODO-Jon: Where is the "past day" coming from? Should be an - ## option. EXPIRE, }; @@ -72,7 +70,6 @@ export { ## Currently active tunnels. That is, tunnels for which new, encapsulated ## connections have been seen in the last day. - ## TODO-Jon: Do we we need the &synchronized here? 
global active: table[conn_id] of Info = table() &synchronized &read_expire=24hrs &expire_func=expire; } @@ -132,12 +129,6 @@ event new_connection(c: connection) &priority=5 event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5 { - ## TODO-Jon: Not sure I understand this. Shouldn't c$tunnel already be - ## registered? And what if a layer goes way, does that need to be - ## removed here? Or is that done separately? - ## - ## Also, conn/main.bro has a tunnel_changed handler at the same - ## priority that *sets* c$tunnel. That's seems undefine behaviour. if ( c?$tunnel ) register_all(c$tunnel); diff --git a/src/Analyzer.h b/src/Analyzer.h index 6fd1b3b444..6ccd7648d3 100644 --- a/src/Analyzer.h +++ b/src/Analyzer.h @@ -217,9 +217,7 @@ public: // Return whether the analyzer previously called ProtocolConfirmation() // at least once before. - // - // TODO-Jon: Why virtual? - virtual bool ProtocolConfirmed() const + bool ProtocolConfirmed() const { return protocol_confirmed; } // Report that we found a significant protocol violation which might diff --git a/src/Sessions.cc b/src/Sessions.cc index 7f62f42c7b..c754a14698 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -546,7 +546,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, Weird("truncated_inner_IP", ip_hdr, encapsulation); else if ( result > 0 ) - Weird("inner_IP_payload_mismatch", ip_hdr, encapsulation); + Weird("inner_IP_payload_length_mismatch", ip_hdr, encapsulation); if ( result != 0 ) { @@ -706,7 +706,6 @@ void NetSessions::DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, if ( hdr ) fake_hdr.ts = hdr->ts; else - // TODO-Jon: use network_time? fake_hdr.ts.tv_sec = fake_hdr.ts.tv_usec = 0; const u_char* pkt = 0; diff --git a/src/TunnelEncapsulation.h b/src/TunnelEncapsulation.h index 01819c0f20..9dcf134536 100644 --- a/src/TunnelEncapsulation.h +++ b/src/TunnelEncapsulation.h @@ -114,9 +114,6 @@ protected: /** * Abstracts an arbitrary amount of nested tunneling. */ - -// TODO-Jon: Rename EncapsulationChain or EncapsulationStack? I'd prefer to -// have notion in there that this covers multiple levels of encapsulations. class Encapsulation { public: Encapsulation() : conns(0) @@ -130,11 +127,6 @@ public: conns = 0; } - // TODO-Jon: I don't like the ptr-version of the ctor. When reading - // the code using that, I can't tell what it does with the pointer - // (i.e., that it deep-copied the object). Can we use just the - // reference version above? That may mean more "if ( not null )" at - // the caller end though. Encapsulation(const Encapsulation* other) { if ( other && other->conns ) diff --git a/src/event.bif b/src/event.bif index 8d4b1d28a1..0531bb8a18 100644 --- a/src/event.bif +++ b/src/event.bif @@ -146,7 +146,6 @@ event new_connection%(c: connection%); ## or from the outer encapsulation changing. Note that the connection's ## *tunnel* field is NOT automatically assigned to the new encapsulation value ## internally after this event is raised. -## TODO-Jon: I'm not sure what the last sentence is supposed to tell me? ## ## c: The connection whose tunnel/encapsulation changed. ## From d727b4f68a711f07b5f1ce170e9f045080b84d47 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 15 Jun 2012 15:05:42 -0700 Subject: [PATCH 395/651] Updating DataSeries baselines. 
--- .../conn.ds.txt | 72 ++++++++++--------- .../conn.ds.txt | 72 ++++++++++--------- .../http.ds.txt | 24 +++---- 3 files changed, 86 insertions(+), 82 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt index 1d7cba3b3c..620babdd4c 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt @@ -28,6 +28,7 @@ + @@ -48,40 +49,41 @@ + # Extent, type='conn' -ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes -1300475167096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 -1300475167097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 -1300475167099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 -1300475168853899 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 435 0 89 SHR F 0 Cd 0 0 1 117 -1300475168854378 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 420 0 99 SHR F 0 Cd 0 0 1 127 -1300475168854837 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 391 0 183 SHR F 0 Cd 0 0 1 211 -1300475168857956 3PKsZ2Uye21 141.142.220.118 32902 141.142.2.2 53 udp dns 317 0 89 SHR F 0 Cd 0 0 1 117 -1300475168858306 VW0XPVINV8a 141.142.220.118 59816 141.142.2.2 53 udp dns 343 0 99 SHR F 0 Cd 0 0 1 127 -1300475168858713 fRFu0wcOle6 141.142.220.118 59714 141.142.2.2 53 udp dns 375 0 183 SHR F 0 Cd 0 0 1 211 -1300475168891644 qSsw6ESzHV4 141.142.220.118 58206 141.142.2.2 53 udp dns 339 0 89 SHR F 0 Cd 0 0 1 117 -1300475168892037 iE6yhOq3SF 141.142.220.118 38911 141.142.2.2 53 udp dns 334 0 99 SHR F 0 Cd 0 0 1 127 -1300475168892414 GSxOnSLghOa 141.142.220.118 59746 141.142.2.2 53 udp dns 420 0 183 SHR F 0 Cd 0 0 1 211 -1300475168893988 qCaWGmzFtM5 141.142.220.118 45000 141.142.2.2 53 udp dns 384 0 89 SHR F 0 Cd 0 0 1 117 -1300475168894422 70MGiRM1Qf4 141.142.220.118 48479 141.142.2.2 53 udp dns 316 0 99 SHR F 0 Cd 0 0 1 127 -1300475168894787 h5DsfNtYzi1 141.142.220.118 48128 141.142.2.2 53 udp dns 422 0 183 SHR F 0 Cd 0 0 1 211 -1300475168901749 P654jzLoe3a 141.142.220.118 56056 141.142.2.2 53 udp dns 402 0 131 SHR F 0 Cd 0 0 1 159 -1300475168902195 Tw8jXtpTGu6 141.142.220.118 55092 141.142.2.2 53 udp dns 374 0 198 SHR F 0 Cd 0 0 1 226 -1300475169899438 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 85 0 0 -1300475170862384 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 2613016 350 0 S0 F 0 D 7 546 0 0 -1300475171675372 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 100096 66 0 S0 F 0 D 2 162 0 0 -1300475171677081 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 100020 66 0 S0 F 0 D 2 122 0 0 -1300475173116749 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 99801 66 0 S0 F 0 D 2 162 0 0 -1300475173117362 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 99848 66 0 S0 F 0 D 2 122 0 0 -1300475173153679 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0 0 0 S0 F 0 D 1 78 0 0 -1300475168859163 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 tcp 215893 1130 734 S1 F 1130 ShACad 4 216 4 950 -1300475168652003 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 
80 tcp 61328 0 350 OTH F 0 CdA 1 52 1 402 -1300475168895267 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 tcp 227283 1178 734 S1 F 1178 ShACad 4 216 4 950 -1300475168902635 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 120040 534 412 S1 F 534 ShACad 3 164 3 576 -1300475168892936 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 tcp 229603 1148 734 S1 F 1148 ShACad 4 216 4 950 -1300475168855305 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 tcp 218501 1171 733 S1 F 1171 ShACad 4 216 4 949 -1300475168892913 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 tcp 220960 1137 733 S1 F 1137 ShACad 4 216 4 949 -1300475169780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0 0 0 OTH F 0 h 0 0 1 48 -1300475168724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 119904 525 232 S1 F 525 ShACad 3 164 3 396 -1300475168855330 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 219720 1125 734 S1 F 1125 ShACad 4 216 4 950 +ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +1300475167096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 +1300475167097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 +1300475167099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 +1300475168853899 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 435 0 89 SHR F 0 Cd 0 0 1 117 +1300475168854378 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 420 0 99 SHR F 0 Cd 0 0 1 127 +1300475168854837 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 391 0 183 SHR F 0 Cd 0 0 1 211 +1300475168857956 fRFu0wcOle6 141.142.220.118 32902 141.142.2.2 53 udp dns 317 0 89 SHR F 0 Cd 0 0 1 117 +1300475168858306 qSsw6ESzHV4 141.142.220.118 59816 141.142.2.2 53 udp dns 343 0 99 SHR F 0 Cd 0 0 1 127 +1300475168858713 iE6yhOq3SF 141.142.220.118 59714 141.142.2.2 53 udp dns 375 0 183 SHR F 0 Cd 0 0 1 211 +1300475168891644 qCaWGmzFtM5 141.142.220.118 58206 141.142.2.2 53 udp dns 339 0 89 SHR F 0 Cd 0 0 1 117 +1300475168892037 70MGiRM1Qf4 141.142.220.118 38911 141.142.2.2 53 udp dns 334 0 99 SHR F 0 Cd 0 0 1 127 +1300475168892414 h5DsfNtYzi1 141.142.220.118 59746 141.142.2.2 53 udp dns 420 0 183 SHR F 0 Cd 0 0 1 211 +1300475168893988 c4Zw9TmAE05 141.142.220.118 45000 141.142.2.2 53 udp dns 384 0 89 SHR F 0 Cd 0 0 1 117 +1300475168894422 EAr0uf4mhq 141.142.220.118 48479 141.142.2.2 53 udp dns 316 0 99 SHR F 0 Cd 0 0 1 127 +1300475168894787 GvmoxJFXdTa 141.142.220.118 48128 141.142.2.2 53 udp dns 422 0 183 SHR F 0 Cd 0 0 1 211 +1300475168901749 slFea8xwSmb 141.142.220.118 56056 141.142.2.2 53 udp dns 402 0 131 SHR F 0 Cd 0 0 1 159 +1300475168902195 UfGkYA2HI2g 141.142.220.118 55092 141.142.2.2 53 udp dns 374 0 198 SHR F 0 Cd 0 0 1 226 +1300475169899438 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 85 0 0 +1300475170862384 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 2613016 350 0 S0 F 0 D 7 546 0 0 +1300475171675372 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 100096 66 0 S0 F 0 D 2 162 0 0 +1300475171677081 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 100020 66 0 S0 F 0 D 2 122 0 0 +1300475173116749 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 99801 66 0 S0 F 0 D 2 162 0 0 +1300475173117362 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 99848 
66 0 S0 F 0 D 2 122 0 0 +1300475173153679 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0 0 0 S0 F 0 D 1 78 0 0 +1300475168859163 GSxOnSLghOa 141.142.220.118 49998 208.80.152.3 80 tcp 215893 1130 734 S1 F 1130 ShACad 4 216 4 950 +1300475168652003 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 61328 0 350 OTH F 0 CdA 1 52 1 402 +1300475168895267 0Q4FH8sESw5 141.142.220.118 50001 208.80.152.3 80 tcp 227283 1178 734 S1 F 1178 ShACad 4 216 4 950 +1300475168902635 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 120040 534 412 S1 F 534 ShACad 3 164 3 576 +1300475168892936 Tw8jXtpTGu6 141.142.220.118 50000 208.80.152.3 80 tcp 229603 1148 734 S1 F 1148 ShACad 4 216 4 950 +1300475168855305 3PKsZ2Uye21 141.142.220.118 49996 208.80.152.3 80 tcp 218501 1171 733 S1 F 1171 ShACad 4 216 4 949 +1300475168892913 P654jzLoe3a 141.142.220.118 49999 208.80.152.3 80 tcp 220960 1137 733 S1 F 1137 ShACad 4 216 4 949 +1300475169780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0 0 0 OTH F 0 h 0 0 1 48 +1300475168724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 119904 525 232 S1 F 525 ShACad 3 164 3 396 +1300475168855330 VW0XPVINV8a 141.142.220.118 49997 208.80.152.3 80 tcp 219720 1125 734 S1 F 1125 ShACad 4 216 4 950 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt index 3cafa078de..c20e38d4f3 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt @@ -28,6 +28,7 @@ + @@ -48,40 +49,41 @@ + # Extent, type='conn' -ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes -1300475167.096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 -1300475167.097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 -1300475167.099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0 -1300475168.853899 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 0 89 SHR F 0 Cd 0 0 1 117 -1300475168.854378 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 0 99 SHR F 0 Cd 0 0 1 127 -1300475168.854837 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 0 183 SHR F 0 Cd 0 0 1 211 -1300475168.857956 3PKsZ2Uye21 141.142.220.118 32902 141.142.2.2 53 udp dns 0.000317 0 89 SHR F 0 Cd 0 0 1 117 -1300475168.858306 VW0XPVINV8a 141.142.220.118 59816 141.142.2.2 53 udp dns 0.000343 0 99 SHR F 0 Cd 0 0 1 127 -1300475168.858713 fRFu0wcOle6 141.142.220.118 59714 141.142.2.2 53 udp dns 0.000375 0 183 SHR F 0 Cd 0 0 1 211 -1300475168.891644 qSsw6ESzHV4 141.142.220.118 58206 141.142.2.2 53 udp dns 0.000339 0 89 SHR F 0 Cd 0 0 1 117 -1300475168.892037 iE6yhOq3SF 141.142.220.118 38911 141.142.2.2 53 udp dns 0.000335 0 99 SHR F 0 Cd 0 0 1 127 -1300475168.892414 GSxOnSLghOa 141.142.220.118 59746 141.142.2.2 53 udp dns 0.000421 0 183 SHR F 0 Cd 0 0 1 211 -1300475168.893988 qCaWGmzFtM5 141.142.220.118 45000 141.142.2.2 53 udp dns 0.000384 0 89 SHR F 0 Cd 0 0 1 117 -1300475168.894422 70MGiRM1Qf4 141.142.220.118 48479 141.142.2.2 53 udp dns 0.000317 0 99 SHR F 0 Cd 0 0 1 127 -1300475168.894787 h5DsfNtYzi1 141.142.220.118 48128 141.142.2.2 53 udp dns 
0.000423 0 183 SHR F 0 Cd 0 0 1 211 -1300475168.901749 P654jzLoe3a 141.142.220.118 56056 141.142.2.2 53 udp dns 0.000402 0 131 SHR F 0 Cd 0 0 1 159 -1300475168.902195 Tw8jXtpTGu6 141.142.220.118 55092 141.142.2.2 53 udp dns 0.000374 0 198 SHR F 0 Cd 0 0 1 226 -1300475169.899438 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 85 0 0 -1300475170.862384 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 2.613017 350 0 S0 F 0 D 7 546 0 0 -1300475171.675372 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0.100096 66 0 S0 F 0 D 2 162 0 0 -1300475171.677081 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 0.100021 66 0 S0 F 0 D 2 122 0 0 -1300475173.116749 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0.099801 66 0 S0 F 0 D 2 162 0 0 -1300475173.117362 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 0.099849 66 0 S0 F 0 D 2 122 0 0 -1300475173.153679 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0.000000 0 0 S0 F 0 D 1 78 0 0 -1300475168.859163 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 tcp 0.215893 1130 734 S1 F 1130 ShACad 4 216 4 950 -1300475168.652003 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 0.061329 0 350 OTH F 0 CdA 1 52 1 402 -1300475168.895267 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 tcp 0.227284 1178 734 S1 F 1178 ShACad 4 216 4 950 -1300475168.902635 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 0.120041 534 412 S1 F 534 ShACad 3 164 3 576 -1300475168.892936 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 tcp 0.229603 1148 734 S1 F 1148 ShACad 4 216 4 950 -1300475168.855305 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 tcp 0.218501 1171 733 S1 F 1171 ShACad 4 216 4 949 -1300475168.892913 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 tcp 0.220961 1137 733 S1 F 1137 ShACad 4 216 4 949 -1300475169.780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0.000000 0 0 OTH F 0 h 0 0 1 48 -1300475168.724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 0.119905 525 232 S1 F 525 ShACad 3 164 3 396 -1300475168.855330 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 0.219720 1125 734 S1 F 1125 ShACad 4 216 4 950 +ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +1300475167.096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 +1300475167.097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 +1300475167.099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0 +1300475168.853899 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.854378 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.854837 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.857956 fRFu0wcOle6 141.142.220.118 32902 141.142.2.2 53 udp dns 0.000317 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.858306 qSsw6ESzHV4 141.142.220.118 59816 141.142.2.2 53 udp dns 0.000343 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.858713 iE6yhOq3SF 141.142.220.118 59714 141.142.2.2 53 udp dns 0.000375 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.891644 qCaWGmzFtM5 141.142.220.118 58206 141.142.2.2 53 udp dns 0.000339 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.892037 70MGiRM1Qf4 
141.142.220.118 38911 141.142.2.2 53 udp dns 0.000335 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.892414 h5DsfNtYzi1 141.142.220.118 59746 141.142.2.2 53 udp dns 0.000421 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.893988 c4Zw9TmAE05 141.142.220.118 45000 141.142.2.2 53 udp dns 0.000384 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.894422 EAr0uf4mhq 141.142.220.118 48479 141.142.2.2 53 udp dns 0.000317 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.894787 GvmoxJFXdTa 141.142.220.118 48128 141.142.2.2 53 udp dns 0.000423 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.901749 slFea8xwSmb 141.142.220.118 56056 141.142.2.2 53 udp dns 0.000402 0 131 SHR F 0 Cd 0 0 1 159 +1300475168.902195 UfGkYA2HI2g 141.142.220.118 55092 141.142.2.2 53 udp dns 0.000374 0 198 SHR F 0 Cd 0 0 1 226 +1300475169.899438 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 85 0 0 +1300475170.862384 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 2.613017 350 0 S0 F 0 D 7 546 0 0 +1300475171.675372 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0.100096 66 0 S0 F 0 D 2 162 0 0 +1300475171.677081 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 0.100021 66 0 S0 F 0 D 2 122 0 0 +1300475173.116749 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0.099801 66 0 S0 F 0 D 2 162 0 0 +1300475173.117362 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 0.099849 66 0 S0 F 0 D 2 122 0 0 +1300475173.153679 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0.000000 0 0 S0 F 0 D 1 78 0 0 +1300475168.859163 GSxOnSLghOa 141.142.220.118 49998 208.80.152.3 80 tcp 0.215893 1130 734 S1 F 1130 ShACad 4 216 4 950 +1300475168.652003 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 0.061329 0 350 OTH F 0 CdA 1 52 1 402 +1300475168.895267 0Q4FH8sESw5 141.142.220.118 50001 208.80.152.3 80 tcp 0.227284 1178 734 S1 F 1178 ShACad 4 216 4 950 +1300475168.902635 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 0.120041 534 412 S1 F 534 ShACad 3 164 3 576 +1300475168.892936 Tw8jXtpTGu6 141.142.220.118 50000 208.80.152.3 80 tcp 0.229603 1148 734 S1 F 1148 ShACad 4 216 4 950 +1300475168.855305 3PKsZ2Uye21 141.142.220.118 49996 208.80.152.3 80 tcp 0.218501 1171 733 S1 F 1171 ShACad 4 216 4 949 +1300475168.892913 P654jzLoe3a 141.142.220.118 49999 208.80.152.3 80 tcp 0.220961 1137 733 S1 F 1137 ShACad 4 216 4 949 +1300475169.780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0.000000 0 0 OTH F 0 h 0 0 1 48 +1300475168.724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 0.119905 525 232 S1 F 525 ShACad 3 164 3 396 +1300475168.855330 VW0XPVINV8a 141.142.220.118 49997 208.80.152.3 80 tcp 0.219720 1125 734 S1 F 1125 ShACad 4 216 4 950 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt index adb7bb3f7b..ae62fbec3d 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt @@ -66,16 +66,16 @@ # Extent, type='http' ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file 1300475168.843894 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 0 0 0 304 Not Modified 0 -1300475168.975800 c4Zw9TmAE05 
141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 -1300475168.976327 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 -1300475168.979160 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 -1300475169.012666 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 -1300475169.012730 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 -1300475169.014860 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475168.975800 VW0XPVINV8a 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475168.976327 3PKsZ2Uye21 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475168.979160 GSxOnSLghOa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.012666 Tw8jXtpTGu6 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.012730 P654jzLoe3a 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.014860 0Q4FH8sESw5 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 1300475169.022665 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 0 0 0 304 Not Modified 0 -1300475169.036294 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 -1300475169.036798 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 -1300475169.039923 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 -1300475169.074793 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 -1300475169.074938 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 -1300475169.075065 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.036294 VW0XPVINV8a 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.036798 3PKsZ2Uye21 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.039923 GSxOnSLghOa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.074793 Tw8jXtpTGu6 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.074938 P654jzLoe3a 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.075065 0Q4FH8sESw5 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 From 98087384122217a13af42c378a1b064e8aafb408 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 15 Jun 2012 15:24:03 -0700 Subject: [PATCH 396/651] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index 4697bf4c80..5856453712 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 4697bf4c8046a3ab7d5e00e926c5db883cb44664 +Subproject commit 585645371256e8ec028cabae24c5f4a2108546d2 From c7c3ff7af9fb1ae71c66cba892f5241043cb5b39 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 15 Jun 2012 16:01:59 -0700 Subject: [PATCH 397/651] Adding a SOCKS test case. However, I'm not sure the output is right. 
--- scripts/base/protocols/socks/main.bro | 101 ------------------ .../Baseline/core.tunnels.socks/conn.log | 8 ++ .../Baseline/core.tunnels.socks/http.log | 8 ++ .../btest/Baseline/core.tunnels.socks/output | 11 ++ .../Baseline/core.tunnels.socks/tunnel.log | 9 ++ testing/btest/Traces/tunnels/socks.pcap | Bin 0 -> 5446 bytes testing/btest/core/tunnels/socks.bro | 19 ++++ 7 files changed, 55 insertions(+), 101 deletions(-) create mode 100644 testing/btest/Baseline/core.tunnels.socks/conn.log create mode 100644 testing/btest/Baseline/core.tunnels.socks/http.log create mode 100644 testing/btest/Baseline/core.tunnels.socks/output create mode 100644 testing/btest/Baseline/core.tunnels.socks/tunnel.log create mode 100644 testing/btest/Traces/tunnels/socks.pcap create mode 100644 testing/btest/core/tunnels/socks.bro diff --git a/scripts/base/protocols/socks/main.bro b/scripts/base/protocols/socks/main.bro index bd27f4fb85..54d181e43e 100644 --- a/scripts/base/protocols/socks/main.bro +++ b/scripts/base/protocols/socks/main.bro @@ -13,104 +13,3 @@ event socks_request(c: connection, request_type: count, dstaddr: addr, dstname: { Tunnel::register([$cid=c$id, $tunnel_type=Tunnel::SOCKS, $uid=c$uid]); } - -# -#global output = open_log_file("socks"); -# -#type socks_conn: record { -# id: conn_id; -# t: time; -# req: socks_request_type &optional; -# dstaddr: addr &optional; -# dstname: string &optional; -# p: port &optional; -# user: string &optional; -# service: string &optional; -# variant: string &default = "SOCKS v4"; -# granted: string &default = "no-reply"; -#}; -# -# -#global conns: table[conn_id] of socks_conn; -#global proxies: set[addr] &read_expire = 24hrs; -# -#event socks_request(c: connection, t: socks_request_type, dstaddr: addr, dstname: string, p: port, user: string) -# { -# local id = c$id; -# -# local sc: socks_conn; -# sc$id = id; -# sc$t = c$start_time; -# sc$req = t; -# -# if ( dstaddr != 0.0.0.0 ) -# sc$dstaddr = dstaddr; -# -# if ( dstname != "" ) -# sc$dstname = dstname; -# -# if ( p != 0/tcp ) -# sc$p = p; -# -# if ( user != "" ) -# sc$user = user; -# -# conns[id] = sc; -# } -# -#event socks_reply(c: connection, granted: bool, dst: addr, p: port) -# { -# local id = c$id; -# local sc: socks_conn; -# -# if ( id in conns ) -# sc = conns[id]; -# else -# { -# sc$id = id; -# sc$t = c$start_time; -# conns[id] = sc; -# } -# -# sc$granted = granted ? "ok" : "denied"; -# -# local proxy = c$id$resp_h; -# -# if ( proxy !in proxies ) -# { -# NOTICE([$note=SOCKSProxy, $src=proxy, $sub=sc$variant, -# $msg=fmt("SOCKS proxy seen at %s (%s)", proxy, sc$variant)]); -# add proxies[proxy]; -# } -# } -# -#function print_conn(sc: socks_conn) -# { -# local req = ""; -# if ( sc?$req ) -# { -# if ( sc$req == SOCKS_CONNECTION ) -# req = "relay-to"; -# if ( sc$req == SOCKS_PORT ) -# req = "bind-port"; -# } -# -# local p = sc?$p ? fmt("%s", sc$p) : ""; -# -# local dest = sc?$dstaddr -# ? (fmt("%s:%s%s", sc$dstaddr, p, (sc?$dstname ? fmt(" (%s)", sc$dstname) : ""))) -# : (sc?$dstname ? fmt("%s:%s", sc$dstname, p) : ""); -# local user = sc?$user ? fmt(" (user %s)", sc?$user) : ""; -# -# local service = sc?$service ? 
fmt(" [%s]", sc$service) : ""; -# -# print output, fmt("%.6f %s %s %s %s-> %s%s", sc$t, id_string(sc$id), req, -# dest, user, sc$granted, service); -# } -# -#event connection_state_remove(c: connection) -# { -# if ( c$id in conns ) -# print_conn(conns[c$id]); -# } -# diff --git a/testing/btest/Baseline/core.tunnels.socks/conn.log b/testing/btest/Baseline/core.tunnels.socks/conn.log new file mode 100644 index 0000000000..9d5ae8efb1 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.socks/conn.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1208299429.265243 UWkUyAuUGXf 127.0.0.1 62270 127.0.0.1 1080 tcp http,socks 0.008138 152 3950 SF - 0 ShAaDdfF 9 632 9 4430 (empty) diff --git a/testing/btest/Baseline/core.tunnels.socks/http.log b/testing/btest/Baseline/core.tunnels.socks/http.log new file mode 100644 index 0000000000..2dcab3f254 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.socks/http.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path http +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file +#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file +1208299429.270361 UWkUyAuUGXf 127.0.0.1 62270 127.0.0.1 1080 1 GET www.icir.org / - curl/7.16.3 (powerpc-apple-darwin9.0) libcurl/7.16.3 OpenSSL/0.9.7l zlib/1.2.3 0 3677 200 OK - - - (empty) - - - text/html - - diff --git a/testing/btest/Baseline/core.tunnels.socks/output b/testing/btest/Baseline/core.tunnels.socks/output new file mode 100644 index 0000000000..8bf984a58a --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.socks/output @@ -0,0 +1,11 @@ +[id=[orig_h=127.0.0.1, orig_p=62270/tcp, resp_h=127.0.0.1, resp_p=1080/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=177, flow_label=0], resp=[size=8, state=4, num_pkts=3, num_bytes_ip=168, flow_label=0], start_time=1208299429.265243, duration=0.002565, service={ +SOCKS +}, addl=, hot=0, history=ShAaDd, uid=UWkUyAuUGXf, tunnel=[], dpd=, conn=[ts=1208299429.265243, uid=UWkUyAuUGXf, id=[orig_h=127.0.0.1, orig_p=62270/tcp, resp_h=127.0.0.1, resp_p=1080/tcp], proto=tcp, service=, duration=, orig_bytes=, resp_bytes=, conn_state=, local_orig=, missed_bytes=0, history=, orig_pkts=, orig_ip_bytes=, resp_pkts=, resp_ip_bytes=, parents={ + +}], extract_orig=F, extract_resp=F, dns=, dns_state=, ftp=, http=, http_state=, irc=, smtp=, smtp_state=, ssh=, ssl=, syslog=] +--- +1 +192.150.187.12 + +80/tcp + diff --git a/testing/btest/Baseline/core.tunnels.socks/tunnel.log b/testing/btest/Baseline/core.tunnels.socks/tunnel.log new file mode 100644 index 0000000000..9ccbe8af26 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.socks/tunnel.log @@ -0,0 +1,9 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path tunnel +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type +#types 
time string addr port addr port enum enum +1208299429.267808 UWkUyAuUGXf 127.0.0.1 62270 127.0.0.1 1080 Tunnel::DISCOVER Tunnel::SOCKS +1208299429.273401 UWkUyAuUGXf 127.0.0.1 62270 127.0.0.1 1080 Tunnel::CLOSE Tunnel::SOCKS diff --git a/testing/btest/Traces/tunnels/socks.pcap b/testing/btest/Traces/tunnels/socks.pcap new file mode 100644 index 0000000000000000000000000000000000000000..d70e2cb7dcbeefefc6e3ff9907f53a069e59e9bd GIT binary patch literal 5446 zcmcIo&2!{N6`%DERIJ260!8t0>4*Y$7fYJ)$J(CNXl;+j_7K}+JaV=cs@U{MJu|)5 zNJ@9hlgR;c;6PEu70x+u;X;Z_QpJTL2l59{l??}~_K+L5oH)Spx~2Khc)a0)s$@&8 z?qC10FBS?v zFBERb`!lDw`Q`JM9XaZ~g=f$Hvj%;O_n+LjdGqFt8!u>!I);`)p$GsH^G9sJ?fylkO|VECuk z@p>0<{tB4#&Uk(JY4bTalGporUI%p-S?JJpPpz_9UeeAvVa86Egrb7Hgay`yS-xwo zqFZq^AT0I_9LE7Me9Q(k+%ikIQ9#?%9h)%;TdgCjWNw)o0eTxg1M-Twx}+UO98d>? zfk{2e%!qZDv>neQF|^*X?ks6bnl#TJ=9uR%yZL2hZhH!P`uIBYd=nK5zkIQv?$kUl z{^F7U92_vu;xKQXkzvqEsf3!ZEopmLkP31WvW&{>XeVa!%O+Y`sjRJ5*0#{WvAd+T z2)iV#0-v$hBi0Hm|1P?Rw>_2 z!RjvVR*+s^TPu}_LDqevys@#h1=H#y^BHG2>=Ir`oTTq^)TZ^RL|XgV^ccDes!-|b0418K`X~c z==6N|s8K_@VObAWYnHX=?xi2r%u*S-42PV`nQ&lP^%Gt12@zK;OZtAWDm`zxXKJc7 zD~N!K=?h3vJi%=K|M0_fc=uwVvA|enZVPJYcouxwqaxd+|nP^kT(;0S=C2Yl8e`om)fkV&#?mKk+HeCzGakk$L3-f z5F}tw*4N`)chENN#}N|<_RK9^^)cm#+xL)dsaMAqR0PCvdjvIVtp++FVi2(l8g|hE zi;@@_s0p&Uq-0=QX+qkPE zJoA2;e|My@59>R7^)qQIf!naGcEo%FQV57U9b)b0uE{hvg+d%E&#yb z_D923S78(|=!R82ARPgQvjliyeh&}SN*D-WadC0PTzne3A-Qx8JfgTEa)rO^1REY{VswXP}SkVlBzNv$6bBejW`fhJ)73T z6OxiW(3O@^N^qN}=%{h>8al3@oTHPS*Bb{rZlif3wc*ptktwByz?Yw|mKgi~NG&s- zlUf-&qK!ELP$EJ7YZR0DEVCeboiGlu1(nSb>=`7(DTy8`6pcp${&D5wYjKPqR#!PDzRZJNUA!=aOh0qS77=5ro z7*qXY5`bOdKl%XeuGl z$s}-QVcM}fka-~eHXDFp|k3uM$Q(%DqwuRej_`XySrJVRy=_ zSwm_jIZYOX)rLspwPUw~$h&wmi3D*pO|xT1lNo9(hpEhcB}C~?L&oh;c@GDu7WLx< z8nl*2N$5eIgJetvG)W3b=5p8$p%JsF1KC}sYV{SKkTyG`4<+Id_oB%3qP}%Tx@kFa z{;2inQAmi6p5-Bwa5lA+|_Vq-<2*RNc zJp)z3OwC0(K=5UlNFt`Nn1Qcp3|$e1a_y1?Lunw1J)Y~ z??Tli>-+CRT|RsKp>O5u^6dEY@#BRz%GcHR@82mFPT&YG@3g)z{_wX(9w$8~yir!? z1gP&9?&s@!DFYm%Hh7qZKqq0T%#_pQbeZHeyX` z;SE`2XJyW~7@n^D#yi)vYR%J!$`UQzsYUTnl|`^Nh#D%+nz^#kZbOwijkFK`=trPO z8iH&3Dt05>j*{%S1qlXDAh*Rn;G$zeK z)*%QY&HhdjCR}a+72c>ADxJU%!r?HF4SPlEvj;yM@ChT_PB_VPnqo9tgHIbjt03n| zOdhcy%&QZlj8Y##beWA(cV?o?Bue=Q*F~wPb8!Ct)p=3MKTuKX<@vv|{c{e^KVHb= uO!2w)$E;6(aCKcD&HvR8qOX7W&NY1gH3#Qs-^=q!qpyDm>v}TJKK})-%m&W@ literal 0 HcmV?d00001 diff --git a/testing/btest/core/tunnels/socks.bro b/testing/btest/core/tunnels/socks.bro new file mode 100644 index 0000000000..8ab288c9bd --- /dev/null +++ b/testing/btest/core/tunnels/socks.bro @@ -0,0 +1,19 @@ +# @TEST-EXEC: bro -Cr $TRACES/tunnels/socks.pcap %INPUT >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff tunnel.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff http.log + +event socks_request(c: connection, request_type: count, dstaddr: addr, + dstname: string, p: port, user: string) + { + print c; + print "---"; + print request_type; + print dstaddr; + print dstname; + print p; + print user; + } + + From 41ef1072ecf6317484e5de790cd23a7d9b2fb033 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 15 Jun 2012 16:24:52 -0700 Subject: [PATCH 398/651] Changing an error in the input framework to a warning. 
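A minimal script-level sketch of the affected case (not taken from the patch;
the file name and index record are illustrative, assuming the standard Input
framework API): removing the same input stream twice now produces a warning
and the second call succeeds, rather than failing with an error.

    @load base/frameworks/input

    type Idx: record {
        host: addr;
    };

    global hosts: set[addr] = set();

    event bro_init()
        {
        Input::add_table([$source="hosts.data", $name="hosts",
                          $idx=Idx, $destination=hosts]);

        # The first removal queues the stream for deletion; the second used
        # to raise an error and return false, and now only warns.
        Input::remove("hosts");
        Input::remove("hosts");
        }
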
--- CHANGES | 5 +++++ VERSION | 2 +- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- aux/btest | 2 +- src/input/Manager.cc | 4 ++-- 8 files changed, 13 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index dce30785ae..a85c4f0847 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,9 @@ +2.0-623 | 2012-06-15 16:24:52 -0700 + + * Changing an error in the input framework to a warning. (Robin + Sommer) + 2.0-622 | 2012-06-15 15:38:43 -0700 * Input framework updates. (Bernhard Amann) diff --git a/VERSION b/VERSION index 51c26b96dc..0fec46f4a4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-622 +2.0-623 diff --git a/aux/binpac b/aux/binpac index 6f43a8115d..b4094cb75e 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 6f43a8115d8e6483a50957c5d21c5d69270ab3aa +Subproject commit b4094cb75e0a7769123f7db1f5d73f3f9f1c3977 diff --git a/aux/bro-aux b/aux/bro-aux index c6391412e9..f938c81ada 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit c6391412e902e896836450ab98910309b2ca2d9b +Subproject commit f938c81ada94641ab5f0231983edc2ba866b9a1f diff --git a/aux/broccoli b/aux/broccoli index 0d139c09d5..4e17842743 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 0d139c09d5a9c8623ecc2a5f395178f0ddcd7e16 +Subproject commit 4e17842743fef8df6abf0588c7ca86c6937a2b6d diff --git a/aux/broctl b/aux/broctl index 880f3e48d3..589cb04c3d 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 880f3e48d33bb28d17184656f858a4a0e2e1574c +Subproject commit 589cb04c3d7e28a81aa07454e2b9b6b092f0e1af diff --git a/aux/btest b/aux/btest index 5856453712..4697bf4c80 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 585645371256e8ec028cabae24c5f4a2108546d2 +Subproject commit 4697bf4c8046a3ab7d5e00e926c5db883cb44664 diff --git a/src/input/Manager.cc b/src/input/Manager.cc index c0b4b04a7c..63fa59d0bc 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -696,8 +696,8 @@ bool Manager::RemoveStream(Stream *i) if ( i->removed ) { - reporter->Error("Stream %s is already queued for removal. Ignoring remove.", i->name.c_str()); - return false; + reporter->Warning("Stream %s is already queued for removal. Ignoring remove.", i->name.c_str()); + return true; } i->removed = true; From 6f3b6a64325e1a49f7bc92e9e3aeccd023cdc8e3 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 15 Jun 2012 16:25:50 -0700 Subject: [PATCH 399/651] Updating submodule(s). 
[nomail] --- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- aux/btest | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aux/binpac b/aux/binpac index b4094cb75e..6f43a8115d 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit b4094cb75e0a7769123f7db1f5d73f3f9f1c3977 +Subproject commit 6f43a8115d8e6483a50957c5d21c5d69270ab3aa diff --git a/aux/bro-aux b/aux/bro-aux index f938c81ada..c6391412e9 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit f938c81ada94641ab5f0231983edc2ba866b9a1f +Subproject commit c6391412e902e896836450ab98910309b2ca2d9b diff --git a/aux/broccoli b/aux/broccoli index 4e17842743..0d139c09d5 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 4e17842743fef8df6abf0588c7ca86c6937a2b6d +Subproject commit 0d139c09d5a9c8623ecc2a5f395178f0ddcd7e16 diff --git a/aux/broctl b/aux/broctl index 589cb04c3d..880f3e48d3 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 589cb04c3d7e28a81aa07454e2b9b6b092f0e1af +Subproject commit 880f3e48d33bb28d17184656f858a4a0e2e1574c diff --git a/aux/btest b/aux/btest index 4697bf4c80..5856453712 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 4697bf4c8046a3ab7d5e00e926c5db883cb44664 +Subproject commit 585645371256e8ec028cabae24c5f4a2108546d2 From a4df914ab7cab585abb1b456a048a5ae5e0f5e65 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 15 Jun 2012 20:53:09 -0400 Subject: [PATCH 400/651] Reduce the batch size to 1000 and add a maximum time interval for batches. --- .../logging/writers/elasticsearch.bro | 7 ++- src/logging.bif | 1 + src/logging/writers/ElasticSearch.cc | 53 +++++++++++++------ src/logging/writers/ElasticSearch.h | 9 ++-- 4 files changed, 50 insertions(+), 20 deletions(-) diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro index 7f968d0042..e2d14a68e3 100644 --- a/scripts/base/frameworks/logging/writers/elasticsearch.bro +++ b/scripts/base/frameworks/logging/writers/elasticsearch.bro @@ -20,6 +20,11 @@ export { ## The batch size is the number of messages that will be queued up before ## they are sent to be bulk indexed. ## Note: this is mainly a memory usage parameter. - const batch_size = 10000 &redef; + const batch_size = 1000 &redef; + + ## The maximum amount of wall-clock time that is allowed to pass without + ## finishing a bulk log send. This represents the maximum delay you + ## would like to have with your logs before they show up in ElasticSearch. 
+ const max_batch_interval = 1min &redef; } diff --git a/src/logging.bif b/src/logging.bif index 308ea78b7a..5434ac3705 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -92,3 +92,4 @@ const server_port: count; const index_name: string; const type_prefix: string; const batch_size: count; +const max_batch_interval: interval; diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index d8c4bee306..ed1c046143 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -32,6 +32,7 @@ ElasticSearch::ElasticSearch(WriterFrontend* frontend) : WriterBackend(frontend) buffer.Clear(); counter = 0; + last_send = current_time(); curl_handle = HTTPSetup(); curl_result = new char[1024]; @@ -58,12 +59,21 @@ bool ElasticSearch::DoFinish() { return WriterBackend::DoFinish(); } + +bool ElasticSearch::BatchIndex() + { + HTTPSend(); + buffer.Clear(); + counter = 0; + last_send = current_time(); + return true; + } bool ElasticSearch::AddFieldValueToBuffer(Value* val, const Field* field) { switch ( val->type ) { - // ElasticSearch defines bools as: 0 == false, everything else == true. So we treat it as an int. + // ES treats 0 as false and any other value as true so bool types go here. case TYPE_BOOL: case TYPE_INT: buffer.Add(val->val.int_val); @@ -197,11 +207,8 @@ bool ElasticSearch::DoWrite(int num_fields, const Field* const * fields, counter++; if ( counter >= BifConst::LogElasticSearch::batch_size ) - { - HTTPSend(); - buffer.Clear(); - counter = 0; - } + BatchIndex(); + return true; } @@ -217,6 +224,18 @@ bool ElasticSearch::DoSetBuf(bool enabled) return true; } +bool ElasticSearch::DoHeartbeat(double network_time, double current_time) + { + if ( last_send > 0 && + current_time-last_send > BifConst::LogElasticSearch::max_batch_interval ) + { + BatchIndex(); + } + + return true; + } + + // HTTP Functions start here. CURL* ElasticSearch::HTTPSetup() @@ -251,7 +270,8 @@ bool ElasticSearch::HTTPReceive(void* ptr, int size, int nmemb, void* userdata) return true; } -bool ElasticSearch::HTTPSend(){ +bool ElasticSearch::HTTPSend() + { CURLcode return_code; curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, curl_result); @@ -259,15 +279,16 @@ bool ElasticSearch::HTTPSend(){ curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE, buffer.Len()); return_code = curl_easy_perform(curl_handle); - switch(return_code) { - case CURLE_COULDNT_CONNECT: - case CURLE_COULDNT_RESOLVE_HOST: - case CURLE_WRITE_ERROR: - return false; - - default: - return true; + switch ( return_code ) + { + case CURLE_COULDNT_CONNECT: + case CURLE_COULDNT_RESOLVE_HOST: + case CURLE_WRITE_ERROR: + return false; + + default: + return true; + } } -} #endif diff --git a/src/logging/writers/ElasticSearch.h b/src/logging/writers/ElasticSearch.h index 21e9bdfe08..a366dd7020 100644 --- a/src/logging/writers/ElasticSearch.h +++ b/src/logging/writers/ElasticSearch.h @@ -32,10 +32,12 @@ protected: double close, bool terminating); virtual bool DoFlush(); virtual bool DoFinish(); + virtual bool DoHeartbeat(double network_time, double current_time); private: bool AddFieldToBuffer(threading::Value* val, const threading::Field* field); bool AddFieldValueToBuffer(threading::Value* val, const threading::Field* field); + bool BatchIndex(); CURL* HTTPSetup(); bool HTTPReceive(void* ptr, int size, int nmemb, void* userdata); @@ -44,14 +46,15 @@ private: // Buffers, etc. 
ODesc buffer; uint64 counter; - + double last_send; + CURL* curl_handle; char* curl_result; - + // From scripts char* cluster_name; int cluster_name_len; - + uint64 batch_size; }; From ca5eb5382ab9d67ce340e64b1692ef681c3167da Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 15 Jun 2012 21:06:06 -0400 Subject: [PATCH 401/651] Flush logs to ES daemon as Bro is shutting down. --- src/logging/writers/ElasticSearch.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index ed1c046143..5e1efa504e 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -51,12 +51,12 @@ bool ElasticSearch::DoInit(string path, int num_fields, const Field* const * fie bool ElasticSearch::DoFlush() { - //TODO: Send flush command to ElasticSearch return true; } bool ElasticSearch::DoFinish() { + BatchIndex(); return WriterBackend::DoFinish(); } @@ -120,7 +120,7 @@ bool ElasticSearch::AddFieldValueToBuffer(Value* val, const Field* field) { char c = val->val.string_val->data()[i]; // HTML entity encode special characters. - if ( c < 32 || c > 126 || c == '\n' || c == '"' || c == '\'' || c == '\\' ) + if ( c < 32 || c > 126 || c == '\n' || c == '"' || c == '\'' || c == '\\' || c == '&' ) { buffer.AddRaw("&#", 2); buffer.Add((uint8_t) c); From 8334dceadb748a93effda4828db2439554fb532f Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 15 Jun 2012 22:19:51 -0400 Subject: [PATCH 402/651] Changed the escaping method. --- src/logging/writers/ElasticSearch.cc | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 5e1efa504e..a2019df9fe 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -122,9 +122,13 @@ bool ElasticSearch::AddFieldValueToBuffer(Value* val, const Field* field) // HTML entity encode special characters. if ( c < 32 || c > 126 || c == '\n' || c == '"' || c == '\'' || c == '\\' || c == '&' ) { - buffer.AddRaw("&#", 2); - buffer.Add((uint8_t) c); - buffer.AddRaw(";", 1); + static const char hex_chars[] = "0123456789abcdef"; + buffer.AddRaw("\\u00", 4); + buffer.AddRaw(&hex_chars[(c & 0xf0) >> 4], 1); + buffer.AddRaw(&hex_chars[c & 0x0f], 1); + //buffer.AddRaw("&#//", 2); + //buffer.Add((uint8_t) c); + //buffer.AddRaw(";", 1); } else buffer.AddRaw(&c, 1); From b1561437e9d3bd8dfcf3fded3ff7ceca274d70e4 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sat, 16 Jun 2012 00:35:40 -0400 Subject: [PATCH 403/651] Forgot to call the parent method for DoHeartBeat. --- src/logging/writers/ElasticSearch.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index a2019df9fe..46282404a6 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -236,7 +236,7 @@ bool ElasticSearch::DoHeartbeat(double network_time, double current_time) BatchIndex(); } - return true; + return WriterBackend::DoHeartbeat(network_time, current_time); } From cd8169dda3150918a29eca21ca1fd7e7dcfc6ed2 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sat, 16 Jun 2012 22:22:40 -0400 Subject: [PATCH 404/651] Bug fix and feature. - Fixed bug with how data is sent to elasticsearch. - Added a feature to only allow data of a certain size to be buffered before sending to the elasticsearch server. Configured with the LogElasticSearch::max_byte_size variable. 
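The batching knobs touched by this and the surrounding ElasticSearch-writer patches (LogElasticSearch::max_batch_size, LogElasticSearch::max_batch_interval, and LogElasticSearch::max_byte_size) are all declared &redef, so they can be tuned from site policy rather than by editing the writer. A minimal, illustrative Bro-script sketch; the values are arbitrary examples, not recommendations:

    # Send a bulk request after 500 records, after 30 seconds, or once
    # roughly 512 KB of JSON has been buffered, whichever happens first.
    redef LogElasticSearch::max_batch_size = 500;
    redef LogElasticSearch::max_batch_interval = 30sec;
    redef LogElasticSearch::max_byte_size = 512 * 1024;
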
--- .../logging/writers/elasticsearch.bro | 6 +++- src/logging.bif | 3 +- src/logging/writers/ElasticSearch.cc | 29 +++++++++---------- 3 files changed, 20 insertions(+), 18 deletions(-) diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro index e2d14a68e3..b262201c85 100644 --- a/scripts/base/frameworks/logging/writers/elasticsearch.bro +++ b/scripts/base/frameworks/logging/writers/elasticsearch.bro @@ -20,11 +20,15 @@ export { ## The batch size is the number of messages that will be queued up before ## they are sent to be bulk indexed. ## Note: this is mainly a memory usage parameter. - const batch_size = 1000 &redef; + const max_batch_size = 1000 &redef; ## The maximum amount of wall-clock time that is allowed to pass without ## finishing a bulk log send. This represents the maximum delay you ## would like to have with your logs before they show up in ElasticSearch. const max_batch_interval = 1min &redef; + + ## The maximum byte size for a buffered JSON string to send to the bulk + ## insert API. + const max_byte_size = 1024 * 1024 &redef; } diff --git a/src/logging.bif b/src/logging.bif index 5434ac3705..cbae66efdb 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -91,5 +91,6 @@ const server_host: string; const server_port: count; const index_name: string; const type_prefix: string; -const batch_size: count; +const max_batch_size: count; const max_batch_interval: interval; +const max_byte_size: count; diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 46282404a6..fd028e9b68 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -186,31 +186,27 @@ bool ElasticSearch::DoWrite(int num_fields, const Field* const * fields, { // Our action line looks like: // {"index":{"_index":"$index_name","_type":"$type_prefix$path"}}\n - if ( counter == 0 ) - { - buffer.AddRaw("{\"index\":{\"_index\":\"", 20); - buffer.AddN((const char*) BifConst::LogElasticSearch::index_name->Bytes(), - BifConst::LogElasticSearch::index_name->Len()); - buffer.AddRaw("\",\"_type\":\"", 11); - buffer.AddN((const char*) BifConst::LogElasticSearch::type_prefix->Bytes(), - BifConst::LogElasticSearch::type_prefix->Len()); - buffer.Add(Path()); - buffer.AddRaw("\"}\n", 3); - } + buffer.AddRaw("{\"index\":{\"_index\":\"", 20); + buffer.AddN((const char*) BifConst::LogElasticSearch::index_name->Bytes(), + BifConst::LogElasticSearch::index_name->Len()); + buffer.AddRaw("\",\"_type\":\"", 11); + buffer.AddN((const char*) BifConst::LogElasticSearch::type_prefix->Bytes(), + BifConst::LogElasticSearch::type_prefix->Len()); + buffer.Add(Path()); + buffer.AddRaw("\"}\n", 3); + buffer.AddRaw("{", 1); for ( int i = 0; i < num_fields; i++ ) { - if ( i == 0 ) - buffer.AddRaw("{", 1); - else if ( buffer.Bytes()[buffer.Len()] != ',' && vals[i]->present ) + if ( i > 0 && buffer.Bytes()[buffer.Len()] != ',' && vals[i]->present ) buffer.AddRaw(",", 1); AddFieldToBuffer(vals[i], fields[i]); } - buffer.AddRaw("}\n", 2); counter++; - if ( counter >= BifConst::LogElasticSearch::batch_size ) + if ( counter >= BifConst::LogElasticSearch::max_batch_size || + uint(buffer.Len()) >= BifConst::LogElasticSearch::max_byte_size ) BatchIndex(); return true; @@ -219,6 +215,7 @@ bool ElasticSearch::DoWrite(int num_fields, const Field* const * fields, bool ElasticSearch::DoRotate(string rotated_path, double open, double close, bool terminating) { //TODO: Determine what, if anything, needs to be 
done here. + return true; } From 57980c86e62dc4dbbd95efe8cc761f84ac1c2d85 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sun, 17 Jun 2012 21:41:29 -0400 Subject: [PATCH 405/651] Bug fixes. - The curl handle is now cleaned up correctly. - Interval values are now treated as doubles. Treating them as uint64_t was wrong because intervals can be negative. There is also no obvious benefit in elasticsearch to converting the value to milliseconds. --- src/logging/writers/ElasticSearch.cc | 7 +++---- src/logging/writers/ElasticSearch.h | 1 - 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index fd028e9b68..402a2f21ad 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -35,8 +35,7 @@ ElasticSearch::ElasticSearch(WriterFrontend* frontend) : WriterBackend(frontend) last_send = current_time(); curl_handle = HTTPSetup(); - curl_result = new char[1024]; - } +} ElasticSearch::~ElasticSearch() { @@ -57,6 +56,7 @@ bool ElasticSearch::DoFlush() bool ElasticSearch::DoFinish() { BatchIndex(); + curl_easy_cleanup(curl_handle); return WriterBackend::DoFinish(); } @@ -101,10 +101,10 @@ bool ElasticSearch::AddFieldValueToBuffer(Value* val, const Field* field) break; case TYPE_DOUBLE: + case TYPE_INTERVAL: buffer.Add(val->val.double_val); break; - case TYPE_INTERVAL: case TYPE_TIME: // ElasticSearch uses milliseconds for timestamps buffer.Add((uint64_t) (val->val.double_val * 1000)); @@ -275,7 +275,6 @@ bool ElasticSearch::HTTPSend() { CURLcode return_code; - curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, curl_result); curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, buffer.Bytes()); curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE, buffer.Len()); diff --git a/src/logging/writers/ElasticSearch.h b/src/logging/writers/ElasticSearch.h index a366dd7020..bd1351214b 100644 --- a/src/logging/writers/ElasticSearch.h +++ b/src/logging/writers/ElasticSearch.h @@ -49,7 +49,6 @@ private: double last_send; CURL* curl_handle; - char* curl_result; // From scripts char* cluster_name; From cb7eac212e33ec60e21886a793e73b346aba0ba1 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Sun, 17 Jun 2012 22:55:11 -0400 Subject: [PATCH 406/651] Small updates and a little standardization for config.h.in naming. --- CMakeLists.txt | 11 ++++++----- config.h.in | 7 +++++-- configure | 1 - doc/logging-elasticsearch.rst | 9 ++++----- src/logging/Manager.cc | 4 ++-- src/logging/writers/ElasticSearch.cc | 6 +++--- src/main.cc | 12 ++++++++++++ 7 files changed, 32 insertions(+), 18 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4b1cccf8dc..14cf66ac19 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -122,13 +122,13 @@ if (LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND) list(APPEND OPTLIBS ${LibXML2_LIBRARIES}) endif() -set(USE_LIBCURL false) +set(USE_CURL false) find_package(CURL) if (CURL_FOUND) - set(USE_LIBCURL true) + set(USE_CURL true) include_directories(BEFORE ${CURL_INCLUDE_DIR}) list(APPEND OPTLIBS ${CURL_LIBRARIES}) - set(INSTALL_ELASTICSEARCH true) + set(USE_ELASTICSEARCH true) endif() if (ENABLE_PERFTOOLS_DEBUG) @@ -218,13 +218,14 @@ message( "\nBroccoli: ${INSTALL_BROCCOLI}" "\nBroctl: ${INSTALL_BROCTL}" "\nAux. 
Tools: ${INSTALL_AUX_TOOLS}" - "\nElasticSearch: ${INSTALL_ELASTICSEARCH}" "\n" "\nGeoIP: ${USE_GEOIP}" "\nGoogle perftools: ${USE_PERFTOOLS}" "\n debugging: ${USE_PERFTOOLS_DEBUG}" + "\ncURL: ${USE_CURL}" + "\n" "\nDataSeries: ${USE_DATASERIES}" - "\nlibCURL: ${USE_LIBCURL}" + "\nElasticSearch: ${USE_ELASTICSEARCH}" "\n" "\n================================================================\n" ) diff --git a/config.h.in b/config.h.in index 66121cefbf..aa286736fd 100644 --- a/config.h.in +++ b/config.h.in @@ -114,11 +114,14 @@ /* Analyze Mobile IPv6 traffic */ #cmakedefine ENABLE_MOBILE_IPV6 +/* Use libCurl. */ +#cmakedefine USE_CURL + /* Use the DataSeries writer. */ #cmakedefine USE_DATASERIES -/* Build the ElasticSearch writer. */ -#cmakedefine INSTALL_ELASTICSEARCH +/* Use the ElasticSearch writer. */ +#cmakedefine USE_ELASTICSEARCH /* Version number of package */ #define VERSION "@VERSION@" diff --git a/configure b/configure index 801fb1e801..3258d4abfc 100755 --- a/configure +++ b/configure @@ -98,7 +98,6 @@ append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc append_cache_entry ENABLE_DEBUG BOOL false append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false -append_cache_entry INSTALL_ELASTICSEARCH BOOL false append_cache_entry BinPAC_SKIP_INSTALL BOOL true append_cache_entry BUILD_SHARED_LIBS BOOL true append_cache_entry INSTALL_AUX_TOOLS BOOL true diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst index 4fce470d4a..26b49f3a0b 100644 --- a/doc/logging-elasticsearch.rst +++ b/doc/logging-elasticsearch.rst @@ -26,16 +26,15 @@ ElasticSearch with:: Compiling Bro with ElasticSearch Support ---------------------------------------- -First, ensure that you have libcurl installed. Secondly, set the -``--enable-elasticsearch`` option:: +First, ensure that you have libcurl installed the run configure.:: - # ./configure --enable-elasticsearch + # ./configure [...] ====================| Bro Build Summary |===================== [...] - ElasticSearch: true + cURL: true [...] - libCURL: true + ElasticSearch: true [...] ================================================================ diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 5c1203fd91..5562b3b867 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -17,7 +17,7 @@ #include "writers/Ascii.h" #include "writers/None.h" -#ifdef INSTALL_ELASTICSEARCH +#ifdef USE_ELASTICSEARCH #include "writers/ElasticSearch.h" #endif @@ -40,7 +40,7 @@ WriterDefinition log_writers[] = { { BifEnum::Log::WRITER_NONE, "None", 0, writer::None::Instantiate }, { BifEnum::Log::WRITER_ASCII, "Ascii", 0, writer::Ascii::Instantiate }, -#ifdef INSTALL_ELASTICSEARCH +#ifdef USE_ELASTICSEARCH { BifEnum::Log::WRITER_ELASTICSEARCH, "ElasticSearch", 0, writer::ElasticSearch::Instantiate }, #endif diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 402a2f21ad..494c48f286 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -2,7 +2,7 @@ #include "config.h" -#ifdef INSTALL_ELASTICSEARCH +#ifdef USE_ELASTICSEARCH #include #include @@ -261,7 +261,7 @@ CURL* ElasticSearch::HTTPSetup() // HTTP 1.1 likes to use chunked encoded transfers, which aren't good for speed. The best (only?) 
way to disable that is to // just use HTTP 1.0 - curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); + //curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); return handle; } @@ -275,8 +275,8 @@ bool ElasticSearch::HTTPSend() { CURLcode return_code; + curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, buffer.Len()); curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, buffer.Bytes()); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE, buffer.Len()); return_code = curl_easy_perform(curl_handle); switch ( return_code ) diff --git a/src/main.cc b/src/main.cc index b1d0a4d723..04aa83b832 100644 --- a/src/main.cc +++ b/src/main.cc @@ -12,6 +12,10 @@ #include #endif +#ifdef USE_CURL +#include +#endif + #ifdef USE_IDMEF extern "C" { #include @@ -716,6 +720,10 @@ int main(int argc, char** argv) SSL_library_init(); SSL_load_error_strings(); +#ifdef USE_CURL + curl_global_init(CURL_GLOBAL_ALL); +#endif + // FIXME: On systems that don't provide /dev/urandom, OpenSSL doesn't // seed the PRNG. We should do this here (but at least Linux, FreeBSD // and Solaris provide /dev/urandom). @@ -1066,6 +1074,10 @@ int main(int argc, char** argv) done_with_network(); net_delete(); +#ifdef USE_CURL + curl_global_cleanup(); +#endif + terminate_bro(); // Close files after net_delete(), because net_delete() From 52ceee8c869ac04d0bbf74ec8296ef983e182742 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Mon, 18 Jun 2012 01:31:52 -0400 Subject: [PATCH 407/651] Fixed a bug with messed up time value passing to elasticsearch. --- src/logging/writers/ElasticSearch.cc | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 494c48f286..75a4e0514f 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -106,9 +106,19 @@ bool ElasticSearch::AddFieldValueToBuffer(Value* val, const Field* field) break; case TYPE_TIME: - // ElasticSearch uses milliseconds for timestamps - buffer.Add((uint64_t) (val->val.double_val * 1000)); + { + // ElasticSearch uses milliseconds for timestamps and json only + // supports signed ints (uints can be too large). + uint64_t ts = (uint64_t) (val->val.double_val * 1000); + if ( ts >= INT64_MAX ) + { + Error(Fmt("time value too large: %" PRIu64, ts)); + buffer.AddRaw("null", 4); + } + else + buffer.Add(ts); break; + } case TYPE_ENUM: case TYPE_STRING: @@ -261,7 +271,7 @@ CURL* ElasticSearch::HTTPSetup() // HTTP 1.1 likes to use chunked encoded transfers, which aren't good for speed. The best (only?) way to disable that is to // just use HTTP 1.0 - //curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); + curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); return handle; } From 95f4390cb632f2c445ed45a6670b6ad8266e49c5 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Mon, 18 Jun 2012 02:03:43 -0400 Subject: [PATCH 408/651] Adding an extra header. 
--- src/logging/writers/ElasticSearch.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 75a4e0514f..c137505811 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -6,6 +6,7 @@ #include #include +#include #include "util.h" #include "BroString.h" From 146cb47d6ae76c1478569bca3dfb60cc44fcb700 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 18 Jun 2012 11:11:07 -0500 Subject: [PATCH 409/651] Fix performance problem checking packet encapsulation. (addresses #830) Connections were creating a new encapsulation object for nearly every packet even if no tunnels were ever involved with the Connection. --- src/Conn.cc | 12 +++++++++--- testing/btest/Baseline/core.tunnels.socks/output | 4 +--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/Conn.cc b/src/Conn.cc index 18d52f8a12..f3ebaca0c9 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -205,20 +205,26 @@ void Connection::CheckEncapsulation(const Encapsulation* arg_encap) if ( encapsulation && arg_encap ) { if ( *encapsulation != *arg_encap ) + { Event(tunnel_changed, 0, arg_encap->GetVectorVal()); + delete encapsulation; + encapsulation = new Encapsulation(arg_encap); + } } else if ( encapsulation ) { Encapsulation empty; Event(tunnel_changed, 0, empty.GetVectorVal()); + delete encapsulation; + encapsulation = new Encapsulation(arg_encap); } else if ( arg_encap ) + { Event(tunnel_changed, 0, arg_encap->GetVectorVal()); - - delete encapsulation; - encapsulation = new Encapsulation(arg_encap); + encapsulation = new Encapsulation(arg_encap); + } } void Connection::Done() diff --git a/testing/btest/Baseline/core.tunnels.socks/output b/testing/btest/Baseline/core.tunnels.socks/output index 8bf984a58a..ee5c5b5c20 100644 --- a/testing/btest/Baseline/core.tunnels.socks/output +++ b/testing/btest/Baseline/core.tunnels.socks/output @@ -1,8 +1,6 @@ [id=[orig_h=127.0.0.1, orig_p=62270/tcp, resp_h=127.0.0.1, resp_p=1080/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=177, flow_label=0], resp=[size=8, state=4, num_pkts=3, num_bytes_ip=168, flow_label=0], start_time=1208299429.265243, duration=0.002565, service={ SOCKS -}, addl=, hot=0, history=ShAaDd, uid=UWkUyAuUGXf, tunnel=[], dpd=, conn=[ts=1208299429.265243, uid=UWkUyAuUGXf, id=[orig_h=127.0.0.1, orig_p=62270/tcp, resp_h=127.0.0.1, resp_p=1080/tcp], proto=tcp, service=, duration=, orig_bytes=, resp_bytes=, conn_state=, local_orig=, missed_bytes=0, history=, orig_pkts=, orig_ip_bytes=, resp_pkts=, resp_ip_bytes=, parents={ - -}], extract_orig=F, extract_resp=F, dns=, dns_state=, ftp=, http=, http_state=, irc=, smtp=, smtp_state=, ssh=, ssl=, syslog=] +}, addl=, hot=0, history=ShAaDd, uid=UWkUyAuUGXf, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dns=, dns_state=, ftp=, http=, http_state=, irc=, smtp=, smtp_state=, ssh=, ssl=, syslog=] --- 1 192.150.187.12 From f3b3e73eba1dba81aa35f477d6a20ebdd54cf7a8 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 18 Jun 2012 12:29:49 -0500 Subject: [PATCH 410/651] Script-layer tunnel interface cleanup. - Clarify "tunnel_changed" event documentation. - Make expiration of "Tunnel::active" elements configuration via "Tunnel::expiration_interval". - Remove redundant registration of a connection's tunnels in tunnel/main.bro's "tunnel_changed" handler. - Rename "parents" field of "Conn::Info" to "tunnel_parents" to give more context. 
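Because this patch renames the Conn::Info "parents" field to "tunnel_parents" and makes the tunnel expiration redef-able, site scripts that consume either need the new names. A short sketch of both, assuming the stock Conn::log_conn logging event is available; the one-hour value is purely illustrative:

    # Expire idle tunnel state after one hour instead of the 24-hour default.
    redef Tunnel::expiration_interval = 1hr;

    # Report logged connections that were carried inside a tunnel, using
    # the renamed tunnel_parents field.
    event Conn::log_conn(rec: Conn::Info)
        {
        if ( |rec$tunnel_parents| > 0 )
            print rec$uid, rec$tunnel_parents;
        }
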
--- scripts/base/frameworks/tunnels/main.bro | 15 +++++++++------ scripts/base/protocols/conn/main.bro | 6 +++--- src/event.bif | 8 +++++--- .../Baseline/core.print-bpf-filters/conn.log | 2 +- .../btest/Baseline/core.print-bpf-filters/output | 8 ++++---- .../btest/Baseline/core.tunnels.ayiya/conn.log | 2 +- .../btest/Baseline/core.tunnels.socks/conn.log | 2 +- .../btest/Baseline/core.tunnels.teredo/conn.log | 2 +- testing/btest/Baseline/core.vlan-mpls/conn.log | 2 +- .../scripts.base.protocols.ftp.ftp-ipv4/conn.log | 2 +- .../scripts.base.protocols.ftp.ftp-ipv6/conn.log | 2 +- 11 files changed, 28 insertions(+), 23 deletions(-) diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro index 1f0258e0a3..2f5625f8b2 100644 --- a/scripts/base/frameworks/tunnels/main.bro +++ b/scripts/base/frameworks/tunnels/main.bro @@ -17,7 +17,8 @@ export { DISCOVER, ## A tunnel connection has closed. CLOSE, - ## No new connections over a tunnel happened in the past day. + ## No new connections over a tunnel happened in the amount of + ## time indicated by :bro:see:`Tunnel::expiration_interval`. EXPIRE, }; @@ -68,9 +69,14 @@ export { ## action: The specific reason for the tunnel ending. global close: function(tunnel: Info, action: Action); + ## The amount of time a tunnel is not used in establishment of new + ## connections before it is considered inactive/expired. + const expiration_interval = 24hrs &redef; + ## Currently active tunnels. That is, tunnels for which new, encapsulated - ## connections have been seen in the last day. - global active: table[conn_id] of Info = table() &synchronized &read_expire=24hrs &expire_func=expire; + ## connections have been seen in the interval indicated by + ## :bro:see:`Tunnel::expiration_interval`. + global active: table[conn_id] of Info = table() &synchronized &read_expire=expiration_interval &expire_func=expire; } const ayiya_ports = { 5072/udp }; @@ -129,9 +135,6 @@ event new_connection(c: connection) &priority=5 event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5 { - if ( c?$tunnel ) - register_all(c$tunnel); - register_all(e); } diff --git a/scripts/base/protocols/conn/main.bro b/scripts/base/protocols/conn/main.bro index 432bb12e84..5796c3c6b1 100644 --- a/scripts/base/protocols/conn/main.bro +++ b/scripts/base/protocols/conn/main.bro @@ -104,7 +104,7 @@ export { ## If this connection was over a tunnel, indicate the ## *uid* values for any encapsulating parent connections ## used over the lifetime of this inner connection. - parents: set[string] &log; + tunnel_parents: set[string] &log; }; ## Event that can be handled to access the :bro:type:`Conn::Info` @@ -195,7 +195,7 @@ function set_conn(c: connection, eoc: bool) c$conn$uid=c$uid; c$conn$id=c$id; if ( c?$tunnel && |c$tunnel| > 0 ) - add c$conn$parents[c$tunnel[|c$tunnel|-1]$uid]; + add c$conn$tunnel_parents[c$tunnel[|c$tunnel|-1]$uid]; c$conn$proto=get_port_transport_proto(c$id$resp_p); if( |Site::local_nets| > 0 ) c$conn$local_orig=Site::is_local_addr(c$id$orig_h); @@ -238,7 +238,7 @@ event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5 { set_conn(c, F); if ( |e| > 0 ) - add c$conn$parents[e[|e|-1]$uid]; + add c$conn$tunnel_parents[e[|e|-1]$uid]; c$tunnel = e; } diff --git a/src/event.bif b/src/event.bif index 0531bb8a18..72c033cd75 100644 --- a/src/event.bif +++ b/src/event.bif @@ -143,9 +143,11 @@ event new_connection%(c: connection%); ## Generated for a connection whose tunneling has changed. 
This could ## be from a previously seen connection now being encapsulated in a tunnel, -## or from the outer encapsulation changing. Note that the connection's -## *tunnel* field is NOT automatically assigned to the new encapsulation value -## internally after this event is raised. +## or from the outer encapsulation changing. Note that connection *c*'s +## *tunnel* field is NOT automatically/internally assigned to the new +## encapsulation value of *e* after this event is raised. If the desired +## behavior is to track the latest tunnel encapsulation per-connection, +## then a handler of this event should assign *e* to ``c$tunnel``. ## ## c: The connection whose tunnel/encapsulation changed. ## diff --git a/testing/btest/Baseline/core.print-bpf-filters/conn.log b/testing/btest/Baseline/core.print-bpf-filters/conn.log index ca81844a4a..b563c4a3ed 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/conn.log +++ b/testing/btest/Baseline/core.print-bpf-filters/conn.log @@ -3,6 +3,6 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1128727435.450898 UWkUyAuUGXf 141.42.64.125 56730 125.190.109.199 80 tcp http 1.733303 98 9417 SF - 0 ShADdFaf 12 730 10 9945 (empty) diff --git a/testing/btest/Baseline/core.print-bpf-filters/output b/testing/btest/Baseline/core.print-bpf-filters/output index b4a52965cb..0560b34769 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/output +++ b/testing/btest/Baseline/core.print-bpf-filters/output @@ -5,7 +5,7 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1335456050.312960 - ip or not ip T T +1340040469.440535 - ip or not ip T T #separator \x09 #set_separator , #empty_field (empty) @@ -13,7 +13,7 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1335456050.557822 - ((((((((((((((((((((((((port 53) or (tcp port 989)) or (tcp port 443)) or (port 6669)) or (udp and port 5353)) or (port 6668)) or (udp and port 5355)) or (tcp port 22)) or (tcp port 995)) or (port 21)) or (tcp port 25 or tcp port 587)) or (port 6667)) or (tcp port 614)) or (tcp port 990)) or (udp port 137)) or (tcp port 993)) or (tcp port 5223)) or (port 514)) or (tcp port 585)) or (tcp port 992)) or (tcp port 563)) or (tcp port 994)) or (tcp port 636)) or (tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888))) or (port 6666) T T +1340040469.681428 - ((((((((((((((((((((((((port 53) or (tcp port 989)) or (tcp port 443)) or (port 6669)) or (udp and port 5353)) or (port 6668)) or (udp and port 5355)) or (tcp port 22)) or (tcp port 995)) or (port 21)) or (tcp port 25 or tcp port 587)) or (port 6667)) or (tcp port 614)) or (tcp port 990)) or (udp port 137)) or (tcp port 993)) or (tcp port 5223)) or (port 514)) or (tcp port 585)) or (tcp port 992)) or (tcp port 563)) or (tcp port 994)) or (tcp port 636)) or (tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888))) or (port 6666) T T #separator \x09 #set_separator , #empty_field (empty) 
@@ -21,7 +21,7 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1335456050.805695 - port 42 T T +1340040469.925663 - port 42 T T #separator \x09 #set_separator , #empty_field (empty) @@ -29,4 +29,4 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1335456051.042953 - port 56730 T T +1340040470.169001 - port 56730 T T diff --git a/testing/btest/Baseline/core.tunnels.ayiya/conn.log b/testing/btest/Baseline/core.tunnels.ayiya/conn.log index 5c23b4c404..db54a8a475 100644 --- a/testing/btest/Baseline/core.tunnels.ayiya/conn.log +++ b/testing/btest/Baseline/core.tunnels.ayiya/conn.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1257655301.595604 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 tcp http 2.101052 2981 4665 S1 - 0 ShADad 10 3605 11 5329 k6kgXLOoSKl 1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 udp ayiya 20.879001 5129 6109 SF - 0 Dd 21 5717 13 6473 (empty) diff --git a/testing/btest/Baseline/core.tunnels.socks/conn.log b/testing/btest/Baseline/core.tunnels.socks/conn.log index 9d5ae8efb1..f8a684d4c6 100644 --- a/testing/btest/Baseline/core.tunnels.socks/conn.log +++ b/testing/btest/Baseline/core.tunnels.socks/conn.log @@ -3,6 +3,6 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1208299429.265243 UWkUyAuUGXf 127.0.0.1 62270 127.0.0.1 1080 tcp http,socks 0.008138 152 3950 SF - 0 ShAaDdfF 9 632 9 4430 (empty) diff --git a/testing/btest/Baseline/core.tunnels.teredo/conn.log b/testing/btest/Baseline/core.tunnels.teredo/conn.log index 151230886b..cefc8f3e84 100644 --- a/testing/btest/Baseline/core.tunnels.teredo/conn.log +++ b/testing/btest/Baseline/core.tunnels.teredo/conn.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1210953047.736921 arKYeMETxOg 192.168.2.16 1576 75.126.130.163 80 tcp - 0.000357 0 0 SHR - 0 fA 1 40 1 40 (empty) 
1210953050.867067 k6kgXLOoSKl 192.168.2.16 1577 75.126.203.78 80 tcp - 0.000387 0 0 SHR - 0 fA 1 40 1 40 (empty) diff --git a/testing/btest/Baseline/core.vlan-mpls/conn.log b/testing/btest/Baseline/core.vlan-mpls/conn.log index 20903d1db8..e165df621a 100644 --- a/testing/btest/Baseline/core.vlan-mpls/conn.log +++ b/testing/btest/Baseline/core.vlan-mpls/conn.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 952109346.874907 UWkUyAuUGXf 10.1.2.1 11001 10.34.0.1 23 tcp - 2.102560 26 0 SH - 0 SADF 11 470 0 0 (empty) 1128727435.450898 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 tcp http 1.733303 98 9417 SF - 0 ShADdFaf 12 730 10 9945 (empty) diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log index 5704153b07..4a20ec39b4 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1329843175.736107 arKYeMETxOg 141.142.220.235 37604 199.233.217.249 56666 tcp ftp-data 0.112432 0 342 SF - 0 ShAdfFa 4 216 4 562 (empty) 1329843179.871641 k6kgXLOoSKl 141.142.220.235 59378 199.233.217.249 56667 tcp ftp-data 0.111218 0 77 SF - 0 ShAdfFa 4 216 4 297 (empty) diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log index e3d458bae7..9d19ffaf85 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1329327783.316897 arKYeMETxOg 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49186 2001:470:4867:99::21 57086 tcp ftp-data 0.219721 0 342 SF - 0 ShAdfFa 5 372 4 642 (empty) 1329327786.524332 
k6kgXLOoSKl 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49187 2001:470:4867:99::21 57087 tcp ftp-data 0.217501 0 43 SF - 0 ShAdfFa 5 372 4 343 (empty) From 2ba3f5420b13ddc0f2cb173a2ccb41a716972f98 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 18 Jun 2012 12:48:03 -0500 Subject: [PATCH 411/651] Add "encap_hdr_size" option back in. The "tunnel_port" and "parse_udp_tunnels" options are still gone as those did not work entirely (e.g. IPv6 support and misnaming of tunnel_port/udp_tunnel_port). --- scripts/base/init-bare.bro | 4 ++++ src/NetVar.cc | 4 ++++ src/NetVar.h | 2 ++ src/Sessions.cc | 4 ++++ 4 files changed, 14 insertions(+) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 879a4f5995..503bf2547c 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -915,6 +915,10 @@ const frag_timeout = 0.0 sec &redef; ## to be potentially copied and buffered. const packet_sort_window = 0 usecs &redef; +## If positive, indicates the encapsulation header size that should +## be skipped. This applies to all packets. +const encap_hdr_size = 0 &redef; + ## Whether to use the ``ConnSize`` analyzer to count the number of packets and ## IP-level bytes transfered by each endpoint. If true, these values are returned ## in the connection's :bro:see:`endpoint` record value. diff --git a/src/NetVar.cc b/src/NetVar.cc index 70aa60c886..b057efad11 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -30,6 +30,8 @@ int partial_connection_ok; int tcp_SYN_ack_ok; int tcp_match_undelivered; +int encap_hdr_size; + double frag_timeout; double tcp_SYN_timeout; @@ -323,6 +325,8 @@ void init_net_var() tcp_SYN_ack_ok = opt_internal_int("tcp_SYN_ack_ok"); tcp_match_undelivered = opt_internal_int("tcp_match_undelivered"); + encap_hdr_size = opt_internal_int("encap_hdr_size"); + frag_timeout = opt_internal_double("frag_timeout"); tcp_SYN_timeout = opt_internal_double("tcp_SYN_timeout"); diff --git a/src/NetVar.h b/src/NetVar.h index 7aff9b84e6..e6f6e0cfc4 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -33,6 +33,8 @@ extern int partial_connection_ok; extern int tcp_SYN_ack_ok; extern int tcp_match_undelivered; +extern int encap_hdr_size; + extern double frag_timeout; extern double tcp_SYN_timeout; diff --git a/src/Sessions.cc b/src/Sessions.cc index c754a14698..b5d82f147f 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -158,6 +158,10 @@ void NetSessions::DispatchPacket(double t, const struct pcap_pkthdr* hdr, ip_data = pkt + hdr_size + (ip_hdr->ip_hl << 2); } + if ( encap_hdr_size > 0 && ip_data ) + // Blanket encapsulation + hdr_size += encap_hdr_size; + if ( src_ps->FilterType() == TYPE_FILTER_NORMAL ) NextPacket(t, hdr, pkt, hdr_size, pkt_elem); else From 7fc96a8c0f012d22f6cba7a38869108c7f9fef4d Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Mon, 18 Jun 2012 15:49:00 -0400 Subject: [PATCH 412/651] Adding a define to make the stdint C macros available. --- src/logging/writers/ElasticSearch.cc | 1 - src/util.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index c137505811..75a4e0514f 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -6,7 +6,6 @@ #include #include -#include #include "util.h" #include "BroString.h" diff --git a/src/util.h b/src/util.h index 6b237edfd8..559a155626 100644 --- a/src/util.h +++ b/src/util.h @@ -13,6 +13,7 @@ // Expose C99 functionality from inttypes.h, which would otherwise not be // available in C++. 
#define __STDC_FORMAT_MACROS +#define __STDC_LIMIT_MACROS #include #if __STDC__ From e04d6297330f4872c97588131f31abacce780acb Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 18 Jun 2012 15:44:34 -0500 Subject: [PATCH 413/651] Add state management of NetSessions's IP tunnel map. Entries are checked for inactivity at an interval controlled by "Tunnel::ip_tunnel_timeout" and discarded if needed. --- scripts/base/init-bare.bro | 3 +++ src/Sessions.cc | 28 +++++++++++++++++++++++++--- src/Sessions.h | 20 +++++++++++++++++++- src/Timer.cc | 1 + src/Timer.h | 1 + src/const.bif | 1 + 6 files changed, 50 insertions(+), 4 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 503bf2547c..3a57a65b20 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -2717,6 +2717,9 @@ export { ## reduce false positives of UDP traffic (e.g. DNS) that also happens ## to have a valid Teredo encapsulation. const yielding_teredo_decapsulation = T &redef; + + ## How often to cleanup internal state for inactive IP tunnels. + const ip_tunnel_timeout = 24hrs &redef; } # end export module GLOBAL; diff --git a/src/Sessions.cc b/src/Sessions.cc index b5d82f147f..84c881b0ef 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -68,6 +68,24 @@ void TimerMgrExpireTimer::Dispatch(double t, int is_expire) } } +void IPTunnelTimer::Dispatch(double t, int is_expire) + { + NetSessions::IPTunnelMap::const_iterator it = + sessions->ip_tunnels.find(tunnel_idx); + + if ( it == sessions->ip_tunnels.end() ) return; + + double last_active = it->second.second; + double inactive_time = t > last_active ? t - last_active : 0; + + if ( inactive_time >= BifConst::Tunnel::ip_tunnel_timeout ) + // tunnel activity timed out, delete it from map + sessions->ip_tunnels.erase(tunnel_idx); + else if ( ! 
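The new Tunnel::ip_tunnel_timeout constant introduced below is &redef, so the inactivity window for the IP-in-IP tunnel map can be adjusted from policy without recompiling. A one-line sketch; the value is only an example:

    # Drop internal state for an IP-in-IP tunnel after one hour without
    # any encapsulated traffic, instead of the default 24 hours.
    redef Tunnel::ip_tunnel_timeout = 1hr;
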
is_expire ) + // tunnel activity didn't timeout, schedule another timer + timer_mgr->Add(new IPTunnelTimer(t, tunnel_idx)); + } + NetSessions::NetSessions() { TypeList* t = new TypeList(); @@ -569,16 +587,20 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, else tunnel_idx = IPPair(ip_hdr->DstAddr(), ip_hdr->SrcAddr()); - IPTunnelMap::const_iterator it = ip_tunnels.find(tunnel_idx); + IPTunnelMap::iterator it = ip_tunnels.find(tunnel_idx); if ( it == ip_tunnels.end() ) { EncapsulatingConn ec(ip_hdr->SrcAddr(), ip_hdr->DstAddr()); - ip_tunnels[tunnel_idx] = ec; + ip_tunnels[tunnel_idx] = TunnelActivity(ec, network_time); + timer_mgr->Add(new IPTunnelTimer(network_time, tunnel_idx)); outer->Add(ec); } else - outer->Add(it->second); + { + it->second.second = network_time; + outer->Add(it->second.first); + } DoNextInnerPacket(t, hdr, inner, outer); diff --git a/src/Sessions.h b/src/Sessions.h index ed7f56c878..548c0903be 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -181,6 +181,7 @@ protected: friend class RemoteSerializer; friend class ConnCompressor; friend class TimerMgrExpireTimer; + friend class IPTunnelTimer; Connection* NewConn(HashKey* k, double t, const ConnID* id, const u_char* data, int proto, uint32 flow_lable, @@ -240,8 +241,10 @@ protected: PDict(Connection) udp_conns; PDict(Connection) icmp_conns; PDict(FragReassembler) fragments; + typedef pair IPPair; - typedef std::map IPTunnelMap; + typedef pair TunnelActivity; + typedef std::map IPTunnelMap; IPTunnelMap ip_tunnels; ARP_Analyzer* arp_analyzer; @@ -261,6 +264,21 @@ protected: TimerMgrMap timer_mgrs; }; + +class IPTunnelTimer : public Timer { +public: + IPTunnelTimer(double t, NetSessions::IPPair p) + : Timer(t + BifConst::Tunnel::ip_tunnel_timeout, + TIMER_IP_TUNNEL_INACTIVITY), tunnel_idx(p) {} + + ~IPTunnelTimer() {} + + void Dispatch(double t, int is_expire); + +protected: + NetSessions::IPPair tunnel_idx; +}; + // Manager for the currently active sessions. extern NetSessions* sessions; diff --git a/src/Timer.cc b/src/Timer.cc index 2e2fb09c6b..c2a8bb3421 100644 --- a/src/Timer.cc +++ b/src/Timer.cc @@ -20,6 +20,7 @@ const char* TimerNames[] = { "IncrementalSendTimer", "IncrementalWriteTimer", "InterconnTimer", + "IPTunnelInactivityTimer", "NetbiosExpireTimer", "NetworkTimer", "NTPExpireTimer", diff --git a/src/Timer.h b/src/Timer.h index bb6b8d56ae..310e72bdc9 100644 --- a/src/Timer.h +++ b/src/Timer.h @@ -26,6 +26,7 @@ enum TimerType { TIMER_INCREMENTAL_SEND, TIMER_INCREMENTAL_WRITE, TIMER_INTERCONN, + TIMER_IP_TUNNEL_INACTIVITY, TIMER_NB_EXPIRE, TIMER_NETWORK, TIMER_NTP_EXPIRE, diff --git a/src/const.bif b/src/const.bif index 368ee34396..499dc63314 100644 --- a/src/const.bif +++ b/src/const.bif @@ -16,5 +16,6 @@ const Tunnel::enable_ip: bool; const Tunnel::enable_ayiya: bool; const Tunnel::enable_teredo: bool; const Tunnel::yielding_teredo_decapsulation: bool; +const Tunnel::ip_tunnel_timeout: interval; const Threading::heartbeat_interval: interval; From ce58a3e90864a00027ec5ecabdf71e4f0eaa93f4 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 18 Jun 2012 16:56:19 -0500 Subject: [PATCH 414/651] Refactor of interal tunnel analysis code. 
- Pulled more common code into NetSessions::DoNextInnerPacket() and made the pcap header it makes internally use network_time - Remove Encapsulation class ctor from pointer - Rename Encapsulation class to EncapsulationStack --- src/Conn.cc | 14 +++++++------- src/Conn.h | 8 ++++---- src/Sessions.cc | 37 +++++++++++++++++++++---------------- src/Sessions.h | 21 +++++++++++++-------- src/Teredo.cc | 9 ++------- src/TunnelEncapsulation.cc | 2 +- src/TunnelEncapsulation.h | 26 ++++++++++---------------- src/ayiya-analyzer.pac | 8 ++------ 8 files changed, 60 insertions(+), 65 deletions(-) diff --git a/src/Conn.cc b/src/Conn.cc index f3ebaca0c9..bc2e7fb5cf 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -113,7 +113,7 @@ unsigned int Connection::external_connections = 0; IMPLEMENT_SERIAL(Connection, SER_CONNECTION); Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, - uint32 flow, const Encapsulation* arg_encap) + uint32 flow, const EncapsulationStack* arg_encap) { sessions = s; key = k; @@ -162,7 +162,7 @@ Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, uid = 0; // Will set later. if ( arg_encap ) - encapsulation = new Encapsulation(arg_encap); + encapsulation = new EncapsulationStack(*arg_encap); else encapsulation = 0; @@ -200,7 +200,7 @@ Connection::~Connection() --external_connections; } -void Connection::CheckEncapsulation(const Encapsulation* arg_encap) +void Connection::CheckEncapsulation(const EncapsulationStack* arg_encap) { if ( encapsulation && arg_encap ) { @@ -208,22 +208,22 @@ void Connection::CheckEncapsulation(const Encapsulation* arg_encap) { Event(tunnel_changed, 0, arg_encap->GetVectorVal()); delete encapsulation; - encapsulation = new Encapsulation(arg_encap); + encapsulation = new EncapsulationStack(*arg_encap); } } else if ( encapsulation ) { - Encapsulation empty; + EncapsulationStack empty; Event(tunnel_changed, 0, empty.GetVectorVal()); delete encapsulation; - encapsulation = new Encapsulation(arg_encap); + encapsulation = 0; } else if ( arg_encap ) { Event(tunnel_changed, 0, arg_encap->GetVectorVal()); - encapsulation = new Encapsulation(arg_encap); + encapsulation = new EncapsulationStack(*arg_encap); } } diff --git a/src/Conn.h b/src/Conn.h index b3798cfb36..782d41a801 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -52,7 +52,7 @@ class Analyzer; class Connection : public BroObj { public: Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, - uint32 flow, const Encapsulation* arg_encap); + uint32 flow, const EncapsulationStack* arg_encap); virtual ~Connection(); // Invoked when an encapsulation is discovered. It records the @@ -60,7 +60,7 @@ public: // event if it's different from the previous encapsulation (or the // first encountered). encap can be null to indicate no // encapsulation. - void CheckEncapsulation(const Encapsulation* encap); + void CheckEncapsulation(const EncapsulationStack* encap); // Invoked when connection is about to be removed. 
Use Ref(this) // inside Done to keep the connection object around (though it'll @@ -252,7 +252,7 @@ public: uint64 GetUID() const { return uid; } - const Encapsulation* GetEncapsulation() const + const EncapsulationStack* GetEncapsulation() const { return encapsulation; } void CheckFlowLabel(bool is_orig, uint32 flow_label); @@ -292,7 +292,7 @@ protected: double inactivity_timeout; RecordVal* conn_val; LoginConn* login_conn; // either nil, or this - const Encapsulation* encapsulation; // tunnels + const EncapsulationStack* encapsulation; // tunnels int suppress_event; // suppress certain events to once per conn. unsigned int installed_status_timer:1; diff --git a/src/Sessions.cc b/src/Sessions.cc index 84c881b0ef..10126f45b1 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -367,7 +367,7 @@ int NetSessions::CheckConnectionTag(Connection* conn) void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, const IP_Hdr* ip_hdr, const u_char* const pkt, - int hdr_size, const Encapsulation* encapsulation) + int hdr_size, const EncapsulationStack* encapsulation) { uint32 caplen = hdr->caplen - hdr_size; const struct ip* ip4 = ip_hdr->IP4_Hdr(); @@ -576,8 +576,6 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, return; } - Encapsulation* outer = new Encapsulation(encapsulation); - // Look up to see if we've already seen this IP tunnel, identified // by the pair of IP addresses, so that we can always associate the // same UID with it. @@ -594,18 +592,13 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, EncapsulatingConn ec(ip_hdr->SrcAddr(), ip_hdr->DstAddr()); ip_tunnels[tunnel_idx] = TunnelActivity(ec, network_time); timer_mgr->Add(new IPTunnelTimer(network_time, tunnel_idx)); - outer->Add(ec); } else - { it->second.second = network_time; - outer->Add(it->second.first); - } - DoNextInnerPacket(t, hdr, inner, outer); + DoNextInnerPacket(t, hdr, inner, encapsulation, + ip_tunnels[tunnel_idx].first); - delete inner; - delete outer; Remove(f); return; } @@ -724,7 +717,8 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, } void NetSessions::DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, - const IP_Hdr* inner, const Encapsulation* outer) + const IP_Hdr* inner, const EncapsulationStack* prev, + const EncapsulatingConn& ec) { struct pcap_pkthdr fake_hdr; fake_hdr.caplen = fake_hdr.len = inner->TotalLen(); @@ -732,7 +726,11 @@ void NetSessions::DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, if ( hdr ) fake_hdr.ts = hdr->ts; else - fake_hdr.ts.tv_sec = fake_hdr.ts.tv_usec = 0; + { + fake_hdr.ts.tv_sec = (time_t) network_time; + fake_hdr.ts.tv_usec = (suseconds_t) + ((network_time - (double)fake_hdr.ts.tv_sec) * 1000000); + } const u_char* pkt = 0; @@ -741,7 +739,14 @@ void NetSessions::DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, else pkt = (const u_char*) inner->IP6_Hdr(); + EncapsulationStack* outer = prev ? 
+ new EncapsulationStack(*prev) : new EncapsulationStack(); + outer->Add(ec); + DoNextPacket(t, &fake_hdr, inner, pkt, 0, outer); + + delete inner; + delete outer; } int NetSessions::ParseIPPacket(int caplen, const u_char* const pkt, int proto, @@ -778,7 +783,7 @@ int NetSessions::ParseIPPacket(int caplen, const u_char* const pkt, int proto, bool NetSessions::CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, const struct pcap_pkthdr* h, - const u_char* p, const Encapsulation* encap) + const u_char* p, const EncapsulationStack* encap) { uint32 min_hdr_len = 0; switch ( proto ) { @@ -1110,7 +1115,7 @@ void NetSessions::GetStats(SessionStats& s) const Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, const u_char* data, int proto, uint32 flow_label, - const Encapsulation* encapsulation) + const EncapsulationStack* encapsulation) { // FIXME: This should be cleaned up a bit, it's too protocol-specific. // But I'm not yet sure what the right abstraction for these things is. @@ -1331,7 +1336,7 @@ void NetSessions::Internal(const char* msg, const struct pcap_pkthdr* hdr, } void NetSessions::Weird(const char* name, const struct pcap_pkthdr* hdr, - const u_char* pkt, const Encapsulation* encap) + const u_char* pkt, const EncapsulationStack* encap) { if ( hdr ) dump_this_packet = 1; @@ -1343,7 +1348,7 @@ void NetSessions::Weird(const char* name, const struct pcap_pkthdr* hdr, } void NetSessions::Weird(const char* name, const IP_Hdr* ip, - const Encapsulation* encap) + const EncapsulationStack* encap) { if ( encap && encap->LastType() != BifEnum::Tunnel::NONE ) reporter->Weird(ip->SrcAddr(), ip->DstAddr(), diff --git a/src/Sessions.h b/src/Sessions.h index 548c0903be..613eebe5ca 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -16,7 +16,7 @@ struct pcap_pkthdr; -class Encapsulation; +class EncapsulationStack; class Connection; class ConnID; class OSFingerprint; @@ -109,9 +109,9 @@ public: void GetStats(SessionStats& s) const; void Weird(const char* name, const struct pcap_pkthdr* hdr, - const u_char* pkt, const Encapsulation* encap = 0); + const u_char* pkt, const EncapsulationStack* encap = 0); void Weird(const char* name, const IP_Hdr* ip, - const Encapsulation* encap = 0); + const EncapsulationStack* encap = 0); PacketFilter* GetPacketFilter() { @@ -137,7 +137,7 @@ public: void DoNextPacket(double t, const struct pcap_pkthdr* hdr, const IP_Hdr* ip_hdr, const u_char* const pkt, - int hdr_size, const Encapsulation* encapsulation); + int hdr_size, const EncapsulationStack* encapsulation); /** * Wrapper that recurses on DoNextPacket for encapsulated IP packets. @@ -147,10 +147,15 @@ public: * so that the fake pcap header passed to DoNextPacket will use * the same timeval. The caplen and len fields of the fake pcap * header are always set to the TotalLength() of \a inner. - * @param outer The encapsulation information for the inner IP packet. + * @param inner Pointer to IP header wrapper of the inner packet, ownership + * of the pointer's memory is assumed by this function. + * @param prev Any previous encapsulation stack of the caller, not including + * the most-recently found depth of encapsulation. + * @param ec The most-recently found depth of encapsulation. 
*/ void DoNextInnerPacket(double t, const struct pcap_pkthdr* hdr, - const IP_Hdr* inner, const Encapsulation* outer); + const IP_Hdr* inner, const EncapsulationStack* prev, + const EncapsulatingConn& ec); /** * Returns a wrapper IP_Hdr object if \a pkt appears to be a valid IPv4 @@ -185,7 +190,7 @@ protected: Connection* NewConn(HashKey* k, double t, const ConnID* id, const u_char* data, int proto, uint32 flow_lable, - const Encapsulation* encapsulation); + const EncapsulationStack* encapsulation); // Check whether the tag of the current packet is consistent with // the given connection. Returns: @@ -234,7 +239,7 @@ protected: // than that protocol's minimum header size. bool CheckHeaderTrunc(int proto, uint32 len, uint32 caplen, const struct pcap_pkthdr* hdr, const u_char* pkt, - const Encapsulation* encap); + const EncapsulationStack* encap); CompositeHash* ch; PDict(Connection) tcp_conns; diff --git a/src/Teredo.cc b/src/Teredo.cc index e537edb916..ac68bdbed1 100644 --- a/src/Teredo.cc +++ b/src/Teredo.cc @@ -149,7 +149,7 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, return; } - const Encapsulation* e = Conn()->GetEncapsulation(); + const EncapsulationStack* e = Conn()->GetEncapsulation(); if ( e && e->Depth() >= BifConst::Tunnel::max_depth ) { @@ -222,12 +222,7 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, Conn()->Event(teredo_bubble, 0, teredo_hdr); } - Encapsulation* outer = new Encapsulation(e); EncapsulatingConn ec(Conn(), BifEnum::Tunnel::TEREDO); - outer->Add(ec); - sessions->DoNextInnerPacket(network_time, 0, inner, outer); - - delete inner; - delete outer; + sessions->DoNextInnerPacket(network_time, 0, inner, e, ec); } diff --git a/src/TunnelEncapsulation.cc b/src/TunnelEncapsulation.cc index f023a40b6a..edbabef81f 100644 --- a/src/TunnelEncapsulation.cc +++ b/src/TunnelEncapsulation.cc @@ -34,7 +34,7 @@ RecordVal* EncapsulatingConn::GetRecordVal() const return rv; } -bool operator==(const Encapsulation& e1, const Encapsulation& e2) +bool operator==(const EncapsulationStack& e1, const EncapsulationStack& e2) { if ( ! e1.conns ) return e2.conns; diff --git a/src/TunnelEncapsulation.h b/src/TunnelEncapsulation.h index 9dcf134536..e8ca7a48b6 100644 --- a/src/TunnelEncapsulation.h +++ b/src/TunnelEncapsulation.h @@ -114,12 +114,12 @@ protected: /** * Abstracts an arbitrary amount of nested tunneling. */ -class Encapsulation { +class EncapsulationStack { public: - Encapsulation() : conns(0) + EncapsulationStack() : conns(0) {} - Encapsulation(const Encapsulation& other) + EncapsulationStack(const EncapsulationStack& other) { if ( other.conns ) conns = new vector(*(other.conns)); @@ -127,15 +127,7 @@ public: conns = 0; } - Encapsulation(const Encapsulation* other) - { - if ( other && other->conns ) - conns = new vector(*(other->conns)); - else - conns = 0; - } - - Encapsulation& operator=(const Encapsulation& other) + EncapsulationStack& operator=(const EncapsulationStack& other) { if ( this == &other ) return *this; @@ -150,10 +142,10 @@ public: return *this; } - ~Encapsulation() { delete conns; } + ~EncapsulationStack() { delete conns; } /** - * Add a new inner-most tunnel to the Encapsulation. + * Add a new inner-most tunnel to the EncapsulationStack. * * @param c The new inner-most tunnel to append to the tunnel chain. 
*/ @@ -200,9 +192,11 @@ public: return vv; } - friend bool operator==(const Encapsulation& e1, const Encapsulation& e2); + friend bool operator==(const EncapsulationStack& e1, + const EncapsulationStack& e2); - friend bool operator!=(const Encapsulation& e1, const Encapsulation& e2) + friend bool operator!=(const EncapsulationStack& e1, + const EncapsulationStack& e2) { return ! ( e1 == e2 ); } diff --git a/src/ayiya-analyzer.pac b/src/ayiya-analyzer.pac index f1b144ff44..89d1143ad7 100644 --- a/src/ayiya-analyzer.pac +++ b/src/ayiya-analyzer.pac @@ -12,7 +12,7 @@ flow AYIYA_Flow function process_ayiya(pdu: PDU): bool %{ Connection *c = connection()->bro_analyzer()->Conn(); - const Encapsulation* e = c->GetEncapsulation(); + const EncapsulationStack* e = c->GetEncapsulation(); if ( e && e->Depth() >= BifConst::Tunnel::max_depth ) { @@ -72,14 +72,10 @@ flow AYIYA_Flow if ( result != 0 ) return false; - Encapsulation* outer = new Encapsulation(e); EncapsulatingConn ec(c, BifEnum::Tunnel::AYIYA); - outer->Add(ec); - sessions->DoNextInnerPacket(network_time(), 0, inner, outer); + sessions->DoNextInnerPacket(network_time(), 0, inner, e, ec); - delete inner; - delete outer; return (result == 0) ? true : false; %} From d6286d953d0d0c34096ebbe9bec4b25ed2d72565 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 18 Jun 2012 17:04:46 -0500 Subject: [PATCH 415/651] Remove &synchronized from Tunnel::active table. --- scripts/base/frameworks/tunnels/main.bro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro index 2f5625f8b2..869c501fe6 100644 --- a/scripts/base/frameworks/tunnels/main.bro +++ b/scripts/base/frameworks/tunnels/main.bro @@ -76,7 +76,7 @@ export { ## Currently active tunnels. That is, tunnels for which new, encapsulated ## connections have been seen in the interval indicated by ## :bro:see:`Tunnel::expiration_interval`. - global active: table[conn_id] of Info = table() &synchronized &read_expire=expiration_interval &expire_func=expire; + global active: table[conn_id] of Info = table() &read_expire=expiration_interval &expire_func=expire; } const ayiya_ports = { 5072/udp }; From cf593f13e0f664051bae119ea56b3f02441e18a1 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 18 Jun 2012 15:58:52 -0700 Subject: [PATCH 416/651] Updating baselines and NEWS. --- NEWS | 4 ++-- .../conn.ds.txt | 6 +++--- .../conn.ds.txt | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index 66bf8b040c..75bb78130f 100644 --- a/NEWS +++ b/NEWS @@ -60,8 +60,8 @@ Bro 2.1 signature_files constant, this can be used to load signatures relative to the current script (e.g., "@load-sigs ./foo.sig"). -- The options encap_hdr_size and tunnel_port have been removed. Bro - now supports decapsulating tunnels directly for protocols it +- The options "tunnel_port" and "parse_udp_tunnels" have been removed. + Bro now supports decapsulating tunnels directly for protocols it understands. TODO: Extend. 
diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt index 620babdd4c..c4ac546ab6 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt @@ -28,7 +28,7 @@ - + @@ -49,10 +49,10 @@ - + # Extent, type='conn' -ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents 1300475167096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 1300475167097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 1300475167099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt index c20e38d4f3..b74b9fd7e3 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt @@ -28,7 +28,7 @@ - + @@ -49,10 +49,10 @@ - + # Extent, type='conn' -ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents +ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents 1300475167.096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 1300475167.097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 1300475167.099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0 From 83f385b2b02f8c00cbaee4e65a268154fa6a05c1 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 19 Jun 2012 12:59:38 -0500 Subject: [PATCH 417/651] Make Teredo bubble packet parsing more lenient. Teredo bubble packets (IPv6 w/ No Next Header and zero Payload Length) with data extending past the inner IPv6 header (the outer IPv4 header's Total Length and UDP header's Length indicate this) now only raises a "Teredo_payload_len_mismatch" weird instead of causing a ProtocolViolation(). This also fixes a crash in NetSessions::ParseIPPacket() that occurred when the packet length didn't match the payload length field. 
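For illustration, a minimal standalone sketch of the decision described above (this sketch is not part of the patch; the type and function names below are hypothetical stand-ins for Bro's IP_Hdr accessors): an inner IPv6 header whose Next Header is 59 ("No Next Header") and whose Payload Length is zero is still accepted as a bubble even when trailing bytes follow, and only other length mismatches remain protocol violations.

    // Sketch only -- assumes a hypothetical InnerV6View in place of IP_Hdr.
    #include <cstdint>
    #include <cstdio>

    static const uint8_t IPV6_NO_NEXT_HEADER = 59; // IPv6 "No Next Header"

    struct InnerV6View {
        uint8_t  next_proto;    // inner IPv6 "Next Header" field
        uint16_t payload_len;   // inner IPv6 "Payload Length" field
        int      trailing_data; // bytes seen past the declared payload length
    };

    // Bubbles carrying extra data are only logged as a weird; any other
    // length mismatch is still treated as a protocol violation.
    const char* classify_teredo_inner(const InnerV6View& ip6)
        {
        if ( ip6.trailing_data == 0 )
            return "ok";
        if ( ip6.next_proto == IPV6_NO_NEXT_HEADER && ip6.payload_len == 0 )
            return "weird: Teredo_bubble_with_payload";
        return "violation: Teredo payload length";
        }

    int main()
        {
        InnerV6View bubble = { IPV6_NO_NEXT_HEADER, 0, 12 };
        std::printf("%s\n", classify_teredo_inner(bubble));
        return 0;
        }
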
--- src/Sessions.cc | 5 +- src/Sessions.h | 9 +- src/Teredo.cc | 44 +++++++--- src/ayiya-analyzer.pac | 3 + .../core.tunnels.false-teredo/dpd.log | 13 +++ .../core.tunnels.false-teredo/weird.log | 6 -- .../conn.log | 14 +++ .../http.log | 9 ++ .../output | 83 ++++++++++++++++++ .../tunnel.log | 13 +++ .../weird.log | 9 ++ .../tunnels/teredo_bubble_with_payload.pcap | Bin 0 -> 15606 bytes testing/btest/core/tunnels/false-teredo.bro | 1 + .../tunnels/teredo_bubble_with_payload.test | 36 ++++++++ 14 files changed, 219 insertions(+), 26 deletions(-) create mode 100644 testing/btest/Baseline/core.tunnels.false-teredo/dpd.log create mode 100644 testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log create mode 100644 testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log create mode 100644 testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/output create mode 100644 testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log create mode 100644 testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log create mode 100644 testing/btest/Traces/tunnels/teredo_bubble_with_payload.pcap create mode 100644 testing/btest/core/tunnels/teredo_bubble_with_payload.test diff --git a/src/Sessions.cc b/src/Sessions.cc index 330d39605d..6f42e5726b 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -574,6 +574,7 @@ void NetSessions::DoNextPacket(double t, const struct pcap_pkthdr* hdr, if ( result != 0 ) { + delete inner; Remove(f); return; } @@ -774,11 +775,7 @@ int NetSessions::ParseIPPacket(int caplen, const u_char* const pkt, int proto, reporter->InternalError("Bad IP protocol version in DoNextInnerPacket"); if ( (uint32)caplen != inner->TotalLen() ) - { - delete inner; - inner = 0; return (uint32)caplen < inner->TotalLen() ? -1 : 1; - } return 0; } diff --git a/src/Sessions.h b/src/Sessions.h index 245cd4cbf6..a7d7b1272f 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -168,11 +168,14 @@ public: * @param proto Either IPPROTO_IPV6 or IPPROTO_IPV4 to indicate which IP * protocol \a pkt corresponds to. * @param inner The inner IP packet wrapper pointer to be allocated/assigned - * if \a pkt looks like a valid IP packet. - * @return 0 If the inner IP packet appeared valid in which case the caller - * is responsible for deallocating \a inner, else -1 if \a caplen + * if \a pkt looks like a valid IP packet or at least long enough + * to hold an IP header. + * @return 0 If the inner IP packet appeared valid, else -1 if \a caplen * is greater than the supposed IP packet's payload length field or * 1 if \a caplen is less than the supposed packet's payload length. + * In the -1 case, \a inner may still be non-null if \a caplen was + * long enough to be an IP header, and \a inner is always non-null + * for other return values. */ int ParseIPPacket(int caplen, const u_char* const pkt, int proto, IP_Hdr*& inner); diff --git a/src/Teredo.cc b/src/Teredo.cc index ac68bdbed1..54676c3255 100644 --- a/src/Teredo.cc +++ b/src/Teredo.cc @@ -78,12 +78,9 @@ bool TeredoEncapsulation::DoParse(const u_char* data, int& len, return false; } - if ( len - 40 != ntohs(((const struct ip6_hdr*)data)->ip6_plen) ) - { - Weird("Teredo_payload_len_mismatch"); - return false; - } - + // There's at least a possible IPv6 header, we'll decide what to do + // later if the payload length field doesn't match the actual length + // of the packet. 
inner_ip = data; return true; } @@ -160,7 +157,21 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, IP_Hdr* inner = 0; int rslt = sessions->ParseIPPacket(len, te.InnerIP(), IPPROTO_IPV6, inner); - if ( rslt == 0 ) + if ( rslt > 0 ) + { + if ( inner->NextProto() == IPPROTO_NONE && inner->PayloadLen() == 0 ) + // Teredo bubbles having data after IPv6 header isn't strictly a + // violation, but a little weird. + Weird("Teredo_bubble_with_payload"); + else + { + delete inner; + ProtocolViolation("Teredo payload length", (const char*) data, len); + return; + } + } + + if ( rslt == 0 || rslt > 0 ) { if ( BifConst::Tunnel::yielding_teredo_decapsulation && ! ProtocolConfirmed() ) @@ -174,12 +185,20 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, LOOP_OVER_GIVEN_CONST_CHILDREN(i, Parent()->GetChildren()) { if ( (*i)->ProtocolConfirmed() ) + { sibling_has_confirmed = true; + break; + } } } if ( ! sibling_has_confirmed ) ProtocolConfirmation(); + else + { + delete inner; + return; + } } else { @@ -188,13 +207,12 @@ void Teredo_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, } } - else if ( rslt < 0 ) - ProtocolViolation("Truncated Teredo", (const char*) data, len); - else - ProtocolViolation("Teredo payload length", (const char*) data, len); - - if ( rslt != 0 || ! ProtocolConfirmed() ) return; + { + delete inner; + ProtocolViolation("Truncated Teredo", (const char*) data, len); + return; + } Val* teredo_hdr = 0; diff --git a/src/ayiya-analyzer.pac b/src/ayiya-analyzer.pac index 89d1143ad7..7a151453c1 100644 --- a/src/ayiya-analyzer.pac +++ b/src/ayiya-analyzer.pac @@ -70,7 +70,10 @@ flow AYIYA_Flow ${pdu.packet}.length()); if ( result != 0 ) + { + delete inner; return false; + } EncapsulatingConn ec(c, BifEnum::Tunnel::AYIYA); diff --git a/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log b/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log new file mode 100644 index 0000000000..4949f16e62 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log @@ -0,0 +1,13 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path dpd +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto analyzer failure_reason +#types time string addr port addr port enum string string +1258567191.486869 UWkUyAuUGXf 192.168.1.105 57696 192.168.1.1 53 udp TEREDO Teredo payload length [c\x1d\x81\x80\x00\x01\x00\x02\x00\x02\x00\x00\x04amch\x0equestionmarket\x03com\x00\x00\x01\x00...] +1258578181.516140 nQcgTWjvg4c 192.168.1.104 64838 192.168.1.1 53 udp TEREDO Teredo payload length [h\xfd\x81\x80\x00\x01\x00\x02\x00\x03\x00\x02\x08football\x02uk\x07reuters\x03com\x00\x00\x01\x00...] +1258579063.784919 j4u32Pc5bif 192.168.1.104 55778 192.168.1.1 53 udp TEREDO Teredo payload length [j\x12\x81\x80\x00\x01\x00\x02\x00\x04\x00\x00\x08fastflip\x0agooglelabs\x03com\x00\x00\x01\x00...] +1258581768.898165 TEfuqmmG4bh 192.168.1.104 50798 192.168.1.1 53 udp TEREDO Teredo payload length [o\xe3\x81\x80\x00\x01\x00\x02\x00\x04\x00\x04\x03www\x0fnashuatelegraph\x03com\x00\x00\x01\x00...] +1258584478.989528 FrJExwHcSal 192.168.1.104 64963 192.168.1.1 53 udp TEREDO Teredo payload length [e\xbd\x81\x80\x00\x01\x00\x08\x00\x06\x00\x06\x08wellness\x05blogs\x04time\x03com\x00\x00\x01\x00...] +1258600683.934672 5OKnoww6xl4 192.168.1.103 59838 192.168.1.1 53 udp TEREDO Teredo payload length [h\xf0\x81\x80\x00\x01\x00\x01\x00\x02\x00\x00\x06update\x0csanasecurity\x03com\x00\x00\x01\x00...] 
diff --git a/testing/btest/Baseline/core.tunnels.false-teredo/weird.log b/testing/btest/Baseline/core.tunnels.false-teredo/weird.log index 989b7beede..0ec1d0a7cf 100644 --- a/testing/btest/Baseline/core.tunnels.false-teredo/weird.log +++ b/testing/btest/Baseline/core.tunnels.false-teredo/weird.log @@ -6,14 +6,8 @@ #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1258567191.405770 - - - - - truncated_header_in_tunnel - F bro -1258567191.486869 UWkUyAuUGXf 192.168.1.105 57696 192.168.1.1 53 Teredo_payload_len_mismatch - F bro 1258578181.260420 - - - - - truncated_header_in_tunnel - F bro -1258578181.516140 nQcgTWjvg4c 192.168.1.104 64838 192.168.1.1 53 Teredo_payload_len_mismatch - F bro 1258579063.557927 - - - - - truncated_header_in_tunnel - F bro -1258579063.784919 j4u32Pc5bif 192.168.1.104 55778 192.168.1.1 53 Teredo_payload_len_mismatch - F bro 1258581768.568451 - - - - - truncated_header_in_tunnel - F bro -1258581768.898165 TEfuqmmG4bh 192.168.1.104 50798 192.168.1.1 53 Teredo_payload_len_mismatch - F bro 1258584478.859853 - - - - - truncated_header_in_tunnel - F bro -1258584478.989528 FrJExwHcSal 192.168.1.104 64963 192.168.1.1 53 Teredo_payload_len_mismatch - F bro 1258600683.934458 - - - - - truncated_header_in_tunnel - F bro -1258600683.934672 5OKnoww6xl4 192.168.1.103 59838 192.168.1.1 53 Teredo_payload_len_mismatch - F bro diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log new file mode 100644 index 0000000000..6ceb4efcb3 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log @@ -0,0 +1,14 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1340127577.354166 FrJExwHcSal 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 tcp http 0.052829 1675 10467 S1 - 0 ShADad 10 2279 12 11191 j4u32Pc5bif +1340127577.336558 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 udp teredo 0.010291 129 52 SF - 0 Dd 2 185 1 80 (empty) +1340127577.341510 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 udp teredo 0.065485 2367 11243 SF - 0 Dd 12 2703 13 11607 (empty) +1340127577.339015 k6kgXLOoSKl 192.168.2.16 3797 65.55.158.81 3544 udp teredo - - - SHR - 0 d 0 0 1 137 (empty) +1340127577.339015 nQcgTWjvg4c fe80::8000:f227:bec8:61af 134 fe80::8000:ffff:ffff:fffd 133 icmp - - - - OTH - 0 - 1 88 0 0 k6kgXLOoSKl +1340127577.343969 TEfuqmmG4bh 2001:0:4137:9e50:8000:f12a:b9c8:2815 128 2001:4860:0:2001::68 129 icmp - 0.007778 4 4 OTH - 0 - 1 52 1 52 UWkUyAuUGXf,j4u32Pc5bif +1340127577.336558 arKYeMETxOg fe80::8000:ffff:ffff:fffd 133 ff02::2 134 icmp - - - - OTH - 0 - 1 64 0 0 UWkUyAuUGXf diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log new file mode 100644 index 0000000000..869476d7db --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log @@ -0,0 +1,9 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path http +#fields 
ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file +#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file +1340127577.361683 FrJExwHcSal 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 6640 200 OK - - - (empty) - - - text/html - - +1340127577.379360 FrJExwHcSal 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 2 GET ipv6.google.com /search?hl=en&q=Wireshark+!&btnG=Google+Search http://ipv6.google.com/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 25119 200 OK - - - (empty) - - - text/html - - diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/output b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/output new file mode 100644 index 0000000000..02d5a41e74 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/output @@ -0,0 +1,83 @@ +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=24, nxt=58, hlim=255, src=fe80::8000:ffff:ffff:fffd, dst=ff02::2, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] +auth: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=24, nxt=58, hlim=255, src=fe80::8000:ffff:ffff:fffd, dst=ff02::2, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] + ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] + origin: [p=3797/udp, a=70.55.215.234] +auth: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] + ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] + origin: [p=3797/udp, a=70.55.215.234] +origin: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] + ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] + auth: [id=, value=, nonce=14796129349558001544, confirm=0] + origin: [p=3797/udp, a=70.55.215.234] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=12, nxt=58, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, 
dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] + origin: [p=32900/udp, a=83.170.1.38] +origin: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] + origin: [p=32900/udp, a=83.170.1.38] +bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] + origin: [p=32900/udp, a=83.170.1.38] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=fe80::708d:fe83:4114:a512, exts=[]] +bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=fe80::708d:fe83:4114:a512, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=12, nxt=58, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=24, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=24, nxt=6, hlim=245, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=817, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=514, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=898, nxt=6, hlim=128, 
src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=812, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=717, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] +packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] + ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log new file mode 100644 index 0000000000..3f47321245 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log @@ -0,0 +1,13 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path tunnel +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type +#types time string addr port addr port enum enum +1340127577.336558 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Tunnel::DISCOVER Tunnel::TEREDO +1340127577.339015 k6kgXLOoSKl 192.168.2.16 3797 65.55.158.81 3544 Tunnel::DISCOVER Tunnel::TEREDO +1340127577.351747 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 Tunnel::DISCOVER Tunnel::TEREDO +1340127577.406995 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Tunnel::CLOSE Tunnel::TEREDO +1340127577.406995 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 Tunnel::CLOSE Tunnel::TEREDO +1340127577.406995 k6kgXLOoSKl 192.168.2.16 3797 65.55.158.81 3544 Tunnel::CLOSE Tunnel::TEREDO diff 
--git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log new file mode 100644 index 0000000000..e01fa49d45 --- /dev/null +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log @@ -0,0 +1,9 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1340127577.346849 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Teredo_bubble_with_payload - F bro +1340127577.349292 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 Teredo_bubble_with_payload - F bro diff --git a/testing/btest/Traces/tunnels/teredo_bubble_with_payload.pcap b/testing/btest/Traces/tunnels/teredo_bubble_with_payload.pcap new file mode 100644 index 0000000000000000000000000000000000000000..5036a52b5671fc6b284c2f7037cd9f59527b5645 GIT binary patch literal 15606 zcmeHu1yEee*6yGIf;$1iAR$0-9o*gBbr@i9861L#K!5-Nf&~i%2(Ez;+&xHw6I?=Y zhX9Y^-2CsH+*jwnw{E>x^{W0|)w}lY-MhclYj=OF_0colUYU&uKnDCeZf*bogxeea z`UTZe8We!%?KQjrC=F(Z_W{Ji;^{y%fHVLA4Q~ek0`YKzS_%+x#rZPSu)knU0<`tL z;ja(sK*jFUe0vTsgTG2DaC039zqow?a05TCZV>;t|Hm0I3{Xf3zee~+eIQ_;slJDS z2yTWfGYa7K?K61!!+<}GxicezzZ?K?#&dMLH9;M23f3g{7l6AOAxB7Ah|aK5RDP1JmkK#%<;FciS;-){zQ4G<*y)#I;$0}l}R@B2Jix|NB^fjm=)`Z#_77=RwiGda!c; z7s~Iw{*L+!1Cp`g;{TY21~%bMX+UDH3k zL;eIuuJJ(uSlxC5FaI=Vz3^{$V`llwn57QT&ETyG|K%~u-lBV|;Y!FoIRAsK#zIj5 zAxMC`@;fS$d-wtzH5up6s7U5p!)CXjNF4trDyf8Ay(s`7E3E}&1IlS>sj+dea$sS| zIk~|EfKX=-URG--Cu@5ME7-{a3q#ut;>sd!4RO4EsO;newYN8C<6&h7GU!4bEuB2w z1cBOuz<*o=RkVOStQ>+sh$D-(rXbMOLx6*o--3q`C<_7GIk9oFvxC^VIC(kQfih56 zh?SEU8y73P1rHX6I2a6ZzD0mRykKlLFb8{Pb7yCJDA*hZb#i3$g4aCp`n}e{UeHCD zot2-NiH+&*3iPWZ7A12>Yj<;NhydIacQ^YW=n{b zy*Ui>U++uWn7g_`ZVi&xRAm8yc=%a3nB8GkETFq?_?Z8|{u9*PL7TcFx+ebpdI|7Is75K-pLMn+el4AT1Hr2N|=wA2gJt?w&1a{;$r7A=MvCT z7Utk&=jY^N=ip`+Q2NIm4-Xd)NI>@XPC!!_CL;^h)>gDM(9*J(v9Si-dKlsWaj<~6 zIzWVBuI>;)pyqAYQtIO3;&R~Ila#lEqZKEguBN4f3rI(iPYR}PCnc%x>8!=;EiK0Z z208PBU0tEBTB=-fZh9)5DyGh=U|kk15T`v@#oNPPTh3Y!s-xy+pzkWjtL12`?rJS5 zt*T_Jt*I*MYOf??!z0fwVJT}1b@K9YuyeE4b#=9ZDC@9z^I3RtXmKd1Nh(XJ>ay$F zd9v%txag`nE8E*iNXofc!Z>xnZZ_uZww&xb&hqA-yx!J=K$TnDIoO45mGyO1?3{eu z?8UjY%{AOXKHf5#O45#QAgHd654Wu4ty`_^Tyz|GEM+v=W!QaSV33WJy^4Z{y}gC0 zE<_)~uj|EY4$^nxQ+9Nf@p17q0Q*?$>F_{UG-NC!U0oIUEv>Y9tljwdl}tUjBs4AE zA@bIys;XAD&YECN8!QYg3^);R#Qcc}ey1Qk+K4*}3jGTONgb%dm*g!G{I^v54+`@1 z^L4aT!zE`B`S}MCu{uD8q5 z)9ruvqy%xahS>-JIr+Hw;h%@<1sdGV0K}UbeFx99I{fRVW{F39CVtTfGu}lUL%a@S zgD0zNjtx%vd>>>30bc^y(F9GpZ?0nKoSij2`KTO+<%2tHGE!u@-r~LK5K?~kDtW<7 zKpwrDVNGcFmA5oW=~`9PtEkg+rpFK7%a(TavV7=}xOdLMs?auPDq?gpT)i2pnGw7B zq$?*>iJ2!g!4D-sh`1Q@A?aif7MDqQRteQo0(}t&D}zcDzBQzo4h4&E@G|~dzDrWD znTSh7QIA7}G+IT)F91B0hCcHQj|~s!D?c`3sx;V8saRMCId>yOFZeJ%DlI%Z#qt`I zJv9)kdfz}pC79CRyQ9LoKy*2oMNS<@As&}1H;o#r#{hS<8$G(R0tYj$EshIeUq6T+ zG6e#Yx$GPZBCs4Hmvx!6{Y)Vsc+lx5BsA#shTS~*B>}#xY*w2ye+(2ic3{6;^aX|v zq_vI-e=dhiwRm_khcQMDx{7k2Df51FsJT^!v~fbkQ*qbedQG>nb!MtebeX)Xtw&g1 zLJy+?SBS+#ty?0>KZyzn_c_HiLf!T!icn>@wXvXs0{gCGG-}E(PZ$L&?I45}?D_${ zipo^QIz9{L4Yfe@A1%T5*fcmKTf_}mo08NfZnP#*;cc}&FR;#dq!5g`gXogU1F0V= z+5}?%%x{H)oOh7EIKrk~uYBx~y?Wy#31n?ZA39)BPM$1Uy@+3;$(cGMU1-(&=;&}N zPWmjSa%moMU8G4`kJ0UO>qbXez+!fIFSNfU$jr;GJ<~rGWkpBN9LWNuaDqPnr*E zt)dn>C-56h@{qSep-%MnDw_8@ns&S@Q}o^&sVFpO@n^EOKB(oU)z_Fi{;~Q+?-J~I zcv=?F(DZZJRPtcVX)53IB+P2Sm7T>!%_F~Qx}LH~|Zkd*9bb52+)UY$M8rf;^ ze2!Ajn$=kYFZ@{xl@4-|=&9uf)vzGnT>a&K^P9AQ`Kxx$+Meu1_lZ98IV5*V@d=7a 
zFB`;yZ!o?g-lzDKl5(MQJiJ?<-Iv{zk(+qGTH@Ma^GY+ihh!vui^N+gBkybN;`2Tt zLbaz2r5cu7^tAGGB<T4iA?=in-sVPBMTy!~8*&-Y%V<8hG`6^TtC54ZGR!MrOj z0^fRq9(~kV1BI}B*`G6RKS7lG`0gcFCLKxQx_Gd#JW95zoTsvp?FCADdalkp(I*(6 z@QWh9=!D1DS$QXB+Ho@jF!a1!j-QC(X~wWwrg-oehLH8xZb)Ql)wd+4Gx(8M3`K}! z%qR1+0yZe^10xR8En5$h=@C|f<*5&{v!HDshu_4+?nnr2kmZnF%o-0r)SCeb&8Q!r zgFVLl0Ww?#8$W{sWpaiw3+IjqjFN%{gM`L;3FBK;3q;-gy}Gkr8U!ok_&Od{Oei{i zv@6GIqZtv_{K`)I11l3ZcE~QeBPWMOJ>#EtkGcC4dC`@XysRddL zghl$K68Yyc6mwXPT;43gn~uCv2x;G)p>1p<^59qwq3;3}mvr->8Un#=QcnRIp+%Rf z&F{wdA}ijv5h&L5?j1_d;Hpsc^4COR7$K zt85S4uj@4`GGJG@sU(h}UgRPFc7khuC@ zUHJlTm`q;uce}Cm37PeGimbAz)m?2 z$L5hroVkSRC z+6RxgyqA4F&Uc50zZr{=r(Zp#l$nR_30w;MU9E3!L%F?(?m@iR+kuA<~ z@h?SYe|SS1JIP>taK;RI|EI|IcWF#7eDN;Dee&mI6Yy`RxQ>>z`A)m=6c_t8#)PBZ zuSNlA-6~}8@_S^9C_@_s=V`@Pf97dK&K);1c$kOyuST{`yLUWIO3v_e3IG@$S4(R$ z&b~Oei)_kX$AWnhiISKaL3QG~G*j7BrVpl%gzZrg)ob>*v2>|uf-LI7>DAVaK$4x$ z9;rW^l}&vgxbCKVvL-zFRHMZlbDIL6CI8X%z-au~NebCW`FzLX7?;**a!640_-lrV zXB&Yt7uxhOo-W#!pDDVLjjXUp!;J)UQf?CpDiy&L-L+VdP%K+QG7S#IUf)bltm@Me z2D?h^*@K<;sxReCTC+*HIT@S6T*=VqPDWA;IAv)b%`Pu@xf2;~_-{WS&4%_(5hrz5 z>0*==e@ZxzarL}y)HM{#V5t_1<)*b74( z>0N|%ZzYx6oZ8jy$sz+Q4AW0&Hx* ziE3>BqXzx|rv^!n|815LeV1i){Q0lfzpu-<%=!7cjH=;MTj5CkU(jVVA+TFj=MNcD zQ&U)-U08?H-rGP=#YtaJ!Bt*TS`g@L4zm$v`-52iRE!0-vt^|5_A|&If3QB zNtqiP9Kr*NVaXwfQcmm#e0zd!@4U2g)_#&M;9URlbi~b+u9X>IUMN!<`SMU-0$C;w z7y5FR1h(jvj@~AvTC5S=N8XLQB7U^WPVS92l->OO5zFU9H;wS1b^%Yv1I)bs2+B3(|7Seul zkMz&1bbcQSbeUis_KQCj6Yhely!(Q}pf={ZT%&r(-BrX0;eaT7di)*zN7o*Zy(HJw z*V`LCc#@0br%qdM#|)q^4?3sIzWV3{*%-GD(bJW9$C21<1ck&?)fy$~B#eYa(r-1` zh@82TY#-rpx^vz*c|Z7kO!KDgG^C;t+*4{(FGuwm_c}8lhrE9-e%Y1Ui!)Kx^`R!! zn;14>HQdDDh0Ry_rW1r>RMUw-g6$L09 zXlVatfjL*{k|wYWlb`fHu)>G`O6Fa=I1{fzf!DZdQs}d|?nvxTQG8 zfbPVY)V}-Y%rKW)Y>SvqZu+2d+GsCs1*-RxM=K$la;Wq$T7suCXkPrd2)kjT=+90Z zHA4E<#G*~}DO|%{c{^D#8IddU!tjviznYU{qgtnxkPKcONrdD@!!UwdzA|{A{V>+= zWw!${ar99MxpJh81F(Y%D4#hA_xT+F)iGPZaj)$A4N{^B_S_|2C0HYJW%F|@ei5#6 zZwenY7p3pWCtU=Yv|EP9PwanG2}p5IiPw9@A^!uV7x~zEww(iOim=!=r#vRILD__F zZGNt{t{6`~)*azX7D%fvTIfS2s&w~~f==vs=?LdImrUI}fd_lno>=WvhN@78IYj$Q z{KGt=e7x{_?qKxR!UrKyc3^rnh6*UecYC;Wf^=CwcsZUs#m?KPtk_MiPzI(IAIH>K zj9wmMrd27*pn4)#BBZsi{>4Qkb~Ucs^Yae5v`2-htj1vqX!_EzH9;w$GdDLQ0_p^s z{J^XAv~f1^O0HC{AxZu5hn^*mOnMNVcjjVerUukLkNF3%%a=?ZeLnH}5h!jXsI)v? 
zX8aUl8rs$MegC@y*E>&i($85;^NRWf%q)g`=k*`4GN>4hw`EKiwRgR>p*G)oYdN;y z@&1GPzh%srJH~AP&luAq?btl)FZ(_33 zFK5Sod_Z`8`J_Ja)iF5Fs$St9D0|;Fk}DF-)y=#s9*~a!`}iJ;sof76ys{cD&t$&J z2bB~WN%#+6_z;}8GoNy%MxNu_rn8l(Ie^|A&uw{Xwl`8eSEcG=GksIG@ugpNmr8{5 zW>pmPaOYT5v~79O61Lw)q0&VHJ_{4wTiIsQ3}FX@Z`2+PB(%rT9vZ8ojn3xAIUWVw1m=2trp5T`_Cc|J9wb{Kn8hXtlrg|;(cePZus{}J_^ z1}tZZGKEJ^!f4%CF=P0ZTRlTl^E2~whI{7jWT(MM8DS>=jBSno4iUPM0Yb4Z|G>n@ z06%2dSHy0V3Mc^6Lb>9)>h0UB)sOBo^`1>X^W85`yPJ;(OV(Rpf_B1bwfENy;>&%_ zmKl-Rn>X!$*fK=UkjB$&6jx{yXAu~ayEgIy5g?2Ycxx^91)omHQLRvF)e#h=MO#Pg zk)fhm0d9=Bf=sQRmXHSV9$koAPJZGl)Z{P^S2;VAD~WwR!4Vgq_C>JSK`tZc^&3#L zzJJ%^z4X{xYD0$5>S!veC$%J5qZL8}LipbZSt+8@UrFKnNC?<1srC0~Eys=OT|$x; z6Dbl)YI&=>WHX32Ys5n^jS%ja&0k@cKF`~68s8b^Kt+|y@>m=|eNllF#VEeUAM56E z2645^10l&$0RhA+nr%NbX&zjvVG)PThWjT|kN7*(?mm0f(pg^V60ff#I=E~+${KUWAZ77n?TaK_iZXg+~Is z0(O~i#bVaP0oJ>wD-mxG4ADj3r@(m0b0`fzuIp}+e2_=!p?Kr@qlZ6&*MmPohDoL* zNlCSC)?Q#c21{7?bXwoEnKRa(sy=%fCg3`TeuGQ*!Etwpbo0gNjh9FW3ZnQAORFL4 zsD`~K%faJ9;zG!2hoa8~w;C3tL{i^Aen7g3%JfZJ>2ejrnD=S_vOHsbjD^i$iLdpg zOpfm;6TbCij#P7%jri%+XZ*>q15Py}6?E)r4MQpJ-GOh6hE*ch{a*O#yq|k6-cxciAiRzpBglYTxlRDYe!0ZKRydkx6ujd@)jND)-G< z5Wwz#Cot*GN3Qo#_SDiM4$nDg;P%3#t0!>KiAkbBc`Ssy9$o2<`SA5&11iNM5Qef3 z+F&%dJ7)6i3olG!TW5t4^}vG=p4@0kB*rWyN22esw>7an)N-PBVA(Lt7#o3ba+eu} zmxyZ*F*B-9jrGb0S|zV8Cr0{XDwe_+jH1bb#QSl|u3l(I&0hOtm{c$FW!{E^C;1jJ zojE33DeaRoIBp8Hgu4tCgwvNFxon9n7nISiTlu9>z1BMoFE}H0X?^)#WD?3J(Arw> z2EKkjn@F{4_R@B;9?aix9jeXx26Go+~a;35biHn!PGgw>xjWuJA zs~Kengw!>Mu%m|9UFe1qCLyEwI(k)Oq$;&8-mD$S_$?ra?>B69iUX40uKZz=6ge|Ig3I0`=isEv z?<_=(5Trd6BDo?Crt(y%na{T#e{H^5M9NFY8Sw$<`aa-xXW<{a4gf#@mIJo$IDqOu z=YXdUYPh28zc@hsb}!c7>^g|Oi`82H%mM#?cKg)fSFAQ10suhxZ~v1&U<7qr`iOt^ z#K)~@M}v>v3TvQ2KC$(O;-mzcDZ?@fxqZJ^YfD)G%M_|oPv}Z~3SSXN(zvaArpR18 z0Wm8ci}!5s1!N|jHQjKvmfLBAYjFQXgLg&eoJwu{S)1kK#;=+di?}7y(7K4XMOmai zpVpO`)VdoW>a7WN=HpD|m1fPm8PYk$TiIdHGR;&h_BP9D7A^eUMh1lS%q+b|Z22_` zbXz_;x5n@wv&1Y4*(#d}v5V55e@=9`m$o;Fom5N}OO4#}CRc3mvtp777zLdxkj!U` zKE>KekM|6fTgCC=6m$I>u?^L_etJ&Eiv(IEY=t4yxGbrtc7d=aQcYPS?QDj~^>Q9$-mb-~6~XXI_%u9VT+31{CWa6VkC+_;h>vdz zrAIHYWfEyE$QK;le$+OuK^%EF&cyqKiHDVSym68TMZ z$39n{;tgy`nC{X=n=H{~JOi#xeARN&$y93eb=6B3-S*2Nf-&V`s?{e-P=ewwoUcJ%~qLpniDP%|z zz{f}lt3DUj{gR+k)s6Se!0W@3hz`f#)X$ZQ=Z~9POFfg94AYlK5HH5Gt z>xvPPlcV8dlh@}w@_u$aET66F5^SO3hR9@S| zHTGri8288Xj4vLv@7t`cu@|8~o__9DCmP5^CCMf}z1xx#i)@vIh8cBsu`0gNQPj-R zL|H_f{xh4lK?F77gT357;9D&2WS%loE7x4E^TNB(3?+vUBW>s2b39kgd9-I%qhrK$ z8FC%5L>4`WqY-Y&;uCym0vNk5f>9z9;&NJ%+4)5lmYvoM~{@(8V!V zp9cc(n=4AcEOUxvp*xt8VoFh_n;O{8*nS>d(D$t|IGxRPY~XnS)GxpRBOo)6<46G( zV14i%im<076ce@+* zDs>M>PCqRD#Lw{>KP5tniww2#Yr8{nq~t}F-@Q*6CZ#)@K?wQ+T;B1|NmE%Tdm`VT zTL!+>PTed!zMjdJBL5!vF32n`n`Ztd`*viBA3n0dmSj%Jy-G|M-J zxlm*{GJU=QC0J^Pr^B-cLkN6s6!Pi`yoO%*Am;57HYoRLf$ z$7@#=w~xlcdq>jk-Zdf5`{s$-lUHg*d`+_J5tHEz+L+Fw?PVI5f%+NcPjy|qRMR3) z+)AgrE#xb0!#3NZ4~z1SdP?6F71l!r>vy%DB_AM1WeORv9!_jGn9u&01roE z(8Iz)#prW4wJ37=8B)v^`QA$+4F^Y=Bw2<#vo6>i7$D_IQ-b*9LcF%{dN+_p{72m^ z4`b^kEf5Co2UlQ{@qXqb$zgV8SF)jiVo0drBn$fmMs_A8?o}SucunQ96-4W2gH^ZI zz4YxD`sEG2OVVwdxg}P7R=(e84^96HGU_Sn`XO(l%h!1Qk)(=0+O$}w$Y)|o?A>gm zTWZuC)upNY=u*b6g-z0-{ulAvgNdO6JMoy#6oig;ytlYVv=i6zIUXp-x?7g0~gbQN(( z$eY#sr{eiLqIY-ordQb$u5mFb^}!RGv#ySBJ@b#-wCoJQd7MuCpk>14uRYO&J`H>t zB#?9m^ZJ1WTb(UqoM-cg^=x6;o~Sp{?67CyIAYL`Mi_mRL)L)|Ri+p3GaNlLG%u!!4C z=islH>R%R8Njgqk%aAfx#QSr1I+mx!=0)ST07a5(MX?TPCSEi13i+$B>X4S+K z;`pfaDxJ=~S;)h+&zrFB4L(O}ecC&N97)=(k9usY9*=Tju#h*wRh(y46_tvBeVpNM zf^C|D(>l@mtRD>S%6n49$hPx-8coryot)po|1)HX_XppFU~P_b-sYPQB7uu^L&vcR zOZDB0?-K_Fr_XM*aZ!J&Oei^30hYr_A5!=OouW1m9H@K3j83gD|!{!;t+BBUyOLTLgg 
zwK_p%e7-@t+~`lR4b(6%B*36}jJ*KDpbuNzqYbZo9~`*Lh?Iu>b6Y*nWO`5`A5t80~&^F)>+9~iew z(>t#wN1B9QUM}u6qXoc}Y*3M5nt0mhx9=UPq37+JfZHRmvG~e#!+Ea8Q#4&_6b&kJFGdS3c zIZi9m=sk|2>nthwBxTDP>buu=a%uPd__6GF!ML9R{#e0zqtC1C6D+10XbUEt4r);1 z(i!%68KwzmcTt++`J^5_p4`0eCif8ujR8#!vcLl*Kwu7hCb4%{W|!S+V=hD$~slq}es)z|ydsV*@rg)HeD9@pTf<4$s} z;L*+XTHRdGYtZuRoEa3vpKaVP(*4zW`r8P7Xp`Su3+DDw4FOj#2CBEMw;Ina1neVZ z_!8CWzxp*0FI%ZmU{$^~a5^wzR^d_kkm>oQgfO4EZ1$C-a;t*28Fq~gwkXkulY-nH z2{k@ydE9J@P@{U%1@wYv1s2K)0Y>jdOhf7Yq?0~nX+&q`v7V>WJ)R#dnb-U3#-LS4 zjK;`l9k*rq!$@GDa=xad-<;uwnpB{&*}IB$+c_Sr9afeQ=`*4aLABH!h|pZ?)_zxA zIy~N_v6IR)W1TCZ+-#A3!D)JVV83OI_mrA&JM6gxcj7+RnI@>X`Fk&9X9vpMRPA=C zlj`)GJRp9RbR^1pnl(S4S2A<1qUO8Vo_$*T6ij?S|B+X1paRo*J<`XApqrNkZ6w>X zvehWg3(C0EyPjmztYY_rDPx#@czY7Wq=SAWwL0LYK489@r^#%iRa_j2K7(y)BKLp9 z6LDG0cx{m(@YwgDREbm!itdRx90+cx9RN6P4NxFxVu9Hw-6xuyva0pf1lF zb@G+HZX-&2sgd2BoiqIn#Y>M#XAC08BAr8jr~w(PPL%MF5t+lhbAILr4l44~8w6dE z=*RLW00zUiKvoZOZ6oh=k$}P@PF3=}S#N#Fl49|Br-7pm>>-tGyN9~zL~cf=Xamko zUKFHUwt)8v>dT$t+OoV_fJ`^ZDqMylhIoWB++%K2xyzaiAb*(=y%)8ot)Y+J*Dj{r zqe?EuH9VqZLwE0SCFD%eWRni*$K?wXIAaz(K?P9VYJu?bPkJC8?W;RI@Gn{b;?<8i zxE_c&_^;}LQQ5!rz#S+6fQ81?(IhjBg)%7WH}tq8x=1@r5z5nUN8w@V=To^3c8;?Y z^;o&+NlhBmZ+PY_1L3x>pWF1|BsU~b-c5N05|;mxhNS-4&7#(kUNd7ARu#f@s@?7V zlvB-!0JsoD=^yC#lj_z!xW>ZCefwLdPj8H!g4qe5?Vdh)5O2&%^VFQ!xYoY6bRgc_ zNlZPB8jR9>vNa*;o6GVP_F|wTRP4Ji!}g-s#ixTHX$*Sz1!R{W+}O`$@$Fk8)jKkb zlD!Z$Tv9*8AjECbDGXIpx;8qQn=&Y2uRcwLveeTj3ci{%lo9McR2LH#;_U?YC1$rw zM$Bs(Aa})rf@D%dLLcAn`e1BCDKM&`+@^Jvo!?eHF?#Z&+R)wR!;k8j+w*p{pu3Id zkLrfo^LDkbyUpp3>MMb2f5Gjil+*r#4gimr&tEfc^~s}h8s$i{FQIEcKKVklvcxLZ z*I`Qf#5~e*K?Z&ACtgOzI^ARQ0_ySUZL!hnWnOcFRL}0^LsSi$=#0NJ62rcAU z7FcHMW@-q$SY0B;zzn_pMfMraEsk0UO2_9T7ZhUgBR-~1=kI%HGy^_2=;Usxbt;S78H;iIQ?EY+yD^@>4QuxA!W#1bJgj+X N01s=nZhrvuzW|OvoBRL( literal 0 HcmV?d00001 diff --git a/testing/btest/core/tunnels/false-teredo.bro b/testing/btest/core/tunnels/false-teredo.bro index ebb428f65a..37088e9535 100644 --- a/testing/btest/core/tunnels/false-teredo.bro +++ b/testing/btest/core/tunnels/false-teredo.bro @@ -2,6 +2,7 @@ # @TEST-EXEC: test ! 
-e weird.log # @TEST-EXEC: bro -r $TRACES/tunnels/false-teredo.pcap %INPUT Tunnel::yielding_teredo_decapsulation=F >output # @TEST-EXEC: btest-diff weird.log +# @TEST-EXEC: btest-diff dpd.log function print_teredo(name: string, outer: connection, inner: teredo_hdr) { diff --git a/testing/btest/core/tunnels/teredo_bubble_with_payload.test b/testing/btest/core/tunnels/teredo_bubble_with_payload.test new file mode 100644 index 0000000000..f45d8ca585 --- /dev/null +++ b/testing/btest/core/tunnels/teredo_bubble_with_payload.test @@ -0,0 +1,36 @@ +# @TEST-EXEC: bro -r $TRACES/tunnels/teredo_bubble_with_payload.pcap %INPUT >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff tunnel.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff weird.log + +function print_teredo(name: string, outer: connection, inner: teredo_hdr) + { + print fmt("%s: %s", name, outer$id); + print fmt(" ip6: %s", inner$hdr$ip6); + if ( inner?$auth ) + print fmt(" auth: %s", inner$auth); + if ( inner?$origin ) + print fmt(" origin: %s", inner$origin); + } + +event teredo_packet(outer: connection, inner: teredo_hdr) + { + print_teredo("packet", outer, inner); + } + +event teredo_authentication(outer: connection, inner: teredo_hdr) + { + print_teredo("auth", outer, inner); + } + +event teredo_origin_indication(outer: connection, inner: teredo_hdr) + { + print_teredo("origin", outer, inner); + } + +event teredo_bubble(outer: connection, inner: teredo_hdr) + { + print_teredo("bubble", outer, inner); + } From c30c0d5ff2606507b92f4d3a96c190d18d1da438 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 20 Jun 2012 13:56:42 -0400 Subject: [PATCH 418/651] Very small updates to the tunnels framework. - Make the uid field optional since it's conceptually incorrect for proxies being treated as tunnels to have it. - Reordered two fields in the log. - Reduced the default tunnel expiration interface to something more reasonable (1 hour). --- scripts/base/frameworks/tunnels/main.bro | 16 ++++++++++------ scripts/base/init-bare.bro | 2 +- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro index 869c501fe6..0861559558 100644 --- a/scripts/base/frameworks/tunnels/main.bro +++ b/scripts/base/frameworks/tunnels/main.bro @@ -28,17 +28,20 @@ export { ts: time &log; ## The unique identifier for the tunnel, which may correspond ## to a :bro:type:`connection`'s *uid* field for non-IP-in-IP tunnels. - uid: string &log; + ## This is optional because there could be numerous connections + ## for payload proxies like SOCKS but we should treat it as a single + ## tunnel. + uid: string &log &optional; ## The tunnel "connection" 4-tuple of endpoint addresses/ports. ## For an IP tunnel, the ports will be 0. id: conn_id &log; - ## The type of activity that occurred. - action: Action &log; ## The type of tunnel. tunnel_type: Tunnel::Type &log; + ## The type of activity that occurred. + action: Action &log; }; - ## Logs all tunnels in an ecapsulation chain with action + ## Logs all tunnels in an encapsulation chain with action ## :bro:see:`Tunnel::DISCOVER` that aren't already in the ## :bro:id:`Tunnel::active` table and adds them if not. global register_all: function(ecv: EncapsulatingConnVector); @@ -71,7 +74,7 @@ export { ## The amount of time a tunnel is not used in establishment of new ## connections before it is considered inactive/expired. 
- const expiration_interval = 24hrs &redef; + const expiration_interval = 1hrs &redef; ## Currently active tunnels. That is, tunnels for which new, encapsulated ## connections have been seen in the interval indicated by @@ -104,7 +107,8 @@ function register(ec: EncapsulatingConn) { local tunnel: Info; tunnel$ts = network_time(); - tunnel$uid = ec$uid; + if ( ec?$uid ) + tunnel$uid = ec$uid; tunnel$id = ec$cid; tunnel$action = DISCOVER; tunnel$tunnel_type = ec$tunnel_type; diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 3a57a65b20..17ea0823ac 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -191,7 +191,7 @@ export { tunnel_type: Tunnel::Type; ## A globally unique identifier that, for non-IP-in-IP tunnels, ## cross-references the *uid* field of :bro:type:`connection`. - uid: string; + uid: string &optional; } &log; } # end export module GLOBAL; From 896f252a31dfa59dc75eb7a1d9326ca04be79fd7 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 20 Jun 2012 13:58:25 -0400 Subject: [PATCH 419/651] Updates for the SOCKS analyzer. - Now supports SOCKSv5 in the analyzer and the DPD sigs. - Reworked the core events. - Tests. - A SOCKS log! --- scripts/base/frameworks/dpd/dpd.sig | 27 +++- scripts/base/protocols/socks/__load__.bro | 1 + scripts/base/protocols/socks/consts.bro | 41 +++++++ scripts/base/protocols/socks/main.bro | 93 +++++++++++++- src/event.bif | 20 ++- src/socks-analyzer.pac | 116 +++++++++++++++--- src/socks-protocol.pac | 115 ++++++++++++++--- src/socks.pac | 2 +- .../socks.log | 8 ++ .../tunnel.log | 8 ++ .../socks.log | 8 ++ .../tunnel.log | 8 ++ testing/btest/Traces/socks-with-ssl.trace | Bin 0 -> 5695 bytes testing/btest/Traces/socks.trace | Bin 0 -> 11260 bytes .../scripts/base/protocols/socks/trace1.test | 6 + .../scripts/base/protocols/socks/trace2.test | 5 + 16 files changed, 411 insertions(+), 47 deletions(-) create mode 100644 scripts/base/protocols/socks/consts.bro create mode 100644 testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log create mode 100644 testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log create mode 100644 testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log create mode 100644 testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log create mode 100644 testing/btest/Traces/socks-with-ssl.trace create mode 100644 testing/btest/Traces/socks.trace create mode 100644 testing/btest/scripts/base/protocols/socks/trace1.test create mode 100644 testing/btest/scripts/base/protocols/socks/trace2.test diff --git a/scripts/base/frameworks/dpd/dpd.sig b/scripts/base/frameworks/dpd/dpd.sig index 305383809d..245e79bfdf 100644 --- a/scripts/base/frameworks/dpd/dpd.sig +++ b/scripts/base/frameworks/dpd/dpd.sig @@ -162,33 +162,48 @@ signature dpd_teredo { enable "teredo" } -signature dpd_socks_client { +signature dpd_socks4_client { ip-proto == tcp # '32' is a rather arbitrary max length for the user name. payload /^\x04[\x01\x02].{0,32}\x00/ tcp-state originator } -signature dpd_socks_server { +signature dpd_socks4_server { ip-proto == tcp - requires-reverse-signature dpd_socks_client + requires-reverse-signature dpd_socks4_client payload /^\x00[\x5a\x5b\x5c\x5d]/ tcp-state responder enable "socks" } -signature dpd_socks_reverse_client { +signature dpd_socks4_reverse_client { ip-proto == tcp # '32' is a rather arbitrary max length for the user name. 
payload /^\x04[\x01\x02].{0,32}\x00/ tcp-state responder } -signature dpd_socks_reverse_server { +signature dpd_socks4_reverse_server { ip-proto == tcp - requires-reverse-signature dpd_socks_client + requires-reverse-signature dpd_socks4_reverse_client payload /^\x00[\x5a\x5b\x5c\x5d]/ tcp-state originator enable "socks" } +signature dpd_socks5_client { + ip-proto == tcp + payload /^\x05/ + tcp-state originator +} + +signature dpd_socks5_server { + ip-proto == tcp + requires-reverse-signature dpd_socks5_client + payload /^\x05/ + tcp-state responder + enable "socks" +} + + diff --git a/scripts/base/protocols/socks/__load__.bro b/scripts/base/protocols/socks/__load__.bro index d551be57d3..0098b81a7a 100644 --- a/scripts/base/protocols/socks/__load__.bro +++ b/scripts/base/protocols/socks/__load__.bro @@ -1 +1,2 @@ +@load ./consts @load ./main \ No newline at end of file diff --git a/scripts/base/protocols/socks/consts.bro b/scripts/base/protocols/socks/consts.bro new file mode 100644 index 0000000000..6341262041 --- /dev/null +++ b/scripts/base/protocols/socks/consts.bro @@ -0,0 +1,41 @@ +module SOCKS; + +export { + type RequestType: enum { + CONNECTION = 1, + PORT = 2, + }; + + const v5_authentication_methods: table[count] of string = { + [0] = "No Authentication Required", + [1] = "GSSAPI", + [2] = "Username/Password", + [3] = "Challenge-Handshake Authentication Protocol", + [4] = "Unassigned", + [5] = "Challenge-Response Authentication Method", + [6] = "Secure Sockets Layer", + [7] = "NDS Authentication", + [8] = "Multi-Authentication Framework", + [255] = "No Acceptable Methods", + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + + const v4_status: table[count] of string = { + [0x5a] = "succeeded", + [0x5b] = "general SOCKS server failure", + [0x5c] = "request failed because client is not running identd", + [0x5d] = "request failed because client's identd could not confirm the user ID string in the request", + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + + const v5_status: table[count] of string = { + [0] = "succeeded", + [1] = "general SOCKS server failure", + [2] = "connection not allowed by ruleset", + [3] = "Network unreachable", + [4] = "Host unreachable", + [5] = "Connection refused", + [6] = "TTL expired", + [7] = "Command not supported", + [8] = "Address type not supported", + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + +} \ No newline at end of file diff --git a/scripts/base/protocols/socks/main.bro b/scripts/base/protocols/socks/main.bro index 54d181e43e..cceea68758 100644 --- a/scripts/base/protocols/socks/main.bro +++ b/scripts/base/protocols/socks/main.bro @@ -1,15 +1,98 @@ @load base/frameworks/tunnels +@load ./consts module SOCKS; export { - type RequestType: enum { - CONNECTION = 1, - PORT = 2, + redef enum Log::ID += { LOG }; + + type Info: record { + ## Time when the proxy connection was first detected. + ts: time &log; + uid: string &log; + id: conn_id &log; + ## Protocol version of SOCKS. + version: count &log; + ## Username for the proxy if extracted from the network. + user: string &log &optional; + ## Server status for the attempt at using the proxy. + status: string &log &optional; + ## Client requested address. Mutually exclusive with req_name. + req_h: addr &log &optional; + ## Client requested domain name. Mutually exclusive with req_h. + req_name: string &log &optional; + ## Client requested port. + req_p: port &log &optional; + ## Server bound address. 
Mutually exclusive with bound_name. + bound_h: addr &log &optional; + ## Server bound domain name. Mutually exclusive with bound_h. + bound_name: string &log &optional; + ## Server bound port. + bound_p: port &log &optional; }; + + ## Event that can be handled to access the SOCKS + ## record as it is sent on to the logging framework. + global log_socks: event(rec: Info); } -event socks_request(c: connection, request_type: count, dstaddr: addr, dstname: string, p: port, user: string) +event bro_init() &priority=5 { - Tunnel::register([$cid=c$id, $tunnel_type=Tunnel::SOCKS, $uid=c$uid]); + Log::create_stream(SOCKS::LOG, [$columns=Info, $ev=log_socks]); } + +redef record connection += { + socks: SOCKS::Info &optional; +}; + +# Configure DPD +redef capture_filters += { ["socks"] = "tcp port 1080" }; +redef dpd_config += { [ANALYZER_SOCKS] = [$ports = set(1080/tcp)] }; +redef likely_server_ports += { 1080/tcp }; + +function set_session(c: connection, version: count) + { + if ( ! c?$socks ) + c$socks = [$ts=network_time(), $id=c$id, $uid=c$uid, $version=version]; + } + +event socks_request(c: connection, version: count, request_type: count, + dstaddr: addr, dstname: string, p: port, user: string) &priority=5 + { + set_session(c, version); + + if ( dstaddr != [::] ) + c$socks$req_h = dstaddr; + if ( dstname != "" ) + c$socks$req_name = dstname; + c$socks$req_p = p; + + # Copy this conn_id and set the orig_p to zero because in the case of SOCKS proxies there will + # be potentially many source ports since a new proxy connection is established for each + # proxied connection. We treat this as a singular "tunnel". + local cid = copy(c$id); + cid$orig_p = 0/tcp; + Tunnel::register([$cid=cid, $tunnel_type=Tunnel::SOCKS, $payload_proxy=T]); + } + +event socks_reply(c: connection, version: count, reply: count, dstaddr: addr, dstname: string, p: port) &priority=5 + { + set_session(c, version); + + if ( version == 5 ) + c$socks$status = v5_status[reply]; + else if ( version == 4 ) + c$socks$status = v4_status[reply]; + + if ( dstaddr != [::] ) + c$socks$bound_h = dstaddr; + if ( dstname != "" ) + c$socks$bound_name = dstname; + + c$socks$bound_p = p; + } + +event socks_reply(c: connection, version: count, reply: count, dstaddr: addr, dstname: string, p: port) &priority=-5 + { + Log::write(SOCKS::LOG, c$socks); + } \ No newline at end of file diff --git a/src/event.bif b/src/event.bif index 4d28ab7a40..d1e28a98e5 100644 --- a/src/event.bif +++ b/src/event.bif @@ -6101,7 +6101,9 @@ event signature_match%(state: signature_state, msg: string, data: string%); ## ## c: The parent connection of the proxy. ## -## t: The type of the request. +## version: The version of SOCKS this message used. +## +## request_type: The type of the request. ## ## dstaddr: Address that the tunneled traffic should be sent to. ## @@ -6109,13 +6111,23 @@ event signature_match%(state: signature_state, msg: string, data: string%); ## ## p: The destination port for the proxied traffic. ## -## user: Username given for the SOCKS connection. -event socks_request%(c: connection, request_type: count, dstaddr: addr, dstname: string, p: port, user: string%); +## user: Username given for the SOCKS connection. This is not yet implemented for SOCKSv5. +event socks_request%(c: connection, version: count, request_type: count, dstaddr: addr, dstname: string, p: port, user: string%); ## Generated when a SOCKS reply is analyzed. ## +## c: The parent connection of the proxy. 
## -event socks_reply%(c: connection, granted: bool, dst: addr, p: port%); +## version: The version of SOCKS this message used. +## +## reply: The status reply from the server. +## +## dstaddr: The address that the server sent the traffic to. +## +## dstname: The name the server sent the traffic to. Only applicable for SOCKSv5. +## +## p: The destination port for the proxied traffic. +event socks_reply%(c: connection, version: count, reply: count, dstaddr: addr, dstname: string, p: port%); ## Generated when a protocol analyzer finds an identification of a software ## used on a system. This is a protocol-independent event that is fed by diff --git a/src/socks-analyzer.pac b/src/socks-analyzer.pac index 88a29fe383..0842303f40 100644 --- a/src/socks-analyzer.pac +++ b/src/socks-analyzer.pac @@ -19,39 +19,127 @@ StringVal* array_to_string(vector *a) %} refine connection SOCKS_Conn += { - function socks_request(cmd: uint8, dstaddr: uint32, dstname: uint8[], p: uint16, user: uint8[]): bool + + function socks4_request(request: SOCKS4_Request): bool %{ + StringVal *dstname; + if ( ${request.v4a} ) + dstname = array_to_string(${request.name}); + BifEvent::generate_socks_request(bro_analyzer(), bro_analyzer()->Conn(), - cmd, - new AddrVal(htonl(dstaddr)), - array_to_string(dstname), - new PortVal(p | TCP_PORT_MASK), - array_to_string(user)); + 4, + ${request.command}, + new AddrVal(htonl(${request.addr})), + dstname, + new PortVal(${request.port} | TCP_PORT_MASK), + array_to_string(${request.user})); static_cast(bro_analyzer())->EndpointDone(true); return true; %} - function socks_reply(granted: bool, dst: uint32, p: uint16): bool + function socks4_reply(reply: SOCKS4_Reply): bool %{ BifEvent::generate_socks_reply(bro_analyzer(), bro_analyzer()->Conn(), - granted, - new AddrVal(htonl(dst)), - new PortVal(p | TCP_PORT_MASK)); + 4, + ${reply.status}, + new AddrVal(htonl(${reply.addr})), + new StringVal(""), + new PortVal(${reply.port} | TCP_PORT_MASK)); bro_analyzer()->ProtocolConfirmation(); static_cast(bro_analyzer())->EndpointDone(false); return true; %} + + function socks5_request(request: SOCKS5_Request): bool + %{ + AddrVal *ip_addr = 0; + StringVal *domain_name = 0; + + // This is dumb and there must be a better way (checking for presence of a field)... + switch ( ${request.remote_name.addr_type} ) + { + case 1: + ip_addr = new AddrVal(htonl(${request.remote_name.ipv4})); + break; + + case 3: + domain_name = new StringVal(${request.remote_name.domain_name.name}.length(), + (const char*) ${request.remote_name.domain_name.name}.data()); + break; + + case 4: + ip_addr = new AddrVal(IPAddr(IPv6, (const uint32_t*) ${request.remote_name.ipv6}, IPAddr::Network)); + break; + } + + BifEvent::generate_socks_request(bro_analyzer(), + bro_analyzer()->Conn(), + 5, + ${request.command}, + ip_addr, + domain_name, + new PortVal(${request.port} | TCP_PORT_MASK), + new StringVal("")); + + static_cast(bro_analyzer())->EndpointDone(true); + + return true; + %} + + function socks5_reply(reply: SOCKS5_Reply): bool + %{ + AddrVal *ip_addr = 0; + StringVal *domain_name = 0; + + // This is dumb and there must be a better way (checking for presence of a field)... 
+ switch ( ${reply.bound.addr_type} ) + { + case 1: + ip_addr = new AddrVal(htonl(${reply.bound.ipv4})); + break; + + case 3: + domain_name = new StringVal(${reply.bound.domain_name.name}.length(), + (const char*) ${reply.bound.domain_name.name}.data()); + break; + + case 4: + ip_addr = new AddrVal(IPAddr(IPv6, (const uint32_t*) ${reply.bound.ipv6}, IPAddr::Network)); + break; + } + + BifEvent::generate_socks_reply(bro_analyzer(), + bro_analyzer()->Conn(), + 5, + ${reply.reply}, + ip_addr, + domain_name, + new PortVal(${reply.port} | TCP_PORT_MASK)); + + bro_analyzer()->ProtocolConfirmation(); + static_cast(bro_analyzer())->EndpointDone(false); + return true; + %} + }; -refine typeattr SOCKS_Request += &let { - proc: bool = $context.connection.socks_request(command, addr, empty, port, user); +refine typeattr SOCKS4_Request += &let { + proc: bool = $context.connection.socks4_request(this); }; -refine typeattr SOCKS_Reply += &let { - proc: bool = $context.connection.socks_reply((status == 0x5a), addr, port); +refine typeattr SOCKS4_Reply += &let { + proc: bool = $context.connection.socks4_reply(this); +}; + +refine typeattr SOCKS5_Request += &let { + proc: bool = $context.connection.socks5_request(this); +}; + +refine typeattr SOCKS5_Reply += &let { + proc: bool = $context.connection.socks5_reply(this); }; diff --git a/src/socks-protocol.pac b/src/socks-protocol.pac index a908c2da68..8ae81a6e02 100644 --- a/src/socks-protocol.pac +++ b/src/socks-protocol.pac @@ -1,34 +1,115 @@ -type SOCKS_Message(is_orig: bool) = case is_orig of { - true -> request: SOCKS_Request; - false -> reply: SOCKS_Reply; + +type SOCKS_Version(is_orig: bool) = record { + version: uint8; + msg: case version of { + 4 -> socks4_msg: SOCKS4_Message(is_orig); + 5 -> socks5_msg: SOCKS5_Message(is_orig); + default -> socks_msg_fail: empty; + }; }; -type SOCKS_Request = record { - version: uint8; +# SOCKS5 Implementation +type SOCKS5_Message(is_orig: bool) = case $context.connection.v5_past_authentication() of { + true -> msg: SOCKS5_Real_Message(is_orig); + false -> auth: SOCKS5_Auth_Negotiation(is_orig); +}; + +type SOCKS5_Auth_Negotiation(is_orig: bool) = case is_orig of { + true -> req: SOCKS5_Auth_Negotiation_Request; + false -> rep: SOCKS5_Auth_Negotiation_Reply; +}; + +type SOCKS5_Auth_Negotiation_Request = record { + method_count: uint8; + methods: uint8[method_count]; +}; + +type SOCKS5_Auth_Negotiation_Reply = record { + selected_auth_method: uint8; +} &let { + past_auth = $context.connection.set_v5_past_authentication(); +}; + +type SOCKS5_Real_Message(is_orig: bool) = case is_orig of { + true -> request: SOCKS5_Request; + false -> reply: SOCKS5_Reply; +}; + +type Domain_Name = record { + len: uint8; + name: bytestring &length=len; +} &byteorder = bigendian; + +type SOCKS5_Address = record { + addr_type: uint8; + addr: case addr_type of { + 1 -> ipv4: uint32; + 3 -> domain_name: Domain_Name; + 4 -> ipv6: uint32[4]; + default -> err: bytestring &restofdata &transient; + }; +} &byteorder = bigendian; + +type SOCKS5_Request = record { + command: uint8; + reserved: uint8; + remote_name: SOCKS5_Address; + port: uint16; +} &byteorder = bigendian; + +type SOCKS5_Reply = record { + reply: uint8; + reserved: uint8; + bound: SOCKS5_Address; + port: uint16; +} &byteorder = bigendian; + + +# SOCKS4 Implementation +type SOCKS4_Message(is_orig: bool) = case is_orig of { + true -> request: SOCKS4_Request; + false -> reply: SOCKS4_Reply; +}; + +type SOCKS4_Request = record { command: uint8; port: uint16; addr: uint32; user: uint8[] 
&until($element == 0); - host: case v4a of { true -> name: uint8[] &until($element == 0); # v4a false -> empty: uint8[] &length=0; } &requires(v4a); - - # FIXME: Can this be non-zero? If so we need to keep it for the - # next analyzer. - rest: bytestring &restofdata; } &byteorder = bigendian &let { v4a: bool = (addr <= 0x000000ff); }; -type SOCKS_Reply = record { - zero: uint8; - status: uint8; +type SOCKS4_Reply = record { + zero: uint8; + status: uint8; port: uint16; addr: uint32; - - # FIXME: Can this be non-zero? If so we need to keep it for the - # next analyzer. - rest: bytestring &restofdata; } &byteorder = bigendian; + + +refine connection SOCKS_Conn += { + %member{ + bool v5_authenticated_; + %} + + %init{ + v5_authenticated_ = false; + %} + + function v5_past_authentication(): bool + %{ + return v5_authenticated_; + %} + + function set_v5_past_authentication(): bool + %{ + v5_authenticated_ = true; + return true; + %} +}; + diff --git a/src/socks.pac b/src/socks.pac index 4f16582690..15d3580674 100644 --- a/src/socks.pac +++ b/src/socks.pac @@ -18,7 +18,7 @@ connection SOCKS_Conn(bro_analyzer: BroAnalyzer) { %include socks-protocol.pac flow SOCKS_Flow(is_orig: bool) { - datagram = SOCKS_Message(is_orig) withcontext(connection, this); + datagram = SOCKS_Version(is_orig) withcontext(connection, this); }; %include socks-analyzer.pac \ No newline at end of file diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log new file mode 100644 index 0000000000..4241190234 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path socks +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status req_h req_name req_p bound_h bound_name bound_p +#types time string addr port addr port count string string addr string port addr string port +1340213015.276495 UWkUyAuUGXf 10.0.0.55 53994 60.190.189.214 8124 5 - succeeded - www.osnews.com 80 192.168.0.31 - 2688 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log new file mode 100644 index 0000000000..a7068cd0da --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path tunnel +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action +#types time string addr port addr port enum enum +1340213015.276495 - 10.0.0.55 0 60.190.189.214 8124 Tunnel::SOCKS Tunnel::DISCOVER diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log new file mode 100644 index 0000000000..556ed9263e --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path socks +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status req_h req_name req_p bound_h bound_name bound_p +#types time string addr port addr port count string string addr string port addr string port +1340113261.914619 UWkUyAuUGXf 10.0.0.50 59580 85.194.84.197 1080 5 - succeeded - www.google.com 443 0.0.0.0 - 443 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log 
b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log new file mode 100644 index 0000000000..5eac3ae7ad --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path tunnel +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action +#types time string addr port addr port enum enum +1340113261.914619 - 10.0.0.50 0 85.194.84.197 1080 Tunnel::SOCKS Tunnel::DISCOVER diff --git a/testing/btest/Traces/socks-with-ssl.trace b/testing/btest/Traces/socks-with-ssl.trace new file mode 100644 index 0000000000000000000000000000000000000000..da27cc88736d8906a4789ee990dbca865cfd6685 GIT binary patch literal 5695 zcmb_fc{o(>`+jFJ7zSCBJxi1Ij9p|8MWrl7vW=xFjAampkR(wml#*zX%BP}5X)z>g z${vxlsI;jNQheVt^1X^a-|P3sZ?5aPu6fU#x!>o0p69;bbK38f7GMD$%zmb)0l>gT zKtli8ZRJF;27X7Q3-PwOe{Fk%LD{QNF0cXs)4FO?U`n{Nj0k|HQc4g6r?$6#(#900JR;?NK~1hHErtaA)>lqaz7H04NdM z3s7Wq2fY5<+ae=6{BVbsU_p+ni>uh^_Oob9kKutXL_%X;u4IcRXs+hV=j7S~5e4i8 zrl%b`W^&4gC_4&fO@AhT&gP7EVvEsRw`q!zTCnD+%aB5H#kIQ}zr*$?Yhb zLrwsaYqv*%>L3ss=kYM zV46i_34;`{8;*`Xi=V{-l7J42lf}(iz!HLTu>@K`lf}#8V_{i%kPXbB?k}4Hv#8_CkSC(%_xe9Jh~tXNjh%vie&i8TF`^S!UGtzAb(l1JtJzVtR2Hgog#_| zqqpINNWX4#u(pu3FhiupNE8YM(bPrQA6K~4LJYJJO)VV*9RpWn%Wu0mIZ%X<1t<)c z5YfUbfaXUJ4WfC?Vsi?UR@1|S7+$m>S!bGWwi02JoMdr zcws`jg4GU8B(p|&k8sYdBL-4M;*$ozGFBpU>vlETKdflg@6AtS1&_30Vc#*Q65b#Q zuMl3CzLXFKGmXb00MSKPGI$L{4N=Kon!hAjaSMYHyi7xb8XBO^V0Q#{5{;Rv!=hW5 zsl%TTGWtJ7yc{A!;8G#L#&W`;nEi$&aC;`(V3Nnp@faj@_A&;8_d|S;#j|S!lPvmc zGdeXa__y5<E!Flr9)01{Yi37Wl^gjM1Jn!1m<#=&rAjO0W5HngrUr3*Y1<`CX`F{} zPhrpcn@5KQcQ8-p8q#xO-`SHQN}Hr1PAcS|d85RK^r7Bx#>-OaR;xs1tdple8I~;1PH_&Pf#U_;Xr+%^FdN&)GMW z=0NigQblJ_ouY^6phL(bq;3)56&5C|C2Jq<9YCYX+K18ty+Wg8Eqp^6G(Q^Ei$S9Y z$(n^Tw$MXqj3`tth&G}FHS%A%pi9kJx%~f%^M6UAz0JQlU1Z0_CdyQ-)PVvMo)Dwr z2X#J^)E}C*oSAp%@#PxN@rPJiKe)Bz2YDDOA=S8=)}|L4qukV{X`3^kqW{fsN|~Bw z-x1x{#D73cwa+TUq;TBV;DfPz-lK9CXu3(1@Qm?_+JF=ZLKd@rpr|jrKs9>HvyKMI z-5Wi{e@S8rNti?g|0@YDgaDNRog3jnukqvY(9{UfG-d}H4)M$-wg6C99)+$v>+SOn zGV+B9^dNg8_2)rG%@7$`7682+thqVk?Go2G0TY@n8gsoJ1l0LLN1i{WeMi=`5qn zL;%9zz_ZOKnChj)b{)-GUw~~;LKOZ_UZRasR>VVpoBi{@I2D07V_GXPT$nk+L>E4VSs%edE-|9q;M*mcaXrlVc~DF`h<|O?(e+ zusbjs*N^G-D=X{vcI@gb9x!pFXG?QjLeK6-UI1!wK+FtZs^R{o_|dZ>dhgVPf?J1= z{HCn}=B&iLIG#02hD{CcaeIsD-no0?Wz>fwS}iNiYi8TWv~F*qhKSvC-r0NIiwfdd zwph|>E^@MUZ=W8 zy^K`EWvoamgs2mo z6h_Zn49ZfWGv%?l|2xa|POZsKP6+7!^`v`va28j0E&|4PXr6$6B` zs~-#{9v2o%6_9a$YVnS5z}?N^YY^@(Zkp)nT) zJ7CRz8#282R}@%nn@Rx0a2V_j0}o+l^wpIUW<80FK?|@t!|>^HWY`nm7kN&s79&b&&fAEYI_`!We$org*sgO)s)YuC5 z;;;SB$_6+&>s(ez@`MwV`o6+r{c>)^1x*p!`?}QBcgBqwlf^s=H0woOufAkcM2lXQ zS4ymR;Xd?XJHJW|Ry&=gE|^xbk3Z!Gb6;t4r>LZX_htK+nyJ>DU8VxnW zi=5x8hphG_JlV_<=dNnwSYtgJW3pkLuyG~ss>9HZ@QCP_#0u>V2jAtWmXfO-Xy=C4 z9$zF{-1W`p-t$id=wj>z*8)e^@jUn(Qn-iw z!uO~W-Yz#@^X0}Ya(fb|G_iAKH1847Y24#7#VKmN#QwTge?H}X$}y~veo3)yi;hP3 zhCm^i!T04^A>o~=&im4FBKNWaw<(OUgrzdMvNOcK8=8?)G7c(Yxl4jNpB^f@Dc z6X#8S@WZ}bDe=CP2pg`8)v-@t|OaWA4pV# zSQ))O=ZgBlw8fSu_h`q6JIT2nV;Y<&F??`M-+z_O_b2xY>x?iLnRT5np`T$a#p63C zZ|T1cJtrqetqkgKymODkwq)QV$Iy|gH%aE1(rb0}sjr+MX5~LWmTh{|;6q zvWuS9%1@mr%Z`-rZMFHC^^97?v-MolpkW&3D~rRrknlPC?z8vz z?tM$b$hRahheT*Gmjo)vw*ygQOK+zk5JWw+D2kH_n3XXZ;~vd1^HJL7FN3hi7`a1E60!fww?! 
zN`>^Q+R^Cy+Dz&9L-ces?~+5(-R@q57T(dZ33P3ezT=;=;?9)6!_f&RNIv_0a+ zX!N={rt~MD>FF+jK4~e*0{uD)>6u@QM&B~kl-}N{r&9o3uCy`GwXvv;OKn89&PI@Y z!r~n;8sQ9{AGbbkuCS{HQQ6w$6pGa0HS{D9kqC@PNQPe+3=RwF&43;Teuu*>-r1xg zXuIz^6Wd$C4C_@6%dl@|LlTVh{(MM<#I+?hFjq9gjL1!=LE_CzBND00uq)>p6Y&_t z6+r8M)S3v*G;;!bzc+Nk-mgrYupT(!hRR{E|4pJ6ilF-*!jy!>N-hqG05mwvh=Gnu zNaU{=kw_ivU(s(M{sM`3jg{{K=IGAcph>{S%OP}$kFVISR z9u5H>(F`+Z#)yT`nO|-gkqDf5;=C~tIMd2*(V|8EGKEyMNa3F?D?*asG)sRX84Ndb z20K>_o$>QV6KB)__dW*heVC~;92osSl%48McR;*&oq)sv!{IO^PA7wC$tO&&8j(mH ze7G@1@7_imNCZKQIkO+}B&i^qeuH4J*32pF%?qGY_PlN46qpq|R1T_v>rpzV)Y*n0 zXrm1z#@-?z1z1EgOo`Y9XJa5y{>F$z;O0fkjEPZ^LNq{9lqnV|0*d7lsZu0o`imvG zXtYq66u|IjkV(-pg_4gNF9n&Dsu0O(0%>zv7h$JCwO59-Jj>@6QI4~=-*0<%an>sz`gKMk(5@V@DoWjZykC=q%>QW zBaw>vsJL8G?1$!vatkw+A~Go=LC8nJ&4$aQQc<>2B9rpb0+FbgmRTq%1+UK*<%;AY zIUmhaDvS960mkkKfd42fkbsHf!Of>iB@0VLA&g)&JzbocqX>zK2;nhAbVe38i^0p{ z<#2Md_`(ndi%AdUvbljAeoBZSE+Hm8K5JnSn;FT@i(K?tY*ZmnDbLD@lf)+krpqG* zkhU;8GbD*lD5aB0F}bvO;D%g@D2J9Jk!Az4=ZbRtP$nCll_^D;bUGJhGWcu;pA(2i#S6(K zGU*i%dbunFaRk)|9HxnavFdk(iIPY)Nt7KzOuVy-I&kW)zadfVC?KHq5r(qpY&1cp z1YxAL39JTK6TMAzbXEzpiNQn}Tt1Vco?@i=c*Exlhttr&EK0eokPrW#20|@_6$l|( z2Q>f(jV`K+$uKw1wHoJUymNDfy#!?Yj;j^=+$2juw%b9M%h?Db0oe|RS+>vrBoY#f zO`dr=BFJ{)YsN%fwu_koiaeQISt2j=&o36g%=Ld(uFsT9K&~@^FfWF=9)vO&d={6_ z4jP{8&uWJrT&B5>u$8aCT)*|5ajx4-gqTFW%Av1z_RvGonNmlV!cczyHqnB5oB@xz ze8QTx5sB2k+Nfr*3e_B0sclj5n2O}ux52%N3zCWNI=pCX3^>~DVxgOtTtR9K{ z<_ZTMRFBJv`dqi)4b;O0x;*R~B9af30}eCw_}6BQ9@QTlkqFe|n!aML(-Mc*3X*J@ zR1pyLvQ+rzl?uWV5s(kMUOreTox^AI_;mVk`N;Uq&JfGy^5F>f9ze@t(g`~7gqY+k zP^Aw>pE4vnl>(yA1H>{T9cw{8rtu)r?wPTC=!n$8!SibjL^nwMe<2^dzf3-$gU$d4 zg@G0Z4%1{u*au3We5_kxob1V2%@vN;P(IF6_448P4Ui8vNW77OMS_5Iz+pqCpKnIfN$0zMuhN z>y*y4gow*jC=HDjAJyt42Njfvb!Ao%pU&XZnOdFHcDJR%C)n{olZ1>8bQ`zuxqf`YAdU1JYh3})L2<iC%S+y=$h;*n=Pq*4FPL$U8WE zBZ0V=XtR$<+>gcVx3YQ5|-gS0rbfz@AxbAm$K8nR2$K#I@ ziSLt0$L#H!ZETK{Y>wO7eu%?;Xl?x=xEM3$BeLDeG2|u}mlj9IW1gNLIyjtibo_YS zxO#NLK5w4`<0l-Z(~oeuhj_s!yu41M=t(cHlT&?~n9L6tj1~^(6r24~NXY3KGY-rM z-6#mJ5{uU?Tv#X0YZeHa;}T9sM4X&GyRm%9sm!diW5#?!wmWBMciztKk{$Vyz5P{t z`^ydvR~#HZ_wcwpZrn9T#~a|=$?1-h)3?sf-@CY63<|o!V&9!IkY3>kOIR zIV1FJWaOEssL#T~zle{&EKI!>5%Fz`@PZ`&%915Fk13+3XkZDi0q1(*%cN2dt7{HbaYqDtj<}n-LY}qaq-;=iQS1w-HFMaX=&Zb zDLpB|o|M#XVQNq6oSxLVJ#*&u%uDM@o8LQsLC=EMdl#hlzMj#Wp4pp`)ti;mn=R@S ziTlKozP$WCNkLzJVQ+p>U!k;8*&RzWtd;9k_svGzBA38j6_{iXU@2iiss6V}+zHn*q%c~&x z$M@jCzQ`cr2QvZY1VS7{25mf|o9fT5AlmnS9hJE2qtgF<@ZGPqO>L(ue`{?B7O&sQ zEB^kt$an75EWy$zm#S^M;$mbxmp{E+_3!zSk}tMSjV)bAJm=Y*d$#=N$2a&ZuWhn(rufZq zS-AiwxvczbwpX0>Pne%tI=0gIdz~7$yV$f&j9RC3Yfp`(&7YI+^K9Nb-q*&5SCQNb zTk>T;%nJ88in7)sy#U;k{QvSGb)xpzr$Kx?{{gZ+ZwBK=l!!QE`Isy@-?ByN(bJZ zd48(w$?d^qRu8Yd!o@APNSN*s9qC{A?SD(Y&aJLoer|8<#h<2+pTa7rUv|Mw^cI64 zD36=_U74(X)mM{q)-m3oW~qK$VUMe}URCkDWqd@WaNZ>=d)~cJD>CkiV0R^US3`K@ z>?g$&Y<=I!m?&r%lXq`r*8QYpXWyF8bJm_op=TZORhbR;6Wi`(hlNqy1UNFg>5MDB zBcpAd@1>;VF}B{tDhy#vl1i}qw)dos-a_GWaK46$5qFPCSiE!6olx8^R7k=pE>A_m z_OQ_h+pvdJ4$cb+uSJH?)wutj1f^D8~FO)bwI>{G1#vAi%=<7Ks)@GtZ@VWOVt}Yf(PE%G^jrI7remTj% z@T;?9ujI~84n>0D>Ja>GEfk0!jD_VK++81jUYL^BT;W=)a;Q9UQn!6asdI4!1)?V; zP7JWMuos;d2#KF>ACU+O#2Ro`r7aM(MDqfX!F^dE{>=--5%!|Bg%b}=%vQp}i3Rqm z_-wXe-*q{1g+FgUxOo9OoiB~q` zEHs!I1W5dK;)q0Q-wz*rWFUG$qM@YxKkjbpt2NW&@=xq;GlBwN++gbGvshwkT^{w8 zr6a>l=`h>(nq+(G^5zO^E^IJczynd6?UX2>A70SqPXVzKG?#FgHJGwED@c5J!dO3a zM4%rrtBi@m^`lUft9+>)e{1as)||Cz+bD(h_)QT4j_^iMloy_E!C=gsH1v2hSCrU= zF#nO&#`#ap6=LR|gj-guC5HU>q0~{LAo1KJvIRxC&4EOD`-nuaWpzY%cmtBf7ZU%p zqA>ol^D;&dI7)qCzp0z=4=BoW;Gi&2EyH1&{|I}Zrhce7XPp0YPc~P$aiF5ScVD0X zR0VVrba{;z*+NCq?Ew`f_}GXe12_MjW>6$LB>rjr&}~@#B^3$#59o((z8B~RbkJ$wpwabXK=X+(-df{qPkYc@ 
z;f{m)aj-?7?e6=4e$Y`sv?(4UnlEBDIE2h6Jo?7i)fS1k0;q9}^&{LhUgZdg;M0cMI!sIC z0b<$1p+}#&jbE@JQF4AXVubD^Ym_?Gb!ejm5G$rysujn^KqA^S8gbPHosGLf#zNvj zK$OXc3Vw4NL)SuLN6~1+wO{LqyMwGD@i8DS*fg{&ZBA^fhQySD(TE#2>WKBMXh`%S ewk7`BH+1S{PGsDJM4a1b#9F0}*Z|@P5dQ;uT%1$@ literal 0 HcmV?d00001 diff --git a/testing/btest/scripts/base/protocols/socks/trace1.test b/testing/btest/scripts/base/protocols/socks/trace1.test new file mode 100644 index 0000000000..fb65b33cbc --- /dev/null +++ b/testing/btest/scripts/base/protocols/socks/trace1.test @@ -0,0 +1,6 @@ +# @TEST-EXEC: bro -r $TRACES/socks.trace %INPUT +# @TEST-EXEC: btest-diff socks.log +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff tunnel.log + +@load base/protocols/socks diff --git a/testing/btest/scripts/base/protocols/socks/trace2.test b/testing/btest/scripts/base/protocols/socks/trace2.test new file mode 100644 index 0000000000..5e3a449120 --- /dev/null +++ b/testing/btest/scripts/base/protocols/socks/trace2.test @@ -0,0 +1,5 @@ +# @TEST-EXEC: bro -r $TRACES/socks-with-ssl.trace %INPUT +# @TEST-EXEC: btest-diff socks.log +# @TEST-EXEC: btest-diff tunnel.log + +@load base/protocols/socks From a60153060d4280c58df83ae9b930a11720cd0fb1 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 20 Jun 2012 14:19:49 -0400 Subject: [PATCH 420/651] SOCKS and tunnel test updates. --- .../Baseline/core.tunnels.ayiya/tunnel.log | 10 +++++----- .../Baseline/core.tunnels.socks/conn.log | 8 -------- .../Baseline/core.tunnels.socks/http.log | 8 -------- .../btest/Baseline/core.tunnels.socks/output | 9 --------- .../Baseline/core.tunnels.socks/tunnel.log | 9 --------- .../Baseline/core.tunnels.teredo/tunnel.log | 14 +++++++------- .../tunnel.log | 14 +++++++------- .../tunnel.log | 8 ++++++++ testing/btest/core/tunnels/socks.bro | 19 ------------------- .../scripts/base/protocols/socks/trace1.test | 1 - .../scripts/base/protocols/socks/trace3.test | 4 ++++ 11 files changed, 31 insertions(+), 73 deletions(-) delete mode 100644 testing/btest/Baseline/core.tunnels.socks/conn.log delete mode 100644 testing/btest/Baseline/core.tunnels.socks/http.log delete mode 100644 testing/btest/Baseline/core.tunnels.socks/output delete mode 100644 testing/btest/Baseline/core.tunnels.socks/tunnel.log create mode 100644 testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log delete mode 100644 testing/btest/core/tunnels/socks.bro create mode 100644 testing/btest/scripts/base/protocols/socks/trace3.test diff --git a/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log b/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log index 512f49b6ee..b4ef2781c6 100644 --- a/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path tunnel -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum -1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA -1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA -1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA -1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA +1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::AYIYA Tunnel::DISCOVER +1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 
216.14.98.22 5072 Tunnel::AYIYA Tunnel::DISCOVER +1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::AYIYA Tunnel::CLOSE +1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::AYIYA Tunnel::CLOSE diff --git a/testing/btest/Baseline/core.tunnels.socks/conn.log b/testing/btest/Baseline/core.tunnels.socks/conn.log deleted file mode 100644 index f8a684d4c6..0000000000 --- a/testing/btest/Baseline/core.tunnels.socks/conn.log +++ /dev/null @@ -1,8 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents -#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] -1208299429.265243 UWkUyAuUGXf 127.0.0.1 62270 127.0.0.1 1080 tcp http,socks 0.008138 152 3950 SF - 0 ShAaDdfF 9 632 9 4430 (empty) diff --git a/testing/btest/Baseline/core.tunnels.socks/http.log b/testing/btest/Baseline/core.tunnels.socks/http.log deleted file mode 100644 index 2dcab3f254..0000000000 --- a/testing/btest/Baseline/core.tunnels.socks/http.log +++ /dev/null @@ -1,8 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path http -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file -#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1208299429.270361 UWkUyAuUGXf 127.0.0.1 62270 127.0.0.1 1080 1 GET www.icir.org / - curl/7.16.3 (powerpc-apple-darwin9.0) libcurl/7.16.3 OpenSSL/0.9.7l zlib/1.2.3 0 3677 200 OK - - - (empty) - - - text/html - - diff --git a/testing/btest/Baseline/core.tunnels.socks/output b/testing/btest/Baseline/core.tunnels.socks/output deleted file mode 100644 index ee5c5b5c20..0000000000 --- a/testing/btest/Baseline/core.tunnels.socks/output +++ /dev/null @@ -1,9 +0,0 @@ -[id=[orig_h=127.0.0.1, orig_p=62270/tcp, resp_h=127.0.0.1, resp_p=1080/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=177, flow_label=0], resp=[size=8, state=4, num_pkts=3, num_bytes_ip=168, flow_label=0], start_time=1208299429.265243, duration=0.002565, service={ -SOCKS -}, addl=, hot=0, history=ShAaDd, uid=UWkUyAuUGXf, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dns=, dns_state=, ftp=, http=, http_state=, irc=, smtp=, smtp_state=, ssh=, ssl=, syslog=] ---- -1 -192.150.187.12 - -80/tcp - diff --git a/testing/btest/Baseline/core.tunnels.socks/tunnel.log b/testing/btest/Baseline/core.tunnels.socks/tunnel.log deleted file mode 100644 index 9ccbe8af26..0000000000 --- a/testing/btest/Baseline/core.tunnels.socks/tunnel.log +++ /dev/null @@ -1,9 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path tunnel -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type -#types time string addr port addr port enum enum -1208299429.267808 UWkUyAuUGXf 127.0.0.1 62270 127.0.0.1 1080 Tunnel::DISCOVER Tunnel::SOCKS -1208299429.273401 UWkUyAuUGXf 127.0.0.1 62270 127.0.0.1 1080 Tunnel::CLOSE Tunnel::SOCKS diff --git a/testing/btest/Baseline/core.tunnels.teredo/tunnel.log 
b/testing/btest/Baseline/core.tunnels.teredo/tunnel.log index 5a2114dd1c..9cead25be1 100644 --- a/testing/btest/Baseline/core.tunnels.teredo/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.teredo/tunnel.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path tunnel -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum -1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::DISCOVER Tunnel::TEREDO -1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::DISCOVER Tunnel::TEREDO -1210953061.292918 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::DISCOVER Tunnel::TEREDO -1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::CLOSE Tunnel::TEREDO -1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::CLOSE Tunnel::TEREDO -1210953076.058333 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::CLOSE Tunnel::TEREDO +1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::DISCOVER +1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::TEREDO Tunnel::DISCOVER +1210953061.292918 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::TEREDO Tunnel::DISCOVER +1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::CLOSE +1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::TEREDO Tunnel::CLOSE +1210953076.058333 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::TEREDO Tunnel::CLOSE diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log index 3f47321245..30f88ed251 100644 --- a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path tunnel -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum -1340127577.336558 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Tunnel::DISCOVER Tunnel::TEREDO -1340127577.339015 k6kgXLOoSKl 192.168.2.16 3797 65.55.158.81 3544 Tunnel::DISCOVER Tunnel::TEREDO -1340127577.351747 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 Tunnel::DISCOVER Tunnel::TEREDO -1340127577.406995 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Tunnel::CLOSE Tunnel::TEREDO -1340127577.406995 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 Tunnel::CLOSE Tunnel::TEREDO -1340127577.406995 k6kgXLOoSKl 192.168.2.16 3797 65.55.158.81 3544 Tunnel::CLOSE Tunnel::TEREDO +1340127577.336558 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::DISCOVER +1340127577.339015 k6kgXLOoSKl 192.168.2.16 3797 65.55.158.81 3544 Tunnel::TEREDO Tunnel::DISCOVER +1340127577.351747 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 Tunnel::TEREDO Tunnel::DISCOVER +1340127577.406995 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::CLOSE +1340127577.406995 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 Tunnel::TEREDO Tunnel::CLOSE +1340127577.406995 k6kgXLOoSKl 192.168.2.16 3797 65.55.158.81 3544 Tunnel::TEREDO Tunnel::CLOSE diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log 
b/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log new file mode 100644 index 0000000000..4723cb99c4 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path tunnel +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action +#types time string addr port addr port enum enum +1208299429.265774 - 127.0.0.1 0 127.0.0.1 1080 Tunnel::SOCKS Tunnel::DISCOVER diff --git a/testing/btest/core/tunnels/socks.bro b/testing/btest/core/tunnels/socks.bro deleted file mode 100644 index 8ab288c9bd..0000000000 --- a/testing/btest/core/tunnels/socks.bro +++ /dev/null @@ -1,19 +0,0 @@ -# @TEST-EXEC: bro -Cr $TRACES/tunnels/socks.pcap %INPUT >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff tunnel.log -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff http.log - -event socks_request(c: connection, request_type: count, dstaddr: addr, - dstname: string, p: port, user: string) - { - print c; - print "---"; - print request_type; - print dstaddr; - print dstname; - print p; - print user; - } - - diff --git a/testing/btest/scripts/base/protocols/socks/trace1.test b/testing/btest/scripts/base/protocols/socks/trace1.test index fb65b33cbc..fb1d9ebaf2 100644 --- a/testing/btest/scripts/base/protocols/socks/trace1.test +++ b/testing/btest/scripts/base/protocols/socks/trace1.test @@ -1,6 +1,5 @@ # @TEST-EXEC: bro -r $TRACES/socks.trace %INPUT # @TEST-EXEC: btest-diff socks.log -# @TEST-EXEC: btest-diff http.log # @TEST-EXEC: btest-diff tunnel.log @load base/protocols/socks diff --git a/testing/btest/scripts/base/protocols/socks/trace3.test b/testing/btest/scripts/base/protocols/socks/trace3.test new file mode 100644 index 0000000000..c3b3b091eb --- /dev/null +++ b/testing/btest/scripts/base/protocols/socks/trace3.test @@ -0,0 +1,4 @@ +# @TEST-EXEC: bro -C -r $TRACES/tunnels/socks.pcap %INPUT +# @TEST-EXEC: btest-diff tunnel.log + +@load base/protocols/socks From 886cc7368f357c15a9cd4f24c6ccad48f111d18c Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 20 Jun 2012 14:20:06 -0400 Subject: [PATCH 421/651] Fix a bug in the SOCKS analyzer. --- src/socks-analyzer.pac | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/socks-analyzer.pac b/src/socks-analyzer.pac index 0842303f40..2c3fd68e09 100644 --- a/src/socks-analyzer.pac +++ b/src/socks-analyzer.pac @@ -22,9 +22,11 @@ refine connection SOCKS_Conn += { function socks4_request(request: SOCKS4_Request): bool %{ - StringVal *dstname; + StringVal *dstname = 0; if ( ${request.v4a} ) dstname = array_to_string(${request.name}); + else + dstname = new StringVal(""); BifEvent::generate_socks_request(bro_analyzer(), bro_analyzer()->Conn(), @@ -77,6 +79,11 @@ refine connection SOCKS_Conn += { break; } + if ( ! ip_addr ) + ip_addr = new AddrVal(uint32(0)); + if ( ! domain_name ) + domain_name = new StringVal(""); + BifEvent::generate_socks_request(bro_analyzer(), bro_analyzer()->Conn(), 5, @@ -113,6 +120,11 @@ refine connection SOCKS_Conn += { break; } + if ( ! ip_addr ) + ip_addr = new AddrVal(uint32(0)); + if ( ! domain_name ) + domain_name = new StringVal(""); + BifEvent::generate_socks_reply(bro_analyzer(), bro_analyzer()->Conn(), 5, From f59736cb17864cc101c6d1121d4173a53f1c6a49 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 20 Jun 2012 15:12:52 -0400 Subject: [PATCH 422/651] SOCKS DPD fixes. - Restricted the SOCKS 5 DPD signatures further. 
- Added protocol violations. --- scripts/base/frameworks/dpd/dpd.sig | 6 ++++-- scripts/base/protocols/socks/consts.bro | 1 - src/socks-analyzer.pac | 28 +++++++++++++++++++++++++ src/socks-protocol.pac | 6 +++++- 4 files changed, 37 insertions(+), 4 deletions(-) diff --git a/scripts/base/frameworks/dpd/dpd.sig b/scripts/base/frameworks/dpd/dpd.sig index 245e79bfdf..05dcee9d49 100644 --- a/scripts/base/frameworks/dpd/dpd.sig +++ b/scripts/base/frameworks/dpd/dpd.sig @@ -194,14 +194,16 @@ signature dpd_socks4_reverse_server { signature dpd_socks5_client { ip-proto == tcp - payload /^\x05/ + # Watch for a few authentication methods to reduce false positives. + payload /^\x05.[\x00\x01\x02]/ tcp-state originator } signature dpd_socks5_server { ip-proto == tcp requires-reverse-signature dpd_socks5_client - payload /^\x05/ + # Watch for a single authentication method to be chosen by the server. + payload /^\x05\x01[\x00\x01\x02]/ tcp-state responder enable "socks" } diff --git a/scripts/base/protocols/socks/consts.bro b/scripts/base/protocols/socks/consts.bro index 6341262041..fb14aae601 100644 --- a/scripts/base/protocols/socks/consts.bro +++ b/scripts/base/protocols/socks/consts.bro @@ -11,7 +11,6 @@ export { [1] = "GSSAPI", [2] = "Username/Password", [3] = "Challenge-Handshake Authentication Protocol", - [4] = "Unassigned", [5] = "Challenge-Response Authentication Method", [6] = "Secure Sockets Layer", [7] = "NDS Authentication", diff --git a/src/socks-analyzer.pac b/src/socks-analyzer.pac index 2c3fd68e09..1d71753fb5 100644 --- a/src/socks-analyzer.pac +++ b/src/socks-analyzer.pac @@ -22,6 +22,7 @@ refine connection SOCKS_Conn += { function socks4_request(request: SOCKS4_Request): bool %{ + StringVal *dstname = 0; if ( ${request.v4a} ) dstname = array_to_string(${request.name}); @@ -59,6 +60,12 @@ refine connection SOCKS_Conn += { function socks5_request(request: SOCKS5_Request): bool %{ + if ( ${request.reserved} != 0 ) + { + bro_analyzer()->ProtocolViolation(fmt("invalid value in reserved field: %d", ${request.reserved})); + return false; + } + AddrVal *ip_addr = 0; StringVal *domain_name = 0; @@ -77,6 +84,11 @@ refine connection SOCKS_Conn += { case 4: ip_addr = new AddrVal(IPAddr(IPv6, (const uint32_t*) ${request.remote_name.ipv6}, IPAddr::Network)); break; + + default: + bro_analyzer()->ProtocolViolation(fmt("invalid SOCKSv5 addr type: %d", ${request.remote_name.addr_type})); + return false; + break; } if ( ! ip_addr ) @@ -118,6 +130,11 @@ refine connection SOCKS_Conn += { case 4: ip_addr = new AddrVal(IPAddr(IPv6, (const uint32_t*) ${reply.bound.ipv6}, IPAddr::Network)); break; + + default: + bro_analyzer()->ProtocolViolation(fmt("invalid SOCKSv5 addr type: %d", ${reply.bound.addr_type})); + return false; + break; } if ( ! 
ip_addr ) @@ -138,6 +155,17 @@ refine connection SOCKS_Conn += { return true; %} + function version_error(version: uint8): bool + %{ + bro_analyzer()->ProtocolViolation(fmt("unsupported/unknown SOCKS version %d", version)); + return true; + %} + + +}; + +refine typeattr SOCKS_Version_Error += &let { + proc: bool = $context.connection.version_error(version); }; refine typeattr SOCKS4_Request += &let { diff --git a/src/socks-protocol.pac b/src/socks-protocol.pac index 8ae81a6e02..0d441665f6 100644 --- a/src/socks-protocol.pac +++ b/src/socks-protocol.pac @@ -4,10 +4,14 @@ type SOCKS_Version(is_orig: bool) = record { msg: case version of { 4 -> socks4_msg: SOCKS4_Message(is_orig); 5 -> socks5_msg: SOCKS5_Message(is_orig); - default -> socks_msg_fail: empty; + default -> socks_msg_fail: SOCKS_Version_Error(version); }; }; +type SOCKS_Version_Error(version: uint8) = record { + nothing: empty; +}; + # SOCKS5 Implementation type SOCKS5_Message(is_orig: bool) = case $context.connection.v5_past_authentication() of { true -> msg: SOCKS5_Real_Message(is_orig); From 232585c96ced34b8c6d3a7a54e0d34a5fadb201e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 20 Jun 2012 14:34:31 -0700 Subject: [PATCH 423/651] Updating submodule(s). [nomail] --- aux/broccoli | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broccoli b/aux/broccoli index 0d139c09d5..f1b0a395ab 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 0d139c09d5a9c8623ecc2a5f395178f0ddcd7e16 +Subproject commit f1b0a395ab32388d8375ab72ec263b6029833f96 From 1564edb296b1008eac3a05e9bdec89a5c1f8d5d0 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 20 Jun 2012 14:46:54 -0700 Subject: [PATCH 424/651] Fixing merge left-over. --- src/event.bif | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/event.bif b/src/event.bif index 78d2cd76a9..149d61492b 100644 --- a/src/event.bif +++ b/src/event.bif @@ -522,14 +522,6 @@ event esp_packet%(p: pkt_hdr%); ## .. bro:see:: new_packet tcp_packet ipv6_ext_headers event mobile_ipv6_message%(p: pkt_hdr%); -<<<<<<< HEAD -## Generated for every packet that has a non-empty transport-layer payload. -## This is a very low-level and expensive event that should be avoided when -## at all possible. It's usually infeasible to handle when processing even -## medium volumes of traffic in real-time. It's even worse than -## :bro:id:`new_packet`. That said, if you work from a trace and want to -## do some packet-level analysis, it may come in handy. -======= ## Genereated for any IPv6 packet encapsulated in a Teredo tunnel. ## See :rfc:`4380` for more information about the Teredo protocol. ## @@ -591,7 +583,6 @@ event teredo_bubble%(outer: connection, inner: teredo_hdr%); ## traffic in real-time. It's even worse than :bro:id:`new_packet`. That said, if ## you work from a trace and want to do some packet-level analysis, it may come in ## handy. ->>>>>>> topic/robin/tunnels-merge ## ## c: The connection the packet is part of. ## @@ -866,13 +857,8 @@ event udp_contents%(u: connection, is_orig: bool, contents: string%); ## Generated when a UDP session for a supported protocol has finished. Some of ## Bro's application-layer UDP analyzers flag the end of a session by raising -<<<<<<< HEAD -## this event. Currently, the analyzers for DNS, NTP, Netbios, and Syslog -## support this. -======= ## this event. Currently, the analyzers for DNS, NTP, Netbios, Syslog, AYIYA, ## and Teredo support this. 
->>>>>>> topic/robin/tunnels-merge ## ## u: The connection record for the corresponding UDP flow. ## From 5f75850577bba7c3b294f2c92cf036a5cb9cfd0d Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 20 Jun 2012 15:13:39 -0700 Subject: [PATCH 425/651] Updating baselines. --- doc/scripts/DocSourcesList.cmake | 1 + testing/btest/Baseline/core.print-bpf-filters/output | 8 ++++---- .../canonified_loaded_scripts.log | 1 + 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake index 433d7edd0a..650982f9bb 100644 --- a/doc/scripts/DocSourcesList.cmake +++ b/doc/scripts/DocSourcesList.cmake @@ -78,6 +78,7 @@ rest_target(${psd} base/protocols/irc/main.bro) rest_target(${psd} base/protocols/smtp/entities-excerpt.bro) rest_target(${psd} base/protocols/smtp/entities.bro) rest_target(${psd} base/protocols/smtp/main.bro) +rest_target(${psd} base/protocols/socks/consts.bro) rest_target(${psd} base/protocols/socks/main.bro) rest_target(${psd} base/protocols/ssh/main.bro) rest_target(${psd} base/protocols/ssl/consts.bro) diff --git a/testing/btest/Baseline/core.print-bpf-filters/output b/testing/btest/Baseline/core.print-bpf-filters/output index 0560b34769..d1c2d47893 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/output +++ b/testing/btest/Baseline/core.print-bpf-filters/output @@ -5,7 +5,7 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1340040469.440535 - ip or not ip T T +1340229717.179155 - ip or not ip T T #separator \x09 #set_separator , #empty_field (empty) @@ -13,7 +13,7 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1340040469.681428 - ((((((((((((((((((((((((port 53) or (tcp port 989)) or (tcp port 443)) or (port 6669)) or (udp and port 5353)) or (port 6668)) or (udp and port 5355)) or (tcp port 22)) or (tcp port 995)) or (port 21)) or (tcp port 25 or tcp port 587)) or (port 6667)) or (tcp port 614)) or (tcp port 990)) or (udp port 137)) or (tcp port 993)) or (tcp port 5223)) or (port 514)) or (tcp port 585)) or (tcp port 992)) or (tcp port 563)) or (tcp port 994)) or (tcp port 636)) or (tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888))) or (port 6666) T T +1340229717.462355 - (((((((((((((((((((((((((port 53) or (tcp port 989)) or (tcp port 443)) or (port 6669)) or (udp and port 5353)) or (port 6668)) or (tcp port 1080)) or (udp and port 5355)) or (tcp port 22)) or (tcp port 995)) or (port 21)) or (tcp port 25 or tcp port 587)) or (port 6667)) or (tcp port 614)) or (tcp port 990)) or (udp port 137)) or (tcp port 993)) or (tcp port 5223)) or (port 514)) or (tcp port 585)) or (tcp port 992)) or (tcp port 563)) or (tcp port 994)) or (tcp port 636)) or (tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888))) or (port 6666) T T #separator \x09 #set_separator , #empty_field (empty) @@ -21,7 +21,7 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1340040469.925663 - port 42 T T +1340229717.733007 - port 42 T T #separator \x09 #set_separator , #empty_field (empty) @@ -29,4 +29,4 @@ #path packet_filter #fields ts node filter init success #types time string string bool bool -1340040470.169001 - port 56730 T T +1340229718.001009 - port 56730 T T diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index 
ff2e2fc701..9414e9bd41 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -95,6 +95,7 @@ scripts/base/init-default.bro scripts/base/protocols/smtp/./entities.bro scripts/base/protocols/smtp/./entities-excerpt.bro scripts/base/protocols/socks/__load__.bro + scripts/base/protocols/socks/./consts.bro scripts/base/protocols/socks/./main.bro scripts/base/protocols/ssh/__load__.bro scripts/base/protocols/ssh/./main.bro From 737706994b0e1dfe3fb33b92ad05255e00883f85 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 20 Jun 2012 15:42:03 -0700 Subject: [PATCH 426/651] Accepting different AF_INET6 values for loopback link headers. Turns out they differ by platform. --- src/PktSrc.cc | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/PktSrc.cc b/src/PktSrc.cc index 615815b41b..9b974f7e53 100644 --- a/src/PktSrc.cc +++ b/src/PktSrc.cc @@ -193,7 +193,18 @@ void PktSrc::Process() { protocol = (data[3] << 24) + (data[2] << 16) + (data[1] << 8) + data[0]; - if ( protocol != AF_INET && protocol != AF_INET6 ) + // From the Wireshark Wiki: "AF_INET6, unfortunately, has + // different values in {NetBSD,OpenBSD,BSD/OS}, + // {FreeBSD,DragonFlyBSD}, and {Darwin/Mac OS X}, so an IPv6 + // packet might have a link-layer header with 24, 28, or 30 + // as the AF_ value." As we may be reading traces captured on + // platforms other than what we're running on, we accept them + // all here. + if ( protocol != AF_INET + && protocol != AF_INET6 + && protocol != 24 + && protocol != 28 + && protocol != 30 ) { sessions->Weird("non_ip_packet_in_null_transport", &hdr, data); data = 0; From 8b99cc4a478ff2abc68776775d2f045265e37f56 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 20 Jun 2012 16:02:20 -0700 Subject: [PATCH 427/651] Updating NEWS in preparation for beta. Feel free to suggest edits/extensions. --- NEWS | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 67 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index 75bb78130f..848a51acc5 100644 --- a/NEWS +++ b/NEWS @@ -6,10 +6,71 @@ This document summarizes the most important changes in the current Bro release. For a complete list of changes, see the ``CHANGES`` file. -Bro 2.1 -------- +Bro 2.1 Beta +------------ -- Dependencies: +New Functionality +~~~~~~~~~~~~~~~~~ + +- Bro now comes with extensive IPv6 support. Past versions offered + only basic IPv6 functionality that was rarely used in practice as it + had to be enabled explicitly. IPv6 support is now fully integrated + into all parts of Bro including protocol analysis and the scripting + language. It's on by default and no longer requires any special + configuration. + + Some of the most significant enhancements include support for IPv6 + fragment reassembly, support for following IPv6 extension header + chains, and support for tunnel decapsulation (6to4 and Teredo). The + DNS analyzer now handles AAAA records properly, and DNS lookups that + Bro itself performs now include AAAA queries, so that, for example, + the result returned by script-level lookups is a set that can + contain both IPv4 and IPv6 addresses. Support for the most common + ICMPv6 message types has been added. Also, the FTP EPSV and EPRT + commands are now handled properly. 
Internally, the way IP addresses + are stored has been improved, so Bro can handle both IPv4 + and IPv6 by default without any special configuration. + + In addition to Bro itself, the other Bro components have also been + made IPv6-aware by default. In particular, significant changes were + made to trace-summary, PySubnetTree, and Broccoli to support IPv6. + +- Bro now decapsulates tunnels via its new tunnel framework located in + scripts/base/frameworks/tunnels. It currently supports Teredo, + AYIYA, IP-in-IP (both IPv4 and IPv6), and SOCKS. For all these, it + logs the outer tunnel connections in both conn.log and tunnel.log, + and then proceeds to analyze the inner payload as if it were not + tunneled, including also logging that session in conn.log. For + SOCKS, it additionally generates a new socks.log with more + information. + +- Bro now features a flexible input framework that allows users to + integrate external information in real-time into Bro while it + processes network traffic. The most direct use-case at the moment + is reading data from ASCII files into Bro tables, with updates + picked up automatically when the file changes during runtime. See + doc/input.rst for more information. + + Internally, the input framework is structured around the notion of + "reader plugins" that make it easy to interface to different data + sources. We will add more in the future. + +- Bro's default ASCII log format is not exactly the most efficient way + for storing and searching large volumes of data. As an alternative, + Bro now comes with experimental support for DataSeries output, an + efficient binary format for recording structured bulk data. + DataSeries is developed and maintained at HP Labs. See + doc/logging-dataseries for more information. + + +Changed Functionality +~~~~~~~~~~~~~~~~~~~~~ + +The following summarizes the most important differences in existing +functionality. Note that this list is not complete; see CHANGES for +the full set. + +- Changes in dependencies: * Bro now requires CMake >= 2.6.3. @@ -17,8 +78,7 @@ Bro 2.1 configure time. Doing so can significantly improve memory and CPU use. -- Bro now supports IPv6 out of the box; the configure switch - --enable-brov6 is gone. +- The configure switch --enable-brov6 is gone. - DNS name lookups performed by Bro now also query AAAA records. The results of the A and AAAA queries for a given hostname are combined @@ -35,7 +95,7 @@ Bro 2.1 - The syntax for IPv6 literals changed from "2607:f8b0:4009:802::1012" to "[2607:f8b0:4009:802::1012]". -- Bro now spawn threads for doing its logging. From a user's +- Bro now spawns threads for doing its logging. From a user's perspective not much should change, except that the OS may now show a bunch of Bro threads. @@ -64,7 +124,6 @@ Bro 2.1 Bro now supports decapsulating tunnels directly for protocols it understands. -TODO: Extend. Bro 2.0 ------- @@ -97,7 +156,7 @@ final release are: ASCII logger now respects to add a suffix to the log files it creates. - * The ASCII logs now include further header information, and + * The ASCII logs now include further header information, and fields set to an empty value are now logged as ``(empty)`` by default (instead of ``-``, which is already used for fields that are not set at all). From 6b8b4dab71770efa6e902cf14892d8cc4253eab7 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 20 Jun 2012 22:57:46 -0400 Subject: [PATCH 428/651] Fixed some problems with the SOCKS analyzer and tests.
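
The socks_request and socks_reply events now carry the new SOCKS::Address
record (added to init-bare.bro below) instead of separate dstaddr/dstname
parameters. As a rough sketch of how a script-level handler can consume the
new signature (illustrative only; the handler body is not part of this
commit):

    event socks_request(c: connection, version: count, request_type: count,
                        sa: SOCKS::Address, p: port, user: string)
        {
        # The record carries a host, a name, or both; test before use.
        if ( sa?$name )
            print fmt("SOCKSv%d request to %s:%s", version, sa$name, p);
        else if ( sa?$host )
            print fmt("SOCKSv%d request to %s:%s", version, sa$host, p);
        }
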
--- scripts/base/frameworks/dpd/dpd.sig | 5 +- scripts/base/init-bare.bro | 11 ++++ scripts/base/protocols/socks/main.bro | 47 ++++++--------- src/NetVar.cc | 4 ++ src/NetVar.h | 2 + src/event.bif | 4 +- src/socks-analyzer.pac | 57 +++++++------------ .../socks.log | 2 +- .../socks.log | 2 +- 9 files changed, 63 insertions(+), 71 deletions(-) diff --git a/scripts/base/frameworks/dpd/dpd.sig b/scripts/base/frameworks/dpd/dpd.sig index 05dcee9d49..49e24cefc6 100644 --- a/scripts/base/frameworks/dpd/dpd.sig +++ b/scripts/base/frameworks/dpd/dpd.sig @@ -202,8 +202,9 @@ signature dpd_socks5_client { signature dpd_socks5_server { ip-proto == tcp requires-reverse-signature dpd_socks5_client - # Watch for a single authentication method to be chosen by the server. - payload /^\x05\x01[\x00\x01\x02]/ + # Watch for a single authentication method to be chosen by the server or + # the server to indicate the no authentication is required. + payload /^\x05(\x00|\x01[\x00\x01\x02])/ tcp-state responder enable "socks" } diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 17ea0823ac..3a323ad7fe 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -2402,6 +2402,17 @@ type bittorrent_benc_dir: table[string] of bittorrent_benc_value; ## bt_tracker_response_not_ok type bt_tracker_headers: table[string] of string; +module SOCKS; +export { + ## This record is for a SOCKS client or server to provide either a + ## name or an address to represent a desired or established connection. + type Address: record { + host: addr &optional; + name: string &optional; + } &log; +} +module GLOBAL; + @load base/event.bif ## BPF filter the user has set via the -f command line options. Empty if none. diff --git a/scripts/base/protocols/socks/main.bro b/scripts/base/protocols/socks/main.bro index cceea68758..96dbccb522 100644 --- a/scripts/base/protocols/socks/main.bro +++ b/scripts/base/protocols/socks/main.bro @@ -8,27 +8,23 @@ export { type Info: record { ## Time when the proxy connection was first detected. - ts: time &log; - uid: string &log; - id: conn_id &log; + ts: time &log; + uid: string &log; + id: conn_id &log; ## Protocol version of SOCKS. - version: count &log; + version: count &log; ## Username for the proxy if extracted from the network. - user: string &log &optional; + user: string &log &optional; ## Server status for the attempt at using the proxy. - status: string &log &optional; - ## Client requested address. Mutually exclusive with req_name. - req_h: addr &log &optional; - ## Client requested domain name. Mutually exclusive with req_h. - req_name: string &log &optional; + status: string &log &optional; + ## Client requested SOCKS address. Could be an address, a name or both. + request: SOCKS::Address &log &optional; ## Client requested port. - req_p: port &log &optional; - ## Server bound address. Mutually exclusive with bound_name. - bound_h: addr &log &optional; - ## Server bound domain name. Mutually exclusive with bound_h. - bound_name: string &log &optional; + request_p: port &log &optional; + ## Server bound address. Could be an address, a name or both. + bound: SOCKS::Address &log &optional; ## Server bound port. 
- bound_p: port &log &optional; + bound_p: port &log &optional; }; ## Event that can be handled to access the SOCKS @@ -57,15 +53,12 @@ function set_session(c: connection, version: count) } event socks_request(c: connection, version: count, request_type: count, - dstaddr: addr, dstname: string, p: port, user: string) &priority=5 + sa: SOCKS::Address, p: port, user: string) &priority=5 { set_session(c, version); - if ( dstaddr != [::] ) - c$socks$req_h = dstaddr; - if ( dstname != "" ) - c$socks$req_name = dstname; - c$socks$req_p = p; + c$socks$request = sa; + c$socks$request_p = p; # Copy this conn_id and set the orig_p to zero because in the case of SOCKS proxies there will # be potentially many source ports since a new proxy connection is established for each @@ -75,7 +68,7 @@ event socks_request(c: connection, version: count, request_type: count, Tunnel::register([$cid=cid, $tunnel_type=Tunnel::SOCKS, $payload_proxy=T]); } -event socks_reply(c: connection, version: count, reply: count, dstaddr: addr, dstname: string, p: port) &priority=5 +event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=5 { set_session(c, version); @@ -84,15 +77,11 @@ event socks_reply(c: connection, version: count, reply: count, dstaddr: addr, ds else if ( version == 4 ) c$socks$status = v4_status[reply]; - if ( dstaddr != [::] ) - c$socks$bound_h = dstaddr; - if ( dstname != "" ) - c$socks$bound_name = dstname; - + c$socks$bound = sa; c$socks$bound_p = p; } -event socks_reply(c: connection, version: count, reply: count, dstaddr: addr, dstname: string, p: port) &priority=-5 +event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=-5 { Log::write(SOCKS::LOG, c$socks); } \ No newline at end of file diff --git a/src/NetVar.cc b/src/NetVar.cc index b057efad11..248ae15e1a 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -48,6 +48,8 @@ int tcp_excessive_data_without_further_acks; RecordType* x509_type; +RecordType* socks_address; + double non_analyzed_lifetime; double tcp_inactivity_timeout; double udp_inactivity_timeout; @@ -344,6 +346,8 @@ void init_net_var() opt_internal_int("tcp_excessive_data_without_further_acks"); x509_type = internal_type("X509")->AsRecordType(); + + socks_address = internal_type("SOCKS::Address")->AsRecordType(); non_analyzed_lifetime = opt_internal_double("non_analyzed_lifetime"); tcp_inactivity_timeout = opt_internal_double("tcp_inactivity_timeout"); diff --git a/src/NetVar.h b/src/NetVar.h index e6f6e0cfc4..2561fa0ad9 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -51,6 +51,8 @@ extern int tcp_excessive_data_without_further_acks; extern RecordType* x509_type; +extern RecordType* socks_address; + extern double non_analyzed_lifetime; extern double tcp_inactivity_timeout; extern double udp_inactivity_timeout; diff --git a/src/event.bif b/src/event.bif index d1e28a98e5..63ec361851 100644 --- a/src/event.bif +++ b/src/event.bif @@ -6112,7 +6112,7 @@ event signature_match%(state: signature_state, msg: string, data: string%); ## p: The destination port for the proxied traffic. ## ## user: Username given for the SOCKS connection. This is not yet implemented for SOCKSv5. -event socks_request%(c: connection, version: count, request_type: count, dstaddr: addr, dstname: string, p: port, user: string%); +event socks_request%(c: connection, version: count, request_type: count, sa: SOCKS::Address, p: port, user: string%); ## Generated when a SOCKS reply is analyzed. 
## @@ -6127,7 +6127,7 @@ event socks_request%(c: connection, version: count, request_type: count, dstaddr ## dstname: The name the server sent the traffic to. Only applicable for SOCKSv5. ## ## p: The destination port for the proxied traffic. -event socks_reply%(c: connection, version: count, reply: count, dstaddr: addr, dstname: string, p: port%); +event socks_reply%(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port%); ## Generated when a protocol analyzer finds an identification of a software ## used on a system. This is a protocol-independent event that is fed by diff --git a/src/socks-analyzer.pac b/src/socks-analyzer.pac index 1d71753fb5..bf5ada4631 100644 --- a/src/socks-analyzer.pac +++ b/src/socks-analyzer.pac @@ -22,19 +22,16 @@ refine connection SOCKS_Conn += { function socks4_request(request: SOCKS4_Request): bool %{ - - StringVal *dstname = 0; + RecordVal* sa = new RecordVal(socks_address); + sa->Assign(0, new AddrVal(htonl(${request.addr}))); if ( ${request.v4a} ) - dstname = array_to_string(${request.name}); - else - dstname = new StringVal(""); + sa->Assign(1, array_to_string(${request.name})); BifEvent::generate_socks_request(bro_analyzer(), bro_analyzer()->Conn(), 4, ${request.command}, - new AddrVal(htonl(${request.addr})), - dstname, + sa, new PortVal(${request.port} | TCP_PORT_MASK), array_to_string(${request.user})); @@ -45,12 +42,14 @@ refine connection SOCKS_Conn += { function socks4_reply(reply: SOCKS4_Reply): bool %{ + RecordVal* sa = new RecordVal(socks_address); + sa->Assign(0, new AddrVal(htonl(${reply.addr}))); + BifEvent::generate_socks_reply(bro_analyzer(), bro_analyzer()->Conn(), 4, ${reply.status}, - new AddrVal(htonl(${reply.addr})), - new StringVal(""), + sa, new PortVal(${reply.port} | TCP_PORT_MASK)); bro_analyzer()->ProtocolConfirmation(); @@ -65,24 +64,23 @@ refine connection SOCKS_Conn += { bro_analyzer()->ProtocolViolation(fmt("invalid value in reserved field: %d", ${request.reserved})); return false; } - - AddrVal *ip_addr = 0; - StringVal *domain_name = 0; + + RecordVal* sa = new RecordVal(socks_address); // This is dumb and there must be a better way (checking for presence of a field)... switch ( ${request.remote_name.addr_type} ) { case 1: - ip_addr = new AddrVal(htonl(${request.remote_name.ipv4})); + sa->Assign(0, new AddrVal(htonl(${request.remote_name.ipv4}))); break; case 3: - domain_name = new StringVal(${request.remote_name.domain_name.name}.length(), - (const char*) ${request.remote_name.domain_name.name}.data()); + sa->Assign(1, new StringVal(${request.remote_name.domain_name.name}.length(), + (const char*) ${request.remote_name.domain_name.name}.data())); break; case 4: - ip_addr = new AddrVal(IPAddr(IPv6, (const uint32_t*) ${request.remote_name.ipv6}, IPAddr::Network)); + sa->Assign(0, new AddrVal(IPAddr(IPv6, (const uint32_t*) ${request.remote_name.ipv6}, IPAddr::Network))); break; default: @@ -91,17 +89,11 @@ refine connection SOCKS_Conn += { break; } - if ( ! ip_addr ) - ip_addr = new AddrVal(uint32(0)); - if ( ! 
domain_name ) - domain_name = new StringVal(""); - BifEvent::generate_socks_request(bro_analyzer(), bro_analyzer()->Conn(), 5, ${request.command}, - ip_addr, - domain_name, + sa, new PortVal(${request.port} | TCP_PORT_MASK), new StringVal("")); @@ -112,23 +104,22 @@ refine connection SOCKS_Conn += { function socks5_reply(reply: SOCKS5_Reply): bool %{ - AddrVal *ip_addr = 0; - StringVal *domain_name = 0; + RecordVal* sa = new RecordVal(socks_address); // This is dumb and there must be a better way (checking for presence of a field)... switch ( ${reply.bound.addr_type} ) { case 1: - ip_addr = new AddrVal(htonl(${reply.bound.ipv4})); + sa->Assign(0, new AddrVal(htonl(${reply.bound.ipv4}))); break; case 3: - domain_name = new StringVal(${reply.bound.domain_name.name}.length(), - (const char*) ${reply.bound.domain_name.name}.data()); + sa->Assign(1, new StringVal(${reply.bound.domain_name.name}.length(), + (const char*) ${reply.bound.domain_name.name}.data())); break; case 4: - ip_addr = new AddrVal(IPAddr(IPv6, (const uint32_t*) ${reply.bound.ipv6}, IPAddr::Network)); + sa->Assign(0, new AddrVal(IPAddr(IPv6, (const uint32_t*) ${reply.bound.ipv6}, IPAddr::Network))); break; default: @@ -137,17 +128,11 @@ refine connection SOCKS_Conn += { break; } - if ( ! ip_addr ) - ip_addr = new AddrVal(uint32(0)); - if ( ! domain_name ) - domain_name = new StringVal(""); - BifEvent::generate_socks_reply(bro_analyzer(), bro_analyzer()->Conn(), 5, ${reply.reply}, - ip_addr, - domain_name, + sa, new PortVal(${reply.port} | TCP_PORT_MASK)); bro_analyzer()->ProtocolConfirmation(); diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log index 4241190234..08d31fdb69 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log @@ -3,6 +3,6 @@ #empty_field (empty) #unset_field - #path socks -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status req_h req_name req_p bound_h bound_name bound_p +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status request.host request.name request_p bound.host bound.name bound_p #types time string addr port addr port count string string addr string port addr string port 1340213015.276495 UWkUyAuUGXf 10.0.0.55 53994 60.190.189.214 8124 5 - succeeded - www.osnews.com 80 192.168.0.31 - 2688 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log index 556ed9263e..8fd109f3a4 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log @@ -3,6 +3,6 @@ #empty_field (empty) #unset_field - #path socks -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status req_h req_name req_p bound_h bound_name bound_p +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status request.host request.name request_p bound.host bound.name bound_p #types time string addr port addr port count string string addr string port addr string port 1340113261.914619 UWkUyAuUGXf 10.0.0.50 59580 85.194.84.197 1080 5 - succeeded - www.google.com 443 0.0.0.0 - 443 From 3eb16e5738288c06126b917e553fd0ce2f2a0c3e Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 21 Jun 2012 02:07:30 -0400 Subject: [PATCH 429/651] Add another SOCKS command. 
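For illustration only (this snippet is not part of the patch), a script-level handler could recognize the new request type; the numeric value 3 mirrors the UDP_ASSOCIATE enum member added below, and the event signature follows the SOCKS::Address form introduced earlier in this series:

event socks_request(c: connection, version: count, request_type: count,
                    sa: SOCKS::Address, p: port, user: string)
    {
    # 3 corresponds to the new SOCKS::UDP_ASSOCIATE request type.
    if ( request_type == 3 )
        print fmt("SOCKSv%d UDP ASSOCIATE request, destination port %s", version, p);
    }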
--- scripts/base/protocols/socks/consts.bro | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/scripts/base/protocols/socks/consts.bro b/scripts/base/protocols/socks/consts.bro index fb14aae601..e5dcc460d4 100644 --- a/scripts/base/protocols/socks/consts.bro +++ b/scripts/base/protocols/socks/consts.bro @@ -2,8 +2,9 @@ module SOCKS; export { type RequestType: enum { - CONNECTION = 1, - PORT = 2, + CONNECTION = 1, + PORT = 2, + UDP_ASSOCIATE = 3, }; const v5_authentication_methods: table[count] of string = { @@ -37,4 +38,4 @@ export { [8] = "Address type not supported", } &default=function(i: count):string { return fmt("unknown-%d", i); }; -} \ No newline at end of file +} From b38d1e1ec2336f47ec5d83f3878275e86343ddbc Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 21 Jun 2012 11:57:45 -0700 Subject: [PATCH 430/651] Reworking log writer API to make it easier to pass additional information to a writer's initialization method. However, for now the information provided is still the same. --- src/RemoteSerializer.cc | 18 ++++++------- src/RemoteSerializer.h | 5 ++-- src/logging/Manager.cc | 23 +++++++++------- src/logging/Manager.h | 5 ++-- src/logging/WriterBackend.cc | 22 ++++++++++++---- src/logging/WriterBackend.h | 44 ++++++++++++++++++++++--------- src/logging/WriterFrontend.cc | 23 ++++++++-------- src/logging/WriterFrontend.h | 12 ++++----- src/logging/writers/Ascii.cc | 8 +++--- src/logging/writers/Ascii.h | 2 +- src/logging/writers/DataSeries.cc | 12 ++++----- src/logging/writers/DataSeries.h | 2 +- src/logging/writers/None.cc | 4 +-- src/logging/writers/None.h | 2 +- 14 files changed, 112 insertions(+), 70 deletions(-) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 838bafb0d6..6a73bae553 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -2503,17 +2503,17 @@ bool RemoteSerializer::ProcessRemotePrint() return true; } -bool RemoteSerializer::SendLogCreateWriter(EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Field* const * fields) +bool RemoteSerializer::SendLogCreateWriter(EnumVal* id, EnumVal* writer, const logging::WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const * fields) { loop_over_list(peers, i) { - SendLogCreateWriter(peers[i]->id, id, writer, path, num_fields, fields); + SendLogCreateWriter(peers[i]->id, id, writer, info, num_fields, fields); } return true; } -bool RemoteSerializer::SendLogCreateWriter(PeerID peer_id, EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Field* const * fields) +bool RemoteSerializer::SendLogCreateWriter(PeerID peer_id, EnumVal* id, EnumVal* writer, const logging::WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const * fields) { SetErrorDescr("logging"); @@ -2535,8 +2535,8 @@ bool RemoteSerializer::SendLogCreateWriter(PeerID peer_id, EnumVal* id, EnumVal* bool success = fmt.Write(id->AsEnum(), "id") && fmt.Write(writer->AsEnum(), "writer") && - fmt.Write(path, "path") && - fmt.Write(num_fields, "num_fields"); + fmt.Write(num_fields, "num_fields") && + info.Write(&fmt); if ( ! 
success ) goto error; @@ -2691,13 +2691,13 @@ bool RemoteSerializer::ProcessLogCreateWriter() fmt.StartRead(current_args->data, current_args->len); int id, writer; - string path; int num_fields; + logging::WriterBackend::WriterInfo info; bool success = fmt.Read(&id, "id") && fmt.Read(&writer, "writer") && - fmt.Read(&path, "path") && - fmt.Read(&num_fields, "num_fields"); + fmt.Read(&num_fields, "num_fields") && + info.Read(&fmt); if ( ! success ) goto error; @@ -2716,7 +2716,7 @@ bool RemoteSerializer::ProcessLogCreateWriter() id_val = new EnumVal(id, BifType::Enum::Log::ID); writer_val = new EnumVal(writer, BifType::Enum::Log::Writer); - if ( ! log_mgr->CreateWriter(id_val, writer_val, path, num_fields, fields, true, false) ) + if ( ! log_mgr->CreateWriter(id_val, writer_val, info, num_fields, fields, true, false) ) goto error; Unref(id_val); diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index 4ebf15e68d..1d7feef585 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -9,6 +9,7 @@ #include "IOSource.h" #include "Stats.h" #include "File.h" +#include "logging/WriterBackend.h" #include #include @@ -104,10 +105,10 @@ public: bool SendPrintHookEvent(BroFile* f, const char* txt, size_t len); // Send a request to create a writer on a remote side. - bool SendLogCreateWriter(PeerID peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Field* const * fields); + bool SendLogCreateWriter(PeerID peer, EnumVal* id, EnumVal* writer, const logging::WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const * fields); // Broadcasts a request to create a writer. - bool SendLogCreateWriter(EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Field* const * fields); + bool SendLogCreateWriter(EnumVal* id, EnumVal* writer, const logging::WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const * fields); // Broadcast a log entry to everybody interested. bool SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Value* const * vals); diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index f0b5cc1748..b30ee26534 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -74,6 +74,7 @@ struct Manager::WriterInfo { double interval; Func* postprocessor; WriterFrontend* writer; + WriterBackend::WriterInfo info; }; struct Manager::Stream { @@ -764,8 +765,11 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) for ( int j = 0; j < filter->num_fields; ++j ) arg_fields[j] = new threading::Field(*filter->fields[j]); + WriterBackend::WriterInfo info; + info.path = path; + writer = CreateWriter(stream->id, filter->writer, - path, filter->num_fields, + info, filter->num_fields, arg_fields, filter->local, filter->remote); if ( ! 
writer ) @@ -953,7 +957,7 @@ threading::Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, return vals; } -WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, +WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const* fields, bool local, bool remote) { Stream* stream = FindStream(id); @@ -963,7 +967,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, return false; Stream::WriterMap::iterator w = - stream->writers.find(Stream::WriterPathPair(writer->AsEnum(), path)); + stream->writers.find(Stream::WriterPathPair(writer->AsEnum(), info.path)); if ( w != stream->writers.end() ) // If we already have a writer for this. That's fine, we just @@ -973,7 +977,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, WriterFrontend* writer_obj = new WriterFrontend(id, writer, local, remote); assert(writer_obj); - writer_obj->Init(path, num_fields, fields); + writer_obj->Init(info, num_fields, fields); WriterInfo* winfo = new WriterInfo; winfo->type = writer->Ref()->AsEnumVal(); @@ -982,6 +986,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, winfo->rotation_timer = 0; winfo->interval = 0; winfo->postprocessor = 0; + winfo->info = info; // Search for a corresponding filter for the writer/path pair and use its // rotation settings. If no matching filter is found, fall back on @@ -993,7 +998,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, { Filter* f = *it; if ( f->writer->AsEnum() == writer->AsEnum() && - f->path == winfo->writer->Path() ) + f->path == winfo->writer->info.path ) { found_filter_match = true; winfo->interval = f->interval; @@ -1012,7 +1017,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, InstallRotationTimer(winfo); stream->writers.insert( - Stream::WriterMap::value_type(Stream::WriterPathPair(writer->AsEnum(), path), + Stream::WriterMap::value_type(Stream::WriterPathPair(writer->AsEnum(), info.path), winfo)); return writer_obj; @@ -1093,7 +1098,7 @@ void Manager::SendAllWritersTo(RemoteSerializer::PeerID peer) EnumVal writer_val(i->first.first, BifType::Enum::Log::Writer); remote_serializer->SendLogCreateWriter(peer, (*s)->id, &writer_val, - i->first.second, + i->second->info, writer->NumFields(), writer->Fields()); } @@ -1246,7 +1251,7 @@ void Manager::Rotate(WriterInfo* winfo) localtime_r(&teatime, &tm); strftime(buf, sizeof(buf), date_fmt, &tm); - string tmp = string(fmt("%s-%s", winfo->writer->Path().c_str(), buf)); + string tmp = string(fmt("%s-%s", winfo->writer->Info().path.c_str(), buf)); // Trigger the rotation. 
winfo->writer->Rotate(tmp, winfo->open_time, network_time, terminating); @@ -1274,7 +1279,7 @@ bool Manager::FinishedRotation(WriterFrontend* writer, string new_name, string o RecordVal* info = new RecordVal(BifType::Record::Log::RotationInfo); info->Assign(0, winfo->type->Ref()); info->Assign(1, new StringVal(new_name.c_str())); - info->Assign(2, new StringVal(winfo->writer->Path().c_str())); + info->Assign(2, new StringVal(winfo->writer->Info().path.c_str())); info->Assign(3, new Val(open, TYPE_TIME)); info->Assign(4, new Val(close, TYPE_TIME)); info->Assign(5, new Val(terminating, TYPE_BOOL)); diff --git a/src/logging/Manager.h b/src/logging/Manager.h index f5e62b0683..38dd9258b3 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -9,13 +9,14 @@ #include "../EventHandler.h" #include "../RemoteSerializer.h" +#include "WriterBackend.h" + class SerializationFormat; class RemoteSerializer; class RotationTimer; namespace logging { -class WriterBackend; class WriterFrontend; class RotationFinishedMessage; @@ -162,7 +163,7 @@ protected: //// Function also used by the RemoteSerializer. // Takes ownership of fields. - WriterFrontend* CreateWriter(EnumVal* id, EnumVal* writer, string path, + WriterFrontend* CreateWriter(EnumVal* id, EnumVal* writer, const WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const* fields, bool local, bool remote); diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 23a95279d7..35bb27d27b 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -4,6 +4,7 @@ #include "bro_inet_ntop.h" #include "threading/SerialTypes.h" +#include "Manager.h" #include "WriterBackend.h" #include "WriterFrontend.h" @@ -60,14 +61,25 @@ public: using namespace logging; +bool WriterBackend::WriterInfo::Read(SerializationFormat* fmt) + { + return fmt->Read(&path, "path"); + } + +bool WriterBackend::WriterInfo::Write(SerializationFormat* fmt) const + { + return fmt->Write(path, "path"); + } + WriterBackend::WriterBackend(WriterFrontend* arg_frontend) : MsgThread() { - path = ""; num_fields = 0; fields = 0; buffering = true; frontend = arg_frontend; + info.path = ""; + SetName(frontend->Name()); } @@ -108,17 +120,17 @@ void WriterBackend::DisableFrontend() SendOut(new DisableMessage(frontend)); } -bool WriterBackend::Init(string arg_path, int arg_num_fields, const Field* const* arg_fields) +bool WriterBackend::Init(const WriterInfo& arg_info, int arg_num_fields, const Field* const* arg_fields) { - path = arg_path; + info = arg_info; num_fields = arg_num_fields; fields = arg_fields; - string name = Fmt("%s/%s", path.c_str(), frontend->Name().c_str()); + string name = Fmt("%s/%s", info.path.c_str(), frontend->Name().c_str()); SetName(name); - if ( ! DoInit(arg_path, arg_num_fields, arg_fields) ) + if ( ! DoInit(arg_info, arg_num_fields, arg_fields) ) { DisableFrontend(); return false; diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 1269976aee..30e1995430 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -5,12 +5,14 @@ #ifndef LOGGING_WRITERBACKEND_H #define LOGGING_WRITERBACKEND_H -#include "Manager.h" - #include "threading/MsgThread.h" +class RemoteSerializer; + namespace logging { +class WriterFrontend; + /** * Base class for writer implementation. When the logging::Manager creates a * new logging filter, it instantiates a WriterFrontend. 
That then in turn @@ -41,21 +43,39 @@ public: */ virtual ~WriterBackend(); + /** + * A struct passing information to the writer at initialization time. + */ + struct WriterInfo + { + /** + * A string left to the interpretation of the writer + * implementation; it corresponds to the value configured on + * the script-level for the logging filter. + */ + string path; + + private: + friend class ::RemoteSerializer; + + // Note, these need to be adapted when changing the struct's + // fields. They serialize/deserialize the struct. + bool Read(SerializationFormat* fmt); + bool Write(SerializationFormat* fmt) const; + }; + /** * One-time initialization of the writer to define the logged fields. * - * @param path A string left to the interpretation of the writer - * implementation; it corresponds to the value configured on the - * script-level for the logging filter. - * - * @param num_fields The number of log fields for the stream. + * @param info Meta information for the writer. + * @param num_fields * * @param fields An array of size \a num_fields with the log fields. * The methods takes ownership of the array. * * @return False if an error occured. */ - bool Init(string path, int num_fields, const threading::Field* const* fields); + bool Init(const WriterInfo& info, int num_fields, const threading::Field* const* fields); /** * Writes one log entry. @@ -108,9 +128,9 @@ public: void DisableFrontend(); /** - * Returns the log path as passed into the constructor. + * Returns the additional writer information into the constructor. */ - const string Path() const { return path; } + const WriterInfo& Info() const { return info; } /** * Returns the number of log fields as passed into the constructor. @@ -185,7 +205,7 @@ protected: * disabled and eventually deleted. When returning false, an * implementation should also call Error() to indicate what happened. */ - virtual bool DoInit(string path, int num_fields, + virtual bool DoInit(const WriterInfo& info, int num_fields, const threading::Field* const* fields) = 0; /** @@ -299,7 +319,7 @@ private: // this class, it's running in a different thread! WriterFrontend* frontend; - string path; // Log path. + WriterInfo info; // Meta information as passed to Init(). int num_fields; // Number of log fields. const threading::Field* const* fields; // Log fields. bool buffering; // True if buffering is enabled. 
diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 33c9c04c63..6ad40757d6 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -2,6 +2,7 @@ #include "Net.h" #include "threading/SerialTypes.h" +#include "Manager.h" #include "WriterFrontend.h" #include "WriterBackend.h" @@ -15,14 +16,14 @@ namespace logging { class InitMessage : public threading::InputMessage { public: - InitMessage(WriterBackend* backend, const string path, const int num_fields, const Field* const* fields) + InitMessage(WriterBackend* backend, const WriterBackend::WriterInfo& info, const int num_fields, const Field* const* fields) : threading::InputMessage("Init", backend), - path(path), num_fields(num_fields), fields(fields) { } + info(info), num_fields(num_fields), fields(fields) { } - virtual bool Process() { return Object()->Init(path, num_fields, fields); } + virtual bool Process() { return Object()->Init(info, num_fields, fields); } private: - const string path; + WriterBackend::WriterInfo info; const int num_fields; const Field * const* fields; }; @@ -134,10 +135,10 @@ WriterFrontend::~WriterFrontend() string WriterFrontend::Name() const { - if ( path.size() ) + if ( info.path.size() ) return ty_name; - return ty_name + "/" + path; + return ty_name + "/" + info.path; } void WriterFrontend::Stop() @@ -149,7 +150,7 @@ void WriterFrontend::Stop() backend->Stop(); } -void WriterFrontend::Init(string arg_path, int arg_num_fields, const Field* const * arg_fields) +void WriterFrontend::Init(const WriterBackend::WriterInfo& arg_info, int arg_num_fields, const Field* const * arg_fields) { if ( disabled ) return; @@ -157,19 +158,19 @@ void WriterFrontend::Init(string arg_path, int arg_num_fields, const Field* cons if ( initialized ) reporter->InternalError("writer initialize twice"); - path = arg_path; + info = arg_info; num_fields = arg_num_fields; fields = arg_fields; initialized = true; if ( backend ) - backend->SendIn(new InitMessage(backend, arg_path, arg_num_fields, arg_fields)); + backend->SendIn(new InitMessage(backend, arg_info, arg_num_fields, arg_fields)); if ( remote ) remote_serializer->SendLogCreateWriter(stream, writer, - arg_path, + arg_info, arg_num_fields, arg_fields); @@ -183,7 +184,7 @@ void WriterFrontend::Write(int num_fields, Value** vals) if ( remote ) remote_serializer->SendLogWrite(stream, writer, - path, + info.path, num_fields, vals); diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index b83250a5b8..8a0dce4645 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -3,13 +3,13 @@ #ifndef LOGGING_WRITERFRONTEND_H #define LOGGING_WRITERFRONTEND_H -#include "Manager.h" +#include "WriterBackend.h" #include "threading/MsgThread.h" namespace logging { -class WriterBackend; +class Manager; /** * Bridge class between the logging::Manager and backend writer threads. The @@ -68,7 +68,7 @@ public: * * This method must only be called from the main thread. */ - void Init(string path, int num_fields, const threading::Field* const* fields); + void Init(const WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const* fields); /** * Write out a record. @@ -169,9 +169,9 @@ public: bool Disabled() { return disabled; } /** - * Returns the log path as passed into the constructor. + * Returns the additional writer information as passed into the constructor. 
*/ - const string Path() const { return path; } + const WriterBackend::WriterInfo& Info() const { return info; } /** * Returns the number of log fields as passed into the constructor. @@ -207,7 +207,7 @@ protected: bool remote; // True if loggin remotely. string ty_name; // Name of the backend type. Set by the manager. - string path; // The log path. + WriterBackend::WriterInfo info; // The writer information. int num_fields; // The number of log fields. const threading::Field* const* fields; // The log fields. diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 1e7a55c34c..6e5ceef678 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -69,8 +69,10 @@ bool Ascii::WriteHeaderField(const string& key, const string& val) return (fwrite(str.c_str(), str.length(), 1, file) == 1); } -bool Ascii::DoInit(string path, int num_fields, const Field* const * fields) +bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * fields) { + string path = info.path; + if ( output_to_stdout ) path = "/dev/stdout"; @@ -290,7 +292,7 @@ bool Ascii::DoWrite(int num_fields, const Field* const * fields, Value** vals) { if ( ! file ) - DoInit(Path(), NumFields(), Fields()); + DoInit(Info(), NumFields(), Fields()); desc.Clear(); @@ -320,7 +322,7 @@ bool Ascii::DoWrite(int num_fields, const Field* const * fields, bool Ascii::DoRotate(string rotated_path, double open, double close, bool terminating) { // Don't rotate special files or if there's not one currently open. - if ( ! file || IsSpecial(Path()) ) + if ( ! file || IsSpecial(Info().path) ) return true; fclose(file); diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/Ascii.h index 6f507aff01..a95e644d49 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/Ascii.h @@ -19,7 +19,7 @@ public: static string LogExt(); protected: - virtual bool DoInit(string path, int num_fields, + virtual bool DoInit(const WriterInfo& info, int num_fields, const threading::Field* const* fields); virtual bool DoWrite(int num_fields, const threading::Field* const* fields, threading::Value** vals); diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index 1d5a6ea4da..b34ea3412a 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -263,7 +263,7 @@ bool DataSeries::OpenLog(string path) return true; } -bool DataSeries::DoInit(string path, int num_fields, const threading::Field* const * fields) +bool DataSeries::DoInit(const WriterInfo& info, int num_fields, const threading::Field* const * fields) { // We first construct an XML schema thing (and, if ds_dump_schema is // set, dump it to path + ".ds.xml"). 
Assuming that goes well, we @@ -298,11 +298,11 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con schema_list.push_back(val); } - string schema = BuildDSSchemaFromFieldTypes(schema_list, path); + string schema = BuildDSSchemaFromFieldTypes(schema_list, info.path); if( ds_dump_schema ) { - FILE* pFile = fopen ( string(path + ".ds.xml").c_str() , "wb" ); + FILE* pFile = fopen ( string(info.path + ".ds.xml").c_str() , "wb" ); if( pFile ) { @@ -340,7 +340,7 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con log_type = log_types.registerTypePtr(schema); log_series.setType(log_type); - return OpenLog(path); + return OpenLog(info.path); } bool DataSeries::DoFlush() @@ -401,7 +401,7 @@ bool DataSeries::DoRotate(string rotated_path, double open, double close, bool t // size will be (much) larger. CloseLog(); - string dsname = Path() + ".ds"; + string dsname = Info().path + ".ds"; string nname = rotated_path + ".ds"; rename(dsname.c_str(), nname.c_str()); @@ -411,7 +411,7 @@ bool DataSeries::DoRotate(string rotated_path, double open, double close, bool t return false; } - return OpenLog(Path()); + return OpenLog(Info().path); } bool DataSeries::DoSetBuf(bool enabled) diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h index 0d9ab67e95..0ae3572b76 100644 --- a/src/logging/writers/DataSeries.h +++ b/src/logging/writers/DataSeries.h @@ -26,7 +26,7 @@ public: protected: // Overidden from WriterBackend. - virtual bool DoInit(string path, int num_fields, + virtual bool DoInit(const WriterInfo& info, int num_fields, const threading::Field* const * fields); virtual bool DoWrite(int num_fields, const threading::Field* const* fields, diff --git a/src/logging/writers/None.cc b/src/logging/writers/None.cc index a9a7872f85..e133394722 100644 --- a/src/logging/writers/None.cc +++ b/src/logging/writers/None.cc @@ -6,9 +6,9 @@ using namespace writer; bool None::DoRotate(string rotated_path, double open, double close, bool terminating) { - if ( ! FinishedRotation(string("/dev/null"), Path(), open, close, terminating)) + if ( ! FinishedRotation(string("/dev/null"), Info().path, open, close, terminating)) { - Error(Fmt("error rotating %s", Path().c_str())); + Error(Fmt("error rotating %s", Info().path.c_str())); return false; } diff --git a/src/logging/writers/None.h b/src/logging/writers/None.h index cce48953d1..89ba690e09 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/None.h @@ -18,7 +18,7 @@ public: { return new None(frontend); } protected: - virtual bool DoInit(string path, int num_fields, + virtual bool DoInit(const WriterInfo& info, int num_fields, const threading::Field* const * fields) { return true; } virtual bool DoWrite(int num_fields, const threading::Field* const* fields, From 19eea409c38fdefae7cd9113b23f9d7c6fc46285 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 21 Jun 2012 17:42:33 -0700 Subject: [PATCH 431/651] Extending the log writer DoInit() API. We now pass in a Info struct that contains: - the path name (as before) - the rotation interval - the log_rotate_base_time in seconds - a table of key/value pairs with further configuration options. To fill the table, log filters have a new field "config: table[string] of strings". This gives a way to pass arbitrary values from script-land to writers. Interpretation is left up to the writer. Also splits calc_next_rotate() into two functions, one of which is thread-safe and can be used with the log_rotate_base_time value from DoInit(). 
Includes also updates to the None writer: - It gets its own script writers/none.bro. - New bool option LogNone::debug to enable debug output. It then prints out all the values passed to DoInit(). That's used by a btest test to ensure the new DoInit() values are right. - Fixed a bug that prevented Bro from terminating. (scripts.base.frameworks.logging.rotate-custom currently fails. Haven't yet investigated why.) --- scripts/base/frameworks/logging/__load__.bro | 1 + scripts/base/frameworks/logging/main.bro | 6 +++ .../base/frameworks/logging/writers/none.bro | 17 ++++++++ src/File.cc | 3 +- src/Val.cc | 1 + src/bro.bif | 4 +- src/logging.bif | 6 +++ src/logging/Manager.cc | 36 +++++++++++++++-- src/logging/WriterBackend.cc | 40 ++++++++++++++++++- src/logging/WriterBackend.h | 18 +++++++++ src/logging/writers/None.cc | 27 +++++++++++++ src/logging/writers/None.h | 4 +- src/util.cc | 25 +++++++----- src/util.h | 17 +++++++- .../output | 12 ++++++ .../base/frameworks/logging/none-debug.bro | 37 +++++++++++++++++ 16 files changed, 231 insertions(+), 23 deletions(-) create mode 100644 scripts/base/frameworks/logging/writers/none.bro create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.none-debug/output create mode 100644 testing/btest/scripts/base/frameworks/logging/none-debug.bro diff --git a/scripts/base/frameworks/logging/__load__.bro b/scripts/base/frameworks/logging/__load__.bro index 17e03e2ef7..be44a7e34f 100644 --- a/scripts/base/frameworks/logging/__load__.bro +++ b/scripts/base/frameworks/logging/__load__.bro @@ -2,3 +2,4 @@ @load ./postprocessors @load ./writers/ascii @load ./writers/dataseries +@load ./writers/none diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index bec5f31dc6..9936ae44b1 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -138,6 +138,10 @@ export { ## Callback function to trigger for rotated files. If not set, the ## default comes out of :bro:id:`Log::default_rotation_postprocessors`. postprocessor: function(info: RotationInfo) : bool &optional; + + ## A key/value table that will be passed on to the writer. + ## Interpretation of the values is left to the writer. + config: table[string] of string &default=table(); }; ## Sentinel value for indicating that a filter was not found when looked up. @@ -327,6 +331,8 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool { if ( info$writer in default_rotation_postprocessors ) return default_rotation_postprocessors[info$writer](info); + + return F; } function default_path_func(id: ID, path: string, rec: any) : string diff --git a/scripts/base/frameworks/logging/writers/none.bro b/scripts/base/frameworks/logging/writers/none.bro new file mode 100644 index 0000000000..22d83bd8ec --- /dev/null +++ b/scripts/base/frameworks/logging/writers/none.bro @@ -0,0 +1,17 @@ +##! Interface for the None log writer. This writer is mainly for debugging. + +module LogNone; + +export { + ## If true, output some debugging output that can be useful for unit + ## testing the logging framework. 
+ const debug = F &redef; +} + +function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool + { + return T; + } + +redef Log::default_rotation_postprocessors += { [Log::WRITER_NONE] = default_rotation_postprocessor_func }; + diff --git a/src/File.cc b/src/File.cc index 8b432f4428..20e845c09f 100644 --- a/src/File.cc +++ b/src/File.cc @@ -572,8 +572,9 @@ void BroFile::InstallRotateTimer() const char* base_time = log_rotate_base_time ? log_rotate_base_time->AsString()->CheckString() : 0; + double base = parse_rotate_base_time(base_time); double delta_t = - calc_next_rotate(rotate_interval, base_time); + calc_next_rotate(network_time, rotate_interval, base); rotate_timer = new RotateTimer(network_time + delta_t, this, true); } diff --git a/src/Val.cc b/src/Val.cc index 32a3c367bb..8a8c2b18c0 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -1651,6 +1651,7 @@ int TableVal::RemoveFrom(Val* val) const while ( (v = tbl->NextEntry(k, c)) ) { Val* index = RecoverIndex(k); + Unref(index); Unref(t->Delete(k)); delete k; diff --git a/src/bro.bif b/src/bro.bif index 1feccb8639..b1f33c9c46 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -4814,7 +4814,9 @@ function calc_next_rotate%(i: interval%) : interval %{ const char* base_time = log_rotate_base_time ? log_rotate_base_time->AsString()->CheckString() : 0; - return new Val(calc_next_rotate(i, base_time), TYPE_INTERVAL); + + double base = parse_rotate_base_time(base_time); + return new Val(calc_next_rotate(network_time, i, base), TYPE_INTERVAL); %} ## Returns the size of a given file. diff --git a/src/logging.bif b/src/logging.bif index efc6ed0b4b..d25e89c33c 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -81,3 +81,9 @@ const extent_size: count; const dump_schema: bool; const use_integer_for_time: bool; const num_threads: count; + +# Options for the None writer. + +module LogNone; + +const debug: bool; diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index b30ee26534..23b6f070a1 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -51,6 +51,7 @@ struct Manager::Filter { string path; Val* path_val; EnumVal* writer; + TableVal* config; bool local; bool remote; double interval; @@ -519,6 +520,7 @@ bool Manager::AddFilter(EnumVal* id, RecordVal* fval) Val* log_remote = fval->LookupWithDefault(rtype->FieldOffset("log_remote")); Val* interv = fval->LookupWithDefault(rtype->FieldOffset("interv")); Val* postprocessor = fval->LookupWithDefault(rtype->FieldOffset("postprocessor")); + Val* config = fval->LookupWithDefault(rtype->FieldOffset("config")); Filter* filter = new Filter; filter->name = name->AsString()->CheckString(); @@ -530,6 +532,7 @@ bool Manager::AddFilter(EnumVal* id, RecordVal* fval) filter->remote = log_remote->AsBool(); filter->interval = interv->AsInterval(); filter->postprocessor = postprocessor ? postprocessor->AsFunc() : 0; + filter->config = config->Ref()->AsTableVal(); Unref(name); Unref(pred); @@ -538,6 +541,7 @@ bool Manager::AddFilter(EnumVal* id, RecordVal* fval) Unref(log_remote); Unref(interv); Unref(postprocessor); + Unref(config); // Build the list of fields that the filter wants included, including // potentially rolling out fields. 
@@ -768,6 +772,22 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) WriterBackend::WriterInfo info; info.path = path; + HashKey* k; + IterCookie* c = filter->config->AsTable()->InitForIteration(); + + TableEntryVal* v; + while ( (v = filter->config->AsTable()->NextEntry(k, c)) ) + { + ListVal* index = filter->config->RecoverIndex(k); + string key = index->Index(0)->AsString()->CheckString(); + string value = v->Value()->AsString()->CheckString(); + info.config.insert(std::make_pair(key, value)); + Unref(index); + delete k; + } + + // CreateWriter() will set the other fields in info. + writer = CreateWriter(stream->id, filter->writer, info, filter->num_fields, arg_fields, filter->local, filter->remote); @@ -777,7 +797,6 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) Unref(columns); return false; } - } // Alright, can do the write now. @@ -977,8 +996,6 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer WriterFrontend* writer_obj = new WriterFrontend(id, writer, local, remote); assert(writer_obj); - writer_obj->Init(info, num_fields, fields); - WriterInfo* winfo = new WriterInfo; winfo->type = writer->Ref()->AsEnumVal(); winfo->writer = writer_obj; @@ -1020,6 +1037,16 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer Stream::WriterMap::value_type(Stream::WriterPathPair(writer->AsEnum(), info.path), winfo)); + // Still need to set the WriterInfo's rotation parameters, which we + // computed above. + const char* base_time = log_rotate_base_time ? + log_rotate_base_time->AsString()->CheckString() : 0; + + winfo->info.rotation_interval = winfo->interval; + winfo->info.rotation_base = parse_rotate_base_time(base_time); + + writer_obj->Init(winfo->info, num_fields, fields); + return writer_obj; } @@ -1223,8 +1250,9 @@ void Manager::InstallRotationTimer(WriterInfo* winfo) const char* base_time = log_rotate_base_time ? log_rotate_base_time->AsString()->CheckString() : 0; + double base = parse_rotate_base_time(base_time); double delta_t = - calc_next_rotate(rotation_interval, base_time); + calc_next_rotate(network_time, rotation_interval, base); winfo->rotation_timer = new RotationTimer(network_time + delta_t, winfo, true); diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 35bb27d27b..a31b0ebc0f 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -63,12 +63,48 @@ using namespace logging; bool WriterBackend::WriterInfo::Read(SerializationFormat* fmt) { - return fmt->Read(&path, "path"); + int size; + + if ( ! (fmt->Read(&path, "path") && + fmt->Read(&rotation_base, "rotation_base") && + fmt->Read(&rotation_interval, "rotation_interval") && + fmt->Read(&size, "config_size")) ) + return false; + + config.clear(); + + while ( size ) + { + string value; + string key; + + if ( ! (fmt->Read(&value, "config-value") && fmt->Read(&value, "config-key")) ) + return false; + + config.insert(std::make_pair(value, key)); + } + + return true; } + bool WriterBackend::WriterInfo::Write(SerializationFormat* fmt) const { - return fmt->Write(path, "path"); + int size = config.size(); + + if ( ! (fmt->Write(path, "path") && + fmt->Write(rotation_base, "rotation_base") && + fmt->Write(rotation_interval, "rotation_interval") && + fmt->Write(size, "config_size")) ) + return false; + + for ( config_map::const_iterator i = config.begin(); i != config.end(); ++i ) + { + if ( ! 
(fmt->Write(i->first, "config-value") && fmt->Write(i->second, "config-key")) ) + return false; + } + + return true; } WriterBackend::WriterBackend(WriterFrontend* arg_frontend) : MsgThread() diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 30e1995430..84c43818a6 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -48,6 +48,8 @@ public: */ struct WriterInfo { + typedef std::map config_map; + /** * A string left to the interpretation of the writer * implementation; it corresponds to the value configured on @@ -55,6 +57,22 @@ public: */ string path; + /** + * The rotation interval as configured for this writer. + */ + double rotation_interval; + + /** + * The parsed value of log_rotate_base_time in seconds. + */ + double rotation_base; + + /** + * A map of key/value pairs corresponding to the relevant + * filter's "config" table. + */ + std::map config; + private: friend class ::RemoteSerializer; diff --git a/src/logging/writers/None.cc b/src/logging/writers/None.cc index e133394722..acf9355cf7 100644 --- a/src/logging/writers/None.cc +++ b/src/logging/writers/None.cc @@ -1,9 +1,36 @@ #include "None.h" +#include "NetVar.h" using namespace logging; using namespace writer; +bool None::DoInit(const WriterInfo& info, int num_fields, + const threading::Field* const * fields) + { + if ( BifConst::LogNone::debug ) + { + std::cout << "[logging::writer::None]" << std::endl; + std::cout << " path=" << info.path << std::endl; + std::cout << " rotation_interval=" << info.rotation_interval << std::endl; + std::cout << " rotation_base=" << info.rotation_base << std::endl; + + for ( std::map::const_iterator i = info.config.begin(); i != info.config.end(); i++ ) + std::cout << " config[" << i->first << "] = " << i->second << std::endl; + + for ( int i = 0; i < num_fields; i++ ) + { + const threading::Field* field = fields[i]; + std::cout << " field " << field->name << ": " + << type_name(field->type) << std::endl; + } + + std::cout << std::endl; + } + + return true; + } + bool None::DoRotate(string rotated_path, double open, double close, bool terminating) { if ( ! FinishedRotation(string("/dev/null"), Info().path, open, close, terminating)) diff --git a/src/logging/writers/None.h b/src/logging/writers/None.h index 89ba690e09..7e2e4ef4eb 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/None.h @@ -19,7 +19,7 @@ public: protected: virtual bool DoInit(const WriterInfo& info, int num_fields, - const threading::Field* const * fields) { return true; } + const threading::Field* const * fields); virtual bool DoWrite(int num_fields, const threading::Field* const* fields, threading::Value** vals) { return true; } @@ -27,7 +27,7 @@ protected: virtual bool DoRotate(string rotated_path, double open, double close, bool terminating); virtual bool DoFlush() { return true; } - virtual bool DoFinish() { return true; } + virtual bool DoFinish() { WriterBackend::DoFinish(); return true; } }; } diff --git a/src/util.cc b/src/util.cc index 798be400d1..16df52b987 100644 --- a/src/util.cc +++ b/src/util.cc @@ -1082,18 +1082,8 @@ const char* log_file_name(const char* tag) return fmt("%s.%s", tag, (env ? env : "log")); } -double calc_next_rotate(double interval, const char* rotate_base_time) +double parse_rotate_base_time(const char* rotate_base_time) { - double current = network_time; - - // Calculate start of day. 
- time_t teatime = time_t(current); - - struct tm t; - t = *localtime(&teatime); - t.tm_hour = t.tm_min = t.tm_sec = 0; - double startofday = mktime(&t); - double base = -1; if ( rotate_base_time && rotate_base_time[0] != '\0' ) @@ -1105,6 +1095,19 @@ double calc_next_rotate(double interval, const char* rotate_base_time) base = t.tm_min * 60 + t.tm_hour * 60 * 60; } + return base; + } + +double calc_next_rotate(double current, double interval, double base) + { + // Calculate start of day. + time_t teatime = time_t(current); + + struct tm t; + t = *localtime_r(&teatime, &t); + t.tm_hour = t.tm_min = t.tm_sec = 0; + double startofday = mktime(&t); + if ( base < 0 ) // No base time given. To get nice timestamps, we round // the time up to the next multiple of the rotation interval. diff --git a/src/util.h b/src/util.h index 6b237edfd8..6ca584900c 100644 --- a/src/util.h +++ b/src/util.h @@ -193,9 +193,22 @@ extern FILE* rotate_file(const char* name, RecordVal* rotate_info); // This mimics the script-level function with the same name. const char* log_file_name(const char* tag); +// Parse a time string of the form "HH:MM" (as used for the rotation base +// time) into a double representing the number of seconds. Returns -1 if the +// string cannot be parsed. The function's result is intended to be used with +// calc_next_rotate(). +// +// This function is not thread-safe. +double parse_rotate_base_time(const char* rotate_base_time); + // Calculate the duration until the next time a file is to be rotated, based -// on the given rotate_interval and rotate_base_time. -double calc_next_rotate(double rotate_interval, const char* rotate_base_time); +// on the given rotate_interval and rotate_base_time. 'current' the the +// current time to be used as base, 'rotate_interval' the rotation interval, +// and 'base' the value returned by parse_rotate_base_time(). For the latter, +// if the function returned -1, that's fine, calc_next_rotate() handles that. +// +// This function is thread-safe. +double calc_next_rotate(double current, double rotate_interval, double base); // Terminates processing gracefully, similar to pressing CTRL-C. void terminate_processing(); diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.none-debug/output b/testing/btest/Baseline/scripts.base.frameworks.logging.none-debug/output new file mode 100644 index 0000000000..b2a8921c38 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.none-debug/output @@ -0,0 +1,12 @@ +[logging::writer::None] + path=ssh + rotation_interval=3600 + rotation_base=300 + config[foo] = bar + config[foo2] = bar2 + field id.orig_p: port + field id.resp_h: addr + field id.resp_p: port + field status: string + field country: string + diff --git a/testing/btest/scripts/base/frameworks/logging/none-debug.bro b/testing/btest/scripts/base/frameworks/logging/none-debug.bro new file mode 100644 index 0000000000..5d2e98323a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/none-debug.bro @@ -0,0 +1,37 @@ +# +# @TEST-EXEC: bro -b %INPUT >output +# @TEST-EXEC: btest-diff output + +redef Log::default_writer = Log::WRITER_NONE; +redef LogNone::debug = T; +redef Log::default_rotation_interval= 1hr; +redef log_rotate_base_time = "00:05"; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event bro_init() +{ + local config: table[string] of string; + config["foo"]="bar"; + config["foo2"]="bar2"; + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::create_stream(SSH::LOG, [$columns=Log]); + + Log::remove_default_filter(SSH::LOG); + Log::add_filter(SSH::LOG, [$name="f1", $exclude=set("t", "id.orig_h"), $config=config]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); +} + From c3b9a2a29cca3b813865e389fa23dd7723fb0699 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 25 Jun 2012 13:09:16 -0500 Subject: [PATCH 432/651] Fix typo in NEWS. --- NEWS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 848a51acc5..ec00ae921a 100644 --- a/NEWS +++ b/NEWS @@ -28,7 +28,7 @@ New Functionality contain both IPv4 and IPv6 addresses. Support for the most common ICMPv6 message types has been added. Also, the FTP EPSV and EPRT commands are now handled properly. Internally, the way IP addresses - are stored internally has been improved, so Bro can handle both IPv4 + are stored has been improved, so Bro can handle both IPv4 and IPv6 by default without any special configuration. In addition to Bro itself, the other Bro components have also been From c7338a07311102940a0ddfaf3103ae93b9b26828 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 25 Jun 2012 14:54:15 -0700 Subject: [PATCH 433/651] for bug-searching: set frontend type before starting the thread. This means that the thread type will be output correctly in the error message. return errno string of pthread functions called in thread start --- src/input/Manager.cc | 2 +- src/logging/Manager.cc | 2 +- src/threading/BasicThread.cc | 19 ++++++++++++------- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 63fa59d0bc..9d2333ac9c 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -255,10 +255,10 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) assert(ir->factory); + frontend->ty_name = ir->name; ReaderBackend* backend = (*ir->factory)(frontend); assert(backend); - frontend->ty_name = ir->name; return backend; } diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index f0b5cc1748..e916922edc 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -191,10 +191,10 @@ WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) assert(ld->factory); + frontend->ty_name = ld->name; WriterBackend* backend = (*ld->factory)(frontend); assert(backend); - frontend->ty_name = ld->name; return backend; } diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index e590b13434..96d0d5efd2 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -80,18 +80,23 @@ const char* BasicThread::Fmt(const char* format, ...) void BasicThread::Start() { + int err; + if ( started ) return; - if ( pthread_mutex_init(&terminate, 0) != 0 ) - reporter->FatalError("Cannot create terminate mutex for thread %s", name.c_str()); + err = pthread_mutex_init(&terminate, 0); + if ( err != 0 ) + reporter->FatalError("Cannot create terminate mutex for thread %s:%s", name.c_str(), strerror(err)); // We use this like a binary semaphore and acquire it immediately. 
- if ( pthread_mutex_lock(&terminate) != 0 ) - reporter->FatalError("Cannot aquire terminate mutex for thread %s", name.c_str()); - - if ( pthread_create(&pthread, 0, BasicThread::launcher, this) != 0 ) - reporter->FatalError("Cannot create thread %s", name.c_str()); + err = pthread_mutex_lock(&terminate); + if ( err != 0 ) + reporter->FatalError("Cannot aquire terminate mutex for thread %s:%s", name.c_str(), strerror(err)); + + err = pthread_create(&pthread, 0, BasicThread::launcher, this); + if ( err != 0 ) + reporter->FatalError("Cannot create thread %s:%s", name.c_str(), strerror(err)); DBG_LOG(DBG_THREADING, "Started thread %s", name.c_str()); From 5ab2545ff3da7b210e368b81fff87c12614d6ab8 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 26 Jun 2012 11:03:15 -0500 Subject: [PATCH 434/651] Fix typos in NEWS for Bro 2.1 beta --- NEWS | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index ec00ae921a..4377049813 100644 --- a/NEWS +++ b/NEWS @@ -38,14 +38,14 @@ New Functionality - Bro now decapsulates tunnels via its new tunnel framework located in scripts/base/frameworks/tunnels. It currently supports Teredo, AYIYA, IP-in-IP (both IPv4 and IPv6), and SOCKS. For all these, it - logs the outher tunnel connections in both conn.log and tunnel.log, + logs the outer tunnel connections in both conn.log and tunnel.log, and then proceeds to analyze the inner payload as if it were not tunneled, including also logging that session in conn.log. For SOCKS, it generates a new socks.log in addition with more information. - Bro now features a flexible input framework that allows users to - integrate external information in real-time into Bro while it + integrate external information in real-time into Bro while it's processing network traffic. The most direct use-case at the moment is reading data from ASCII files into Bro tables, with updates picked up automatically when the file changes during runtime. See @@ -57,7 +57,7 @@ New Functionality - Bro's default ASCII log format is not exactly the most efficient way for storing and searching large volumes of data. An an alternative, - Bro nows comes with experimental support for DataSeries output, an + Bro now comes with experimental support for DataSeries output, an efficient binary format for recording structured bulk data. DataSeries is developed and maintained at HP Labs. See doc/logging-dataseries for more information. @@ -66,7 +66,7 @@ New Functionality Changed Functionality ~~~~~~~~~~~~~~~~~~~~~ -The following summarized the most important differences in existing +The following summarizes the most important differences in existing functionality. Note that this list is not complete, see CHANGES for the full set. @@ -100,7 +100,7 @@ the full set. a bunch of Bro threads. - We renamed the configure option --enable-perftools to - --enable-perftool-debug to indicate that the switch is only relevant + --enable-perftools-debug to indicate that the switch is only relevant for debugging the heap. - Bro's ICMP analyzer now handles both IPv4 and IPv6 messages with a @@ -110,8 +110,8 @@ the full set. - Log postprocessor scripts get an additional argument indicating the type of the log writer in use (e.g., "ascii"). -- BroControl's make-archive-name scripts also receives the writer - type, but as it's 2nd(!) argument. If you're using a custom version +- BroControl's make-archive-name script also receives the writer + type, but as its 2nd(!) argument. 
If you're using a custom version of that script, you need to adapt it. See the shipped version for details. From 94f0bf215783b7b529a7960da6bb463e4fe8c0cf Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 26 Jun 2012 14:55:36 -0500 Subject: [PATCH 435/651] Fix typos in event documentation Fix typos previously committed (but apparently overwritten later), and fix typos for new events. --- src/event.bif | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/src/event.bif b/src/event.bif index a924bf4888..705c66aa6b 100644 --- a/src/event.bif +++ b/src/event.bif @@ -157,7 +157,7 @@ event new_connection%(c: connection%); ## e: The new encapsulation. event tunnel_changed%(c: connection, e: EncapsulatingConnVector%); -## Generated when reassembly starts for a TCP connection. The event is raised +## Generated when reassembly starts for a TCP connection. This event is raised ## at the moment when Bro's TCP analyzer enables stream reassembly for a ## connection. ## @@ -522,7 +522,7 @@ event esp_packet%(p: pkt_hdr%); ## .. bro:see:: new_packet tcp_packet ipv6_ext_headers event mobile_ipv6_message%(p: pkt_hdr%); -## Genereated for any IPv6 packet encapsulated in a Teredo tunnel. +## Generated for any IPv6 packet encapsulated in a Teredo tunnel. ## See :rfc:`4380` for more information about the Teredo protocol. ## ## outer: The Teredo tunnel connection. @@ -532,10 +532,10 @@ event mobile_ipv6_message%(p: pkt_hdr%); ## .. bro:see:: teredo_authentication teredo_origin_indication teredo_bubble ## ## .. note:: Since this event may be raised on a per-packet basis, handling -## it may become particular expensive for real-time analysis. +## it may become particularly expensive for real-time analysis. event teredo_packet%(outer: connection, inner: teredo_hdr%); -## Genereated for IPv6 packets encapsulated in a Teredo tunnel that +## Generated for IPv6 packets encapsulated in a Teredo tunnel that ## use the Teredo authentication encapsulation method. ## See :rfc:`4380` for more information about the Teredo protocol. ## @@ -546,10 +546,10 @@ event teredo_packet%(outer: connection, inner: teredo_hdr%); ## .. bro:see:: teredo_packet teredo_origin_indication teredo_bubble ## ## .. note:: Since this event may be raised on a per-packet basis, handling -## it may become particular expensive for real-time analysis. +## it may become particularly expensive for real-time analysis. event teredo_authentication%(outer: connection, inner: teredo_hdr%); -## Genereated for IPv6 packets encapsulated in a Teredo tunnel that +## Generated for IPv6 packets encapsulated in a Teredo tunnel that ## use the Teredo origin indication encapsulation method. ## See :rfc:`4380` for more information about the Teredo protocol. ## @@ -560,10 +560,10 @@ event teredo_authentication%(outer: connection, inner: teredo_hdr%); ## .. bro:see:: teredo_packet teredo_authentication teredo_bubble ## ## .. note:: Since this event may be raised on a per-packet basis, handling -## it may become particular expensive for real-time analysis. +## it may become particularly expensive for real-time analysis. event teredo_origin_indication%(outer: connection, inner: teredo_hdr%); -## Genereated for Teredo bubble packets. That is, IPv6 packets encapsulated +## Generated for Teredo bubble packets. That is, IPv6 packets encapsulated ## in a Teredo tunnel that have a Next Header value of :bro:id:`IPPROTO_NONE`. ## See :rfc:`4380` for more information about the Teredo protocol. 
## @@ -574,15 +574,15 @@ event teredo_origin_indication%(outer: connection, inner: teredo_hdr%); ## .. bro:see:: teredo_packet teredo_authentication teredo_origin_indication ## ## .. note:: Since this event may be raised on a per-packet basis, handling -## it may become particular expensive for real-time analysis. +## it may become particularly expensive for real-time analysis. event teredo_bubble%(outer: connection, inner: teredo_hdr%); -## Generated for every packet that has non-empty transport-layer payload. This is a -## very low-level and expensive event that should be avoided when at all possible. -## It's usually infeasible to handle when processing even medium volumes of -## traffic in real-time. It's even worse than :bro:id:`new_packet`. That said, if -## you work from a trace and want to do some packet-level analysis, it may come in -## handy. +## Generated for every packet that has a non-empty transport-layer payload. +## This is a very low-level and expensive event that should be avoided when +## at all possible. It's usually infeasible to handle when processing even +## medium volumes of traffic in real-time. It's even worse than +## :bro:id:`new_packet`. That said, if you work from a trace and want to +## do some packet-level analysis, it may come in handy. ## ## c: The connection the packet is part of. ## @@ -6216,13 +6216,12 @@ event signature_match%(state: signature_state, msg: string, data: string%); ## ## request_type: The type of the request. ## -## dstaddr: Address that the tunneled traffic should be sent to. -## -## dstname: DNS name of the host that the tunneled traffic should be sent to. +## sa: Address that the tunneled traffic should be sent to. ## ## p: The destination port for the proxied traffic. ## -## user: Username given for the SOCKS connection. This is not yet implemented for SOCKSv5. +## user: Username given for the SOCKS connection. This is not yet implemented +## for SOCKSv5. event socks_request%(c: connection, version: count, request_type: count, sa: SOCKS::Address, p: port, user: string%); ## Generated when a SOCKS reply is analyzed. @@ -6233,9 +6232,7 @@ event socks_request%(c: connection, version: count, request_type: count, sa: SOC ## ## reply: The status reply from the server. ## -## dstaddr: The address that the server sent the traffic to. -## -## dstname: The name the server sent the traffic to. Only applicable for SOCKSv5. +## sa: The address that the server sent the traffic to. ## ## p: The destination port for the proxied traffic. event socks_reply%(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port%); From 9ae9b2aa4dca3c7fe1c4cb310dac8563caa36700 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 26 Jun 2012 16:59:56 -0500 Subject: [PATCH 436/651] Extract ICMPv6 NDP options and include in ICMP events (addresses #833). This adds a new parameter of type "icmp6_nd_options" to the ICMPv6 neighbor discovery events (icmp_redirect, icmp_router_solicitation, icmp_router_advertisement, icmp_neighbor_solicitation, icmp_neighbor_advertisement) which includes data extracted from all neighbor discovery options (RFC 4861) that are present in the ICMPv6 message. 
--- scripts/base/init-bare.bro | 55 +++++ src/ICMP.cc | 197 +++++++++++++++--- src/ICMP.h | 5 +- src/event.bif | 20 +- .../Baseline/core.icmp.icmp6-events/output | 5 + .../core.icmp.icmp6-nd-options/output | 28 +++ .../btest/Traces/icmp/icmp6-nd-options.pcap | Bin 0 -> 2144 bytes .../Traces/icmp/icmp6-redirect-hdr-opt.pcap | Bin 0 -> 198 bytes testing/btest/core/icmp/icmp6-events.test | 15 +- testing/btest/core/icmp/icmp6-nd-options.test | 35 ++++ 10 files changed, 321 insertions(+), 39 deletions(-) create mode 100644 testing/btest/Baseline/core.icmp.icmp6-nd-options/output create mode 100644 testing/btest/Traces/icmp/icmp6-nd-options.pcap create mode 100644 testing/btest/Traces/icmp/icmp6-redirect-hdr-opt.pcap create mode 100644 testing/btest/core/icmp/icmp6-nd-options.test diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 3a323ad7fe..ec75c76beb 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -115,6 +115,61 @@ type icmp_context: record { DF: bool; ##< True if the packets *don't fragment* flag is set. }; +## Values extracted from a Prefix Information option in an ICMPv6 neighbor +## discovery message as specified by :rfc:`4861`. +## +## .. bro:see:: icmp6_nd_option +type icmp6_nd_prefix_info: record { + ## Number of leading bits of the *prefix* that are valid. + prefix_len: count; + ## Flag indicating the prefix can be used for on-link determination. + L_flag: bool; + ## Autonomous address-configuration flag. + A_flag: bool; + ## Length of time in seconds that the prefix is valid for purpose of + ## on-link determination (0xffffffff represents infinity). + valid_lifetime: interval; + ## Length of time in seconds that the addresses generated from the prefix + ## via stateless address autoconfiguration remain preferred + ## (0xffffffff represents infinity). + preferred_lifetime: interval; + ## An IP address or prefix of an IP address. Use the *prefix_len* field + ## to convert this into a :bro:type:`subnet`. + prefix: addr; +}; + +## Options extracted from ICMPv6 neighbor discovery messages as specified +## by :rfc:`4861`. +## +## .. bro:see:: icmp_router_solicitation icmp_router_advertisement +## icmp_neighbor_advertisement icmp_neighbor_solicitation icmp_redirect +## icmp6_nd_options +type icmp6_nd_option: record { + ## 8-bit identifier of the type of option. + otype: count; + ## 8-bit integer representing the length of the option (including the type + ## and length fields) in units of 8 octets. + len: count; + ## Source Link-Layer Address (Type 1) or Target Link-Layer Address (Type 2). + ## Byte ordering of this is dependent on the actual link-layer. + link_address: string &optional; + ## Prefix Information (Type 3). + prefix: icmp6_nd_prefix_info &optional; + ## Redirected header (Type 4). This field contains the context of the + ## original, redirected packet. + redirect: icmp_context &optional; + ## Recommended MTU for the link (Type 5). + mtu: count &optional; + ## The raw data of the option (everything after type & length fields), + ## useful for unknown option types or when the full option payload is + ## truncated in the captured packet. In those cases, option fields + ## won't be pre-extracted into the fields above. + payload: string &optional; +}; + +## A type alias for a vector of ICMPv6 neighbor discovery message options. +type icmp6_nd_options: vector of icmp6_nd_option; + # A DNS mapping between IP address and hostname resolved by Bro's internal # resolver. 
# diff --git a/src/ICMP.cc b/src/ICMP.cc index b8ddb8a292..5531d6ee45 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -169,8 +169,10 @@ void ICMP_Analyzer::NextICMP6(double t, const struct icmp* icmpp, int len, int c NeighborSolicit(t, icmpp, len, caplen, data, ip_hdr); break; case ND_ROUTER_SOLICIT: + RouterSolicit(t, icmpp, len, caplen, data, ip_hdr); + break; case ICMP6_ROUTER_RENUMBERING: - Router(t, icmpp, len, caplen, data, ip_hdr); + ICMPEvent(icmp_sent, icmpp, len, 1, ip_hdr); break; #if 0 @@ -515,10 +517,12 @@ void ICMP_Analyzer::RouterAdvert(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_router_advertisement; - uint32 reachable, retrans; + uint32 reachable = 0, retrans = 0; - memcpy(&reachable, data, sizeof(reachable)); - memcpy(&retrans, data + sizeof(reachable), sizeof(retrans)); + if ( caplen >= (int)sizeof(reachable) ) + memcpy(&reachable, data, sizeof(reachable)); + if ( caplen >= (int)sizeof(reachable) + (int)sizeof(retrans) ) + memcpy(&retrans, data + sizeof(reachable), sizeof(retrans)); val_list* vl = new val_list; vl->append(BuildConnVal()); @@ -534,6 +538,9 @@ void ICMP_Analyzer::RouterAdvert(double t, const struct icmp* icmpp, int len, vl->append(new IntervalVal((double)ntohl(reachable), Milliseconds)); vl->append(new IntervalVal((double)ntohl(retrans), Milliseconds)); + int opt_offset = sizeof(reachable) + sizeof(retrans); + vl->append(BuildNDOptionsVal(caplen - opt_offset, data + opt_offset)); + ConnectionEvent(f, vl); } @@ -542,9 +549,10 @@ void ICMP_Analyzer::NeighborAdvert(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_neighbor_advertisement; - in6_addr tgtaddr; + IPAddr tgtaddr; - memcpy(&tgtaddr.s6_addr, data, sizeof(tgtaddr.s6_addr)); + if ( caplen >= (int)sizeof(in6_addr) ) + tgtaddr = IPAddr(*((const in6_addr*)data)); val_list* vl = new val_list; vl->append(BuildConnVal()); @@ -552,7 +560,10 @@ void ICMP_Analyzer::NeighborAdvert(double t, const struct icmp* icmpp, int len, vl->append(new Val(icmpp->icmp_num_addrs & 0x80, TYPE_BOOL)); // Router vl->append(new Val(icmpp->icmp_num_addrs & 0x40, TYPE_BOOL)); // Solicited vl->append(new Val(icmpp->icmp_num_addrs & 0x20, TYPE_BOOL)); // Override - vl->append(new AddrVal(IPAddr(tgtaddr))); + vl->append(new AddrVal(tgtaddr)); + + int opt_offset = sizeof(in6_addr); + vl->append(BuildNDOptionsVal(caplen - opt_offset, data + opt_offset)); ConnectionEvent(f, vl); } @@ -562,14 +573,18 @@ void ICMP_Analyzer::NeighborSolicit(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_neighbor_solicitation; - in6_addr tgtaddr; + IPAddr tgtaddr; - memcpy(&tgtaddr.s6_addr, data, sizeof(tgtaddr.s6_addr)); + if ( caplen >= (int)sizeof(in6_addr) ) + tgtaddr = IPAddr(*((const in6_addr*)data)); val_list* vl = new val_list; vl->append(BuildConnVal()); vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); - vl->append(new AddrVal(IPAddr(tgtaddr))); + vl->append(new AddrVal(tgtaddr)); + + int opt_offset = sizeof(in6_addr); + vl->append(BuildNDOptionsVal(caplen - opt_offset, data + opt_offset)); ConnectionEvent(f, vl); } @@ -579,40 +594,35 @@ void ICMP_Analyzer::Redirect(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_redirect; - in6_addr tgtaddr, dstaddr; + IPAddr tgtaddr, dstaddr; - memcpy(&tgtaddr.s6_addr, data, sizeof(tgtaddr.s6_addr)); - 
memcpy(&dstaddr.s6_addr, data + sizeof(tgtaddr.s6_addr), sizeof(dstaddr.s6_addr)); + if ( caplen >= (int)sizeof(in6_addr) ) + tgtaddr = IPAddr(*((const in6_addr*)data)); + if ( caplen >= 2 * (int)sizeof(in6_addr) ) + dstaddr = IPAddr(*((const in6_addr*)(data + sizeof(in6_addr)))); val_list* vl = new val_list; vl->append(BuildConnVal()); vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); - vl->append(new AddrVal(IPAddr(tgtaddr))); - vl->append(new AddrVal(IPAddr(dstaddr))); + vl->append(new AddrVal(tgtaddr)); + vl->append(new AddrVal(dstaddr)); + + int opt_offset = 2 * sizeof(in6_addr); + vl->append(BuildNDOptionsVal(caplen - opt_offset, data + opt_offset)); ConnectionEvent(f, vl); } -void ICMP_Analyzer::Router(double t, const struct icmp* icmpp, int len, +void ICMP_Analyzer::RouterSolicit(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { - EventHandlerPtr f = 0; - - switch ( icmpp->icmp_type ) - { - case ND_ROUTER_SOLICIT: - f = icmp_router_solicitation; - break; - case ICMP6_ROUTER_RENUMBERING: - default: - ICMPEvent(icmp_sent, icmpp, len, 1, ip_hdr); - return; - } + EventHandlerPtr f = icmp_router_solicitation; val_list* vl = new val_list; vl->append(BuildConnVal()); vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); + vl->append(BuildNDOptionsVal(caplen, data)); ConnectionEvent(f, vl); } @@ -685,6 +695,137 @@ void ICMP_Analyzer::Context6(double t, const struct icmp* icmpp, } } +VectorVal* ICMP_Analyzer::BuildNDOptionsVal(int caplen, const u_char* data) + { + static RecordType* icmp6_nd_option_type = 0; + static RecordType* icmp6_nd_prefix_info_type = 0; + if ( ! icmp6_nd_option_type ) + { + icmp6_nd_option_type = internal_type("icmp6_nd_option")->AsRecordType(); + icmp6_nd_prefix_info_type = + internal_type("icmp6_nd_prefix_info")->AsRecordType(); + } + + VectorVal* vv = new VectorVal( + internal_type("icmp6_nd_options")->AsVectorType()); + + while ( caplen > 0 ) + { + // Must have at least type & length to continue parsing options. + if ( caplen < 2 ) + { + Weird("truncated_ICMPv6_ND_options"); + break; + } + + uint8 type = *((const uint8*)data); + uint8 length = *((const uint8*)(data + 1)); + + if ( length == 0 ) + { + Weird("zero_length_ICMPv6_ND_option"); + break; + } + + RecordVal* rv = new RecordVal(icmp6_nd_option_type); + rv->Assign(0, new Val(type, TYPE_COUNT)); + rv->Assign(1, new Val(length, TYPE_COUNT)); + + // adjust length to be in units of bytes, exclude type/length fields + length = length * 8 - 2; + + data += 2; + caplen -= 2; + + bool set_payload_field = false; + // Only parse out known options that are there in full. 
+ switch ( type ) { + case 1: + case 2: + // Source/Target Link-layer Address option + { + if ( caplen >= length ) + { + BroString* link_addr = new BroString(data, length, 0); + rv->Assign(2, new StringVal(link_addr)); + } + else + set_payload_field = true; + } + break; + + case 3: + // Prefix Information option + { + if ( caplen >= 30 ) + { + RecordVal* info = new RecordVal(icmp6_nd_prefix_info_type); + uint8 prefix_len = *((const uint8*)(data)); + bool L_flag = (*((const uint8*)(data + 1)) & 0x80) != 0; + bool A_flag = (*((const uint8*)(data + 1)) & 0x40) != 0; + uint32 valid_life = *((const uint32*)(data + 2)); + uint32 prefer_life = *((const uint32*)(data + 6)); + in6_addr prefix = *((const in6_addr*)(data + 14)); + info->Assign(0, new Val(prefix_len, TYPE_COUNT)); + info->Assign(1, new Val(L_flag, TYPE_BOOL)); + info->Assign(2, new Val(A_flag, TYPE_BOOL)); + info->Assign(3, new IntervalVal((double)ntohl(valid_life), Seconds)); + info->Assign(4, new IntervalVal((double)ntohl(prefer_life), Seconds)); + info->Assign(5, new AddrVal(IPAddr(prefix))); + rv->Assign(3, info); + } + else + set_payload_field = true; + } + break; + + case 4: + // Redirected Header option + { + if ( caplen >= length ) + { + const u_char* hdr = data + 6; + rv->Assign(4, ExtractICMP6Context(length - 6, hdr)); + } + else + set_payload_field = true; + } + break; + + case 5: + // MTU option + { + if ( caplen >= 6 ) + rv->Assign(5, new Val(ntohl(*((const uint32*)(data + 2))), + TYPE_COUNT)); + else + set_payload_field = true; + } + break; + + default: + { + set_payload_field = true; + } + break; + } + + if ( set_payload_field ) + { + BroString* payload = + new BroString(data, min((int)length, caplen), 0); + rv->Assign(6, new StringVal(payload)); + } + + data += length; + caplen -= length; + + vv->Assign(vv->Size(), rv, 0); + } + + return vv; + } + int ICMP4_counterpart(int icmp_type, int icmp_code, bool& is_one_way) { is_one_way = false; diff --git a/src/ICMP.h b/src/ICMP.h index 33773b9762..1e30b7ff54 100644 --- a/src/ICMP.h +++ b/src/ICMP.h @@ -48,7 +48,7 @@ protected: int caplen, const u_char*& data, const IP_Hdr* ip_hdr); void NeighborSolicit(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); - void Router(double t, const struct icmp* icmpp, int len, + void RouterSolicit(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); void Describe(ODesc* d) const; @@ -75,6 +75,9 @@ protected: void Context6(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); + // RFC 4861 Neighbor Discover message options + VectorVal* BuildNDOptionsVal(int caplen, const u_char* data); + RecordVal* icmp_conn_val; int type; int code; diff --git a/src/event.bif b/src/event.bif index a924bf4888..e0d1c2e1c6 100644 --- a/src/event.bif +++ b/src/event.bif @@ -1054,9 +1054,11 @@ event icmp_parameter_problem%(c: connection, icmp: icmp_conn, code: count, conte ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## +## options: Any Neighbor Discovery options included with message (:rfc:`4861`). +## ## .. bro:see:: icmp_router_advertisement ## icmp_neighbor_solicitation icmp_neighbor_advertisement icmp_redirect -event icmp_router_solicitation%(c: connection, icmp: icmp_conn%); +event icmp_router_solicitation%(c: connection, icmp: icmp_conn, options: icmp6_nd_options%); ## Generated for ICMP *router advertisement* messages. 
## @@ -1090,9 +1092,11 @@ event icmp_router_solicitation%(c: connection, icmp: icmp_conn%); ## ## retrans_timer: How long a host should wait before retransmitting. ## +## options: Any Neighbor Discovery options included with message (:rfc:`4861`). +## ## .. bro:see:: icmp_router_solicitation ## icmp_neighbor_solicitation icmp_neighbor_advertisement icmp_redirect -event icmp_router_advertisement%(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval%); +event icmp_router_advertisement%(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval, options: icmp6_nd_options%); ## Generated for ICMP *neighbor solicitation* messages. ## @@ -1107,9 +1111,11 @@ event icmp_router_advertisement%(c: connection, icmp: icmp_conn, cur_hop_limit: ## ## tgt: The IP address of the target of the solicitation. ## +## options: Any Neighbor Discovery options included with message (:rfc:`4861`). +## ## .. bro:see:: icmp_router_solicitation icmp_router_advertisement ## icmp_neighbor_advertisement icmp_redirect -event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt:addr%); +event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt: addr, options: icmp6_nd_options%); ## Generated for ICMP *neighbor advertisement* messages. ## @@ -1131,9 +1137,11 @@ event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt:addr%); ## tgt: the Target Address in the soliciting message or the address whose ## link-layer address has changed for unsolicited adverts. ## +## options: Any Neighbor Discovery options included with message (:rfc:`4861`). +## ## .. bro:see:: icmp_router_solicitation icmp_router_advertisement ## icmp_neighbor_solicitation icmp_redirect -event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt:addr%); +event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt: addr, options: icmp6_nd_options%); ## Generated for ICMP *redirect* messages. ## @@ -1151,9 +1159,11 @@ event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, router: bool, ## ## dest: The address of the destination which is redirected to the target. ## +## options: Any Neighbor Discovery options included with message (:rfc:`4861`). +## ## .. bro:see:: icmp_router_solicitation icmp_router_advertisement ## icmp_neighbor_solicitation icmp_neighbor_advertisement -event icmp_redirect%(c: connection, icmp: icmp_conn, tgt: addr, dest: addr%); +event icmp_redirect%(c: connection, icmp: icmp_conn, tgt: addr, dest: addr, options: icmp6_nd_options%); ## Generated when a TCP connection terminated, passing on statistics about the ## two endpoints. 
This event is always generated when Bro flushes the internal diff --git a/testing/btest/Baseline/core.icmp.icmp6-events/output b/testing/btest/Baseline/core.icmp.icmp6-events/output index 81075b716a..fdb58e5be1 100644 --- a/testing/btest/Baseline/core.icmp.icmp6-events/output +++ b/testing/btest/Baseline/core.icmp.icmp6-events/output @@ -41,6 +41,7 @@ icmp_echo_reply (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) icmp_redirect (tgt=fe80::cafe, dest=fe80::babe) conn_id: [orig_h=fe80::dead, orig_p=137/icmp, resp_h=fe80::beef, resp_p=0/icmp] icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=137, icode=0, len=32, hlim=255, v6=T] + options: [] icmp_router_advertisement cur_hop_limit=13 managed=T @@ -54,15 +55,19 @@ icmp_router_advertisement retrans_timer=1.0 sec 300.0 msecs conn_id: [orig_h=fe80::dead, orig_p=134/icmp, resp_h=fe80::beef, resp_p=133/icmp] icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=134, icode=0, len=8, hlim=255, v6=T] + options: [] icmp_neighbor_advertisement (tgt=fe80::babe) router=T solicited=F override=T conn_id: [orig_h=fe80::dead, orig_p=136/icmp, resp_h=fe80::beef, resp_p=135/icmp] icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=136, icode=0, len=16, hlim=255, v6=T] + options: [] icmp_router_solicitation conn_id: [orig_h=fe80::dead, orig_p=133/icmp, resp_h=fe80::beef, resp_p=134/icmp] icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=133, icode=0, len=0, hlim=255, v6=T] + options: [] icmp_neighbor_solicitation (tgt=fe80::babe) conn_id: [orig_h=fe80::dead, orig_p=135/icmp, resp_h=fe80::beef, resp_p=136/icmp] icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=135, icode=0, len=16, hlim=255, v6=T] + options: [] diff --git a/testing/btest/Baseline/core.icmp.icmp6-nd-options/output b/testing/btest/Baseline/core.icmp.icmp6-nd-options/output new file mode 100644 index 0000000000..1a3958f32d --- /dev/null +++ b/testing/btest/Baseline/core.icmp.icmp6-nd-options/output @@ -0,0 +1,28 @@ +icmp_redirect options + [otype=4, len=8, link_address=, prefix=, redirect=[id=[orig_h=fe80::aaaa, orig_p=30000/udp, resp_h=fe80::bbbb, resp_p=13000/udp], len=56, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F], mtu=, payload=] +icmp_neighbor_advertisement options + [otype=2, len=1, link_address=\xc2\0T\xf5\0\0, prefix=, redirect=, mtu=, payload=] + MAC: c20054f50000 +icmp_router_advertisement options + [otype=1, len=1, link_address=\xc2\0T\xf5\0\0, prefix=, redirect=, mtu=, payload=] + MAC: c20054f50000 + [otype=5, len=1, link_address=, prefix=, redirect=, mtu=1500, payload=] + [otype=3, len=4, link_address=, prefix=[prefix_len=64, L_flag=T, A_flag=T, valid_lifetime=30.0 days, preferred_lifetime=7.0 days, prefix=2001:db8:0:1::], redirect=, mtu=, payload=] +icmp_neighbor_advertisement options + [otype=2, len=1, link_address=\xc2\0T\xf5\0\0, prefix=, redirect=, mtu=, payload=] + MAC: c20054f50000 +icmp_router_advertisement options + [otype=1, len=1, link_address=\xc2\0T\xf5\0\0, prefix=, redirect=, mtu=, payload=] + MAC: c20054f50000 + [otype=5, len=1, link_address=, prefix=, redirect=, mtu=1500, payload=] + [otype=3, len=4, link_address=, prefix=[prefix_len=64, L_flag=T, A_flag=T, valid_lifetime=30.0 days, preferred_lifetime=7.0 days, prefix=2001:db8:0:1::], redirect=, mtu=, payload=] +icmp_router_advertisement options + [otype=1, len=1, link_address=\xc2\0T\xf5\0\0, prefix=, redirect=, mtu=, payload=] + MAC: c20054f50000 + [otype=5, len=1, link_address=, prefix=, redirect=, mtu=1500, payload=] + [otype=3, len=4, link_address=, 
prefix=[prefix_len=64, L_flag=T, A_flag=T, valid_lifetime=30.0 days, preferred_lifetime=7.0 days, prefix=2001:db8:0:1::], redirect=, mtu=, payload=] +icmp_router_advertisement options + [otype=1, len=1, link_address=\xc2\0T\xf5\0\0, prefix=, redirect=, mtu=, payload=] + MAC: c20054f50000 + [otype=5, len=1, link_address=, prefix=, redirect=, mtu=1500, payload=] + [otype=3, len=4, link_address=, prefix=[prefix_len=64, L_flag=T, A_flag=T, valid_lifetime=30.0 days, preferred_lifetime=7.0 days, prefix=2001:db8:0:1::], redirect=, mtu=, payload=] diff --git a/testing/btest/Traces/icmp/icmp6-nd-options.pcap b/testing/btest/Traces/icmp/icmp6-nd-options.pcap new file mode 100644 index 0000000000000000000000000000000000000000..1103d9bf9c853e79ba52b6d3addd9ad609dd2864 GIT binary patch literal 2144 zcmca|c+)~A1{MZ5P+(wS1ah(yHhKKiV?K(g&_9#Diq!sW_AouLe<286NL;y~DzHijer92nR+fChj8Bh++OP%tsD z-eG2OIKZIZ%fP^C)c_JvVC3Baq`HpA)H!`OoK*&cLGyB-`#L07H>M0g+2VTo6DCaYm4OSiWm!cn-?d z81f)nd>(Kz*a7807@WJAP;BAATTX%uWol)p2Ad8ttp#WrQmKpXW5t7JW#Pb;hagu-u4hD!7l8^rln^5nFF^H(c$uIE@+(Tb0p&rq z)B$Zl3Q2TZ2!$k2eLKUR=OEKz`SS1dpC490O<#*@I$815`5oDGSUgPzx_#ell$49^ tc3=ph$1ZvZAVNr51!N1l{1l)q+dP<%;{n|dglqw-N7z!d4Z{|oJOFqZl^g&7 literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/icmp/icmp6-redirect-hdr-opt.pcap b/testing/btest/Traces/icmp/icmp6-redirect-hdr-opt.pcap new file mode 100644 index 0000000000000000000000000000000000000000..d05351910849141fd8f407973a86bd7340bd3406 GIT binary patch literal 198 zcmca|c+)~A1{MYw_+QV!zzF1o>%8)Rw~>iq9*_;f|G@yFs_kw9h>>CS|6c=$0R#8e zqVo5>?__v<3?z#xe(E18f7d=14u}~EAWA^c0abq0DpdaN-K7RbCl~}K^F%-a0Iz>6 AssI20 literal 0 HcmV?d00001 diff --git a/testing/btest/core/icmp/icmp6-events.test b/testing/btest/core/icmp/icmp6-events.test index 052ba91ee6..5263dd6e7f 100644 --- a/testing/btest/core/icmp/icmp6-events.test +++ b/testing/btest/core/icmp/icmp6-events.test @@ -66,11 +66,12 @@ event icmp_parameter_problem(c: connection, icmp: icmp_conn, code: count, contex print " icmp_context: " + fmt("%s", context); } -event icmp_redirect(c: connection, icmp: icmp_conn, tgt: addr, dest: addr) +event icmp_redirect(c: connection, icmp: icmp_conn, tgt: addr, dest: addr, options: icmp6_nd_options) { print "icmp_redirect (tgt=" + fmt("%s", tgt) + ", dest=" + fmt("%s", dest) + ")"; print " conn_id: " + fmt("%s", c$id); print " icmp_conn: " + fmt("%s", icmp); + print " options: " + fmt("%s", options); } event icmp_error_message(c: connection, icmp: icmp_conn, code: count, context: icmp_context) @@ -81,14 +82,15 @@ event icmp_error_message(c: connection, icmp: icmp_conn, code: count, context: i print " icmp_context: " + fmt("%s", context); } -event icmp_neighbor_solicitation(c: connection, icmp: icmp_conn, tgt: addr) +event icmp_neighbor_solicitation(c: connection, icmp: icmp_conn, tgt: addr, options: icmp6_nd_options) { print "icmp_neighbor_solicitation (tgt=" + fmt("%s", tgt) + ")"; print " conn_id: " + fmt("%s", c$id); print " icmp_conn: " + fmt("%s", icmp); + print " options: " + fmt("%s", options); } -event icmp_neighbor_advertisement(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt: addr) +event icmp_neighbor_advertisement(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt: addr, options: icmp6_nd_options) { print "icmp_neighbor_advertisement (tgt=" + fmt("%s", tgt) + ")"; print " router=" + fmt("%s", router); @@ -96,16 +98,18 @@ event icmp_neighbor_advertisement(c: connection, icmp: icmp_conn, router: bool, print " override=" + fmt("%s", override); print 
" conn_id: " + fmt("%s", c$id); print " icmp_conn: " + fmt("%s", icmp); + print " options: " + fmt("%s", options); } -event icmp_router_solicitation(c: connection, icmp: icmp_conn) +event icmp_router_solicitation(c: connection, icmp: icmp_conn, options: icmp6_nd_options) { print "icmp_router_solicitation"; print " conn_id: " + fmt("%s", c$id); print " icmp_conn: " + fmt("%s", icmp); + print " options: " + fmt("%s", options); } -event icmp_router_advertisement(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval) +event icmp_router_advertisement(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval, options: icmp6_nd_options) { print "icmp_router_advertisement"; print " cur_hop_limit=" + fmt("%s", cur_hop_limit); @@ -120,4 +124,5 @@ event icmp_router_advertisement(c: connection, icmp: icmp_conn, cur_hop_limit: c print " retrans_timer=" + fmt("%s", retrans_timer); print " conn_id: " + fmt("%s", c$id); print " icmp_conn: " + fmt("%s", icmp); + print " options: " + fmt("%s", options); } diff --git a/testing/btest/core/icmp/icmp6-nd-options.test b/testing/btest/core/icmp/icmp6-nd-options.test new file mode 100644 index 0000000000..64543852a3 --- /dev/null +++ b/testing/btest/core/icmp/icmp6-nd-options.test @@ -0,0 +1,35 @@ +# These tests all check that ICMP6 events get raised with correct arguments. + +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-redirect-hdr-opt.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-nd-options.pcap %INPUT >>output 2>&1 + +# @TEST-EXEC: btest-diff output + +event icmp_router_advertisement(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval, options: icmp6_nd_options) + { + print "icmp_router_advertisement options"; + for ( o in options ) + { + print fmt(" %s", options[o]); + if ( options[o]$otype == 1 && options[o]?$link_address ) + print fmt(" MAC: %s", + string_to_ascii_hex(options[o]$link_address)); + } + } + +event icmp_neighbor_advertisement(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt: addr, options: icmp6_nd_options) + { + print "icmp_neighbor_advertisement options"; + for ( o in options ) + { + print fmt(" %s", options[o]); + if ( options[o]$otype == 2 && options[o]?$link_address ) print fmt(" MAC: %s", string_to_ascii_hex(options[o]$link_address)); + } + } + +event icmp_redirect(c: connection, icmp: icmp_conn, tgt: addr, dest: addr, options: icmp6_nd_options) + { + print "icmp_redirect options"; + for ( o in options ) + print fmt(" %s", options[o]); + } From a651185ff9f93fedb3a82575e5107dd7460475de Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 27 Jun 2012 11:35:32 -0500 Subject: [PATCH 437/651] Fix strict-aliasing warning in RemoteSerializer.cc (fixes #834). 
--- src/RemoteSerializer.cc | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 838bafb0d6..0db77e2df3 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -4208,32 +4208,37 @@ bool SocketComm::Listen() bool SocketComm::AcceptConnection(int fd) { - sockaddr_storage client; - socklen_t len = sizeof(client); + union { + sockaddr_storage ss; + sockaddr_in s4; + sockaddr_in6 s6; + } client; + socklen_t len = sizeof(client.ss); - int clientfd = accept(fd, (sockaddr*) &client, &len); + int clientfd = accept(fd, (sockaddr*) &client.ss, &len); if ( clientfd < 0 ) { Error(fmt("accept failed, %s %d", strerror(errno), errno)); return false; } - if ( client.ss_family != AF_INET && client.ss_family != AF_INET6 ) + if ( client.ss.ss_family != AF_INET && client.ss.ss_family != AF_INET6 ) { - Error(fmt("accept fail, unknown address family %d", client.ss_family)); + Error(fmt("accept fail, unknown address family %d", + client.ss.ss_family)); close(clientfd); return false; } Peer* peer = new Peer; peer->id = id_counter++; - peer->ip = client.ss_family == AF_INET ? - IPAddr(((sockaddr_in*)&client)->sin_addr) : - IPAddr(((sockaddr_in6*)&client)->sin6_addr); + peer->ip = client.ss.ss_family == AF_INET ? + IPAddr(client.s4.sin_addr) : + IPAddr(client.s6.sin6_addr); - peer->port = client.ss_family == AF_INET ? - ntohs(((sockaddr_in*)&client)->sin_port) : - ntohs(((sockaddr_in6*)&client)->sin6_port); + peer->port = client.ss.ss_family == AF_INET ? + ntohs(client.s4.sin_port) : + ntohs(client.s6.sin6_port); peer->connected = true; peer->ssl = listen_ssl; From 21a0e74d682f0584288c6e631496bb4083e5d33f Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 28 Jun 2012 12:42:32 -0500 Subject: [PATCH 438/651] Drain events before terminating log/thread managers. Using the default scripts, the events from RemoteSerializer::LogStats() were attempting to use the logging framework after logging/threading had been terminated which never worked right and sometimes caused crashes with "fatal error: cannot lock mutex". Also made communication log baseline test pass more reliably. 
--- src/main.cc | 2 ++ .../send.log | 29 ++++++++++--------- .../communication_log_baseline.bro | 9 ++++-- 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/src/main.cc b/src/main.cc index b1d0a4d723..d94a32df63 100644 --- a/src/main.cc +++ b/src/main.cc @@ -313,6 +313,8 @@ void terminate_bro() if ( remote_serializer ) remote_serializer->LogStats(); + mgr.Drain(); + log_mgr->Terminate(); thread_mgr->Terminate(); diff --git a/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log b/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log index d3c14c8603..94e0403238 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log +++ b/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log @@ -5,17 +5,18 @@ #path communication #fields ts peer src_name connected_peer_desc connected_peer_addr connected_peer_port level message #types time string string string addr port string string -1326492291.485390 bro parent - - - info [#1/127.0.0.1:47757] added peer -1326492291.491731 bro child - - - info [#1/127.0.0.1:47757] connected -1326492291.492024 bro parent - - - info [#1/127.0.0.1:47757] peer connected -1326492291.492024 bro parent - - - info [#1/127.0.0.1:47757] phase: version -1326492291.492740 bro script - - - info connection established -1326492291.492740 bro script - - - info requesting events matching /^?(NOTHING)$?/ -1326492291.492740 bro script - - - info accepting state -1326492291.493800 bro parent - - - info [#1/127.0.0.1:47757] phase: handshake -1326492291.493800 bro parent - - - info warning: no events to request -1326492291.494161 bro parent - - - info [#1/127.0.0.1:47757] peer_description is bro -1326492291.494404 bro parent - - - info [#1/127.0.0.1:47757] peer supports keep-in-cache; using that -1326492291.494404 bro parent - - - info [#1/127.0.0.1:47757] phase: running -1326492291.494404 bro parent - - - info terminating... -1326492291.494404 bro parent - - - info [#1/127.0.0.1:47757] closing connection +1340904724.781527 bro parent - - - info [#1/127.0.0.1:47757] added peer +1340904724.784954 bro child - - - info [#1/127.0.0.1:47757] connected +1340904724.786168 bro parent - - - info [#1/127.0.0.1:47757] peer connected +1340904724.786168 bro parent - - - info [#1/127.0.0.1:47757] phase: version +1340904724.786168 bro script - - - info connection established +1340904724.786168 bro script - - - info requesting events matching /^?(NOTHING)$?/ +1340904724.786168 bro script - - - info accepting state +1340904724.787645 bro parent - - - info [#1/127.0.0.1:47757] phase: handshake +1340904724.787645 bro parent - - - info warning: no events to request +1340904724.788857 bro parent - - - info [#1/127.0.0.1:47757] peer_description is bro +1340904724.829480 bro parent - - - info [#1/127.0.0.1:47757] peer supports keep-in-cache; using that +1340904724.829480 bro parent - - - info [#1/127.0.0.1:47757] phase: running +1340904724.829480 bro parent - - - info terminating... 
+1340904724.832952 bro child - - - info terminating +1340904724.834082 bro parent - - - info [#1/127.0.0.1:47757] closing connection diff --git a/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro b/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro index 3d80ef7777..4a2ed735ef 100644 --- a/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro +++ b/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro @@ -5,7 +5,7 @@ # @TEST-EXEC: btest-bg-wait -k 10 # # Don't diff the receiver log just because port is always going to change -# @TEST-EXEC: egrep -v 'pid|socket buffer size' sender/communication.log >send.log +# @TEST-EXEC: egrep -v 'CPU|bytes|pid|socket buffer size' sender/communication.log >send.log # @TEST-EXEC: btest-diff send.log @TEST-START-FILE sender.bro @@ -19,6 +19,10 @@ redef Communication::nodes += { event remote_connection_handshake_done(p: event_peer) { terminate_communication(); + } + +event remote_connection_closed(p: event_peer) + { terminate(); } @@ -30,9 +34,8 @@ event remote_connection_handshake_done(p: event_peer) @load frameworks/communication/listen -event remote_connection_handshake_done(p: event_peer) +event remote_connection_closed(p: event_peer) { - terminate_communication(); terminate(); } From 1bbd63970a9fe5529cc9c6898c510d47ea5472af Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 28 Jun 2012 15:16:33 -0500 Subject: [PATCH 439/651] Small tweak to make test complete quicker. --- testing/btest/scripts/base/frameworks/control/id_value.bro | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/btest/scripts/base/frameworks/control/id_value.bro b/testing/btest/scripts/base/frameworks/control/id_value.bro index c5d1d063f5..ffbb9a10cf 100644 --- a/testing/btest/scripts/base/frameworks/control/id_value.bro +++ b/testing/btest/scripts/base/frameworks/control/id_value.bro @@ -22,4 +22,5 @@ redef test_var = "This is the value from the controllee"; event Control::id_value_response(id: string, val: string) { print fmt("Got an id_value_response(%s, %s) event", id, val); + terminate(); } From 41f1544332cddfa9a636c05f41371698a891de63 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 28 Jun 2012 15:48:03 -0500 Subject: [PATCH 440/651] Add front-end name to InitMessage from WriterFrontend to Backend. At the time WriterBackend::Init() happens, it's in a different thread than its frontend member, but tried to access it directly to get its name, that info is now sent in the InitMessage instead. (Problem was observed segfaulting the unit test scripts.base.frameworks.notice.mail-alarms on Ubuntu 12.04). 
--- src/logging/WriterBackend.cc | 4 ++-- src/logging/WriterBackend.h | 4 +++- src/logging/WriterFrontend.cc | 10 ++++++---- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 23a95279d7..836c390944 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -108,13 +108,13 @@ void WriterBackend::DisableFrontend() SendOut(new DisableMessage(frontend)); } -bool WriterBackend::Init(string arg_path, int arg_num_fields, const Field* const* arg_fields) +bool WriterBackend::Init(string arg_path, int arg_num_fields, const Field* const* arg_fields, string frontend_name) { path = arg_path; num_fields = arg_num_fields; fields = arg_fields; - string name = Fmt("%s/%s", path.c_str(), frontend->Name().c_str()); + string name = Fmt("%s/%s", path.c_str(), frontend_name.c_str()); SetName(name); diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 1269976aee..64cf84630c 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -53,9 +53,11 @@ public: * @param fields An array of size \a num_fields with the log fields. * The methods takes ownership of the array. * + * @param frontend_name The name of the front-end writer implementation. + * * @return False if an error occured. */ - bool Init(string path, int num_fields, const threading::Field* const* fields); + bool Init(string path, int num_fields, const threading::Field* const* fields, string frontend_name); /** * Writes one log entry. diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 33c9c04c63..cd3bd0d563 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -15,16 +15,18 @@ namespace logging { class InitMessage : public threading::InputMessage { public: - InitMessage(WriterBackend* backend, const string path, const int num_fields, const Field* const* fields) + InitMessage(WriterBackend* backend, const string path, const int num_fields, const Field* const* fields, string frontend_name) : threading::InputMessage("Init", backend), - path(path), num_fields(num_fields), fields(fields) { } + path(path), num_fields(num_fields), fields(fields), + frontend_name(frontend_name) { } - virtual bool Process() { return Object()->Init(path, num_fields, fields); } + virtual bool Process() { return Object()->Init(path, num_fields, fields, frontend_name); } private: const string path; const int num_fields; const Field * const* fields; + const string frontend_name; }; class RotateMessage : public threading::InputMessage @@ -164,7 +166,7 @@ void WriterFrontend::Init(string arg_path, int arg_num_fields, const Field* cons initialized = true; if ( backend ) - backend->SendIn(new InitMessage(backend, arg_path, arg_num_fields, arg_fields)); + backend->SendIn(new InitMessage(backend, arg_path, arg_num_fields, arg_fields, Name())); if ( remote ) remote_serializer->SendLogCreateWriter(stream, From 227159fd04e19758ee4f96e825b420039b793e10 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 28 Jun 2012 15:08:35 -0700 Subject: [PATCH 441/651] make writer-info work when debugging is enabled --- src/logging/Manager.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 23b6f070a1..69a38b1067 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -1261,14 +1261,14 @@ void Manager::InstallRotationTimer(WriterInfo* winfo) timer_mgr->Add(winfo->rotation_timer); DBG_LOG(DBG_LOGGING, "Scheduled rotation 
timer for %s to %.6f", - winfo->writer->Path().c_str(), winfo->rotation_timer->Time()); + winfo->writer->Name().c_str(), winfo->rotation_timer->Time()); } } void Manager::Rotate(WriterInfo* winfo) { DBG_LOG(DBG_LOGGING, "Rotating %s at %.6f", - winfo->writer->Path().c_str(), network_time); + winfo->writer->Name().c_str(), network_time); // Build a temporary path for the writer to move the file to. struct tm tm; @@ -1297,7 +1297,7 @@ bool Manager::FinishedRotation(WriterFrontend* writer, string new_name, string o return true; DBG_LOG(DBG_LOGGING, "Finished rotating %s at %.6f, new name %s", - writer->Path().c_str(), network_time, new_name.c_str()); + writer->Name().c_str(), network_time, new_name.c_str()); WriterInfo* winfo = FindWriter(writer); if ( ! winfo ) From f820ee9f5c31a4b1baae9fd164d335d2ca689568 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 28 Jun 2012 16:16:48 -0700 Subject: [PATCH 442/651] Introduce support for a table of key/value pairs with further configuration options, with the same userinterface as in the logging interface. Not really tested, but tests still work. --- scripts/base/frameworks/input/main.bro | 7 ++++ src/input/Manager.cc | 32 +++++++++++++++++-- src/input/ReaderBackend.cc | 4 +-- src/input/ReaderBackend.h | 7 ++-- src/input/ReaderFrontend.cc | 11 ++++--- src/input/ReaderFrontend.h | 2 +- src/input/readers/Ascii.cc | 2 +- src/input/readers/Ascii.h | 2 +- src/input/readers/Benchmark.cc | 2 +- src/input/readers/Benchmark.h | 2 +- src/input/readers/Raw.cc | 2 +- src/input/readers/Raw.h | 2 +- .../scripts.base.frameworks.input.event/out | 14 ++++++++ .../out | 2 ++ .../scripts.base.frameworks.input.raw/out | 16 ++++++++++ .../scripts.base.frameworks.input.reread/out | 16 ++++++++++ .../out | 32 +++++++++++++++++++ .../out | 16 ++++++++++ .../out | 28 ++++++++++++---- 19 files changed, 173 insertions(+), 26 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index f5df72473f..68b291c2e9 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -53,6 +53,10 @@ export { ## really be executed. Parameters are the same as for the event. If true is ## returned, the update is performed. If false is returned, it is skipped. pred: function(typ: Input::Event, left: any, right: any): bool &optional; + + ## A key/value table that will be passed on the reader. + ## Interpretation of the values is left to the reader. + config: table[string] of string &default=table(); }; ## EventFilter description type used for the `event` method. @@ -85,6 +89,9 @@ export { ## The event will receive an Input::Event enum as the first element, and the fields as the following arguments. ev: any; + ## A key/value table that will be passed on the reader. + ## Interpretation of the values is left to the reader. + config: table[string] of string &default=table(); }; ## Create a new table input from a given source. Returns true on success. 
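A minimal usage sketch for the new config field when declaring an event stream (the input file layout matches the existing event test; the option key/value pair is made up, since interpretation of the entries is left entirely to the reader backend):

module A;

type Val: record {
	i: int;
	b: bool;
};

event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool)
	{
	print i, b;
	}

event bro_init()
	{
	# "option_name" is an illustrative key only; readers define which keys they honor.
	Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line,
	                  $want_record=F,
	                  $config=table(["option_name"] = "option_value")]);
	}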
diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 63fa59d0bc..f9979fbe6e 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -80,6 +80,8 @@ public: EnumVal* type; ReaderFrontend* reader; + TableVal* config; + std::map configmap; RecordVal* description; @@ -103,6 +105,9 @@ Manager::Stream::~Stream() if ( description ) Unref(description); + if ( config ) + Unref(config); + if ( reader ) delete(reader); } @@ -300,6 +305,7 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) Unref(sourceval); EnumVal* mode = description->LookupWithDefault(rtype->FieldOffset("mode"))->AsEnumVal(); + Val* config = description->LookupWithDefault(rtype->FieldOffset("config")); switch ( mode->InternalInt() ) { @@ -325,8 +331,27 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) info->type = reader->AsEnumVal(); // ref'd by lookupwithdefault info->name = name; info->source = source; + info->config = config->AsTableVal(); // ref'd by LookupWithDefault Ref(description); - info->description = description; + info->description = description; + + { + HashKey* k; + IterCookie* c = info->config->AsTable()->InitForIteration(); + + TableEntryVal* v; + while ( (v = info->config->AsTable()->NextEntry(k, c)) ) + { + ListVal* index = info->config->RecoverIndex(k); + string key = index->Index(0)->AsString()->CheckString(); + string value = v->Value()->AsString()->CheckString(); + info->configmap.insert(std::make_pair(key, value)); + Unref(index); + delete k; + } + + } + DBG_LOG(DBG_INPUT, "Successfully created new input stream %s", name.c_str()); @@ -451,7 +476,8 @@ bool Manager::CreateEventStream(RecordVal* fval) Unref(want_record); // ref'd by lookupwithdefault assert(stream->reader); - stream->reader->Init(stream->source, stream->mode, stream->num_fields, logf ); + + stream->reader->Init(stream->source, stream->mode, stream->num_fields, logf, stream->configmap ); readers[stream->reader] = stream; @@ -628,7 +654,7 @@ bool Manager::CreateTableStream(RecordVal* fval) assert(stream->reader); - stream->reader->Init(stream->source, stream->mode, fieldsV.size(), fields ); + stream->reader->Init(stream->source, stream->mode, fieldsV.size(), fields, stream->configmap ); readers[stream->reader] = stream; diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index e7626084a6..276b5d25b0 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -184,7 +184,7 @@ void ReaderBackend::SendEntry(Value* *vals) } bool ReaderBackend::Init(string arg_source, ReaderMode arg_mode, const int arg_num_fields, - const threading::Field* const* arg_fields) + const threading::Field* const* arg_fields, const std::map config) { source = arg_source; mode = arg_mode; @@ -194,7 +194,7 @@ bool ReaderBackend::Init(string arg_source, ReaderMode arg_mode, const int arg_n SetName("InputReader/"+source); // disable if DoInit returns error. - int success = DoInit(arg_source, mode, arg_num_fields, arg_fields); + int success = DoInit(arg_source, mode, arg_num_fields, arg_fields, config); if ( ! success ) { diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index a04508d252..c23c68bf7e 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -79,9 +79,12 @@ public: * @param fields The types and names of the fields to be retrieved * from the input source. * + * @param config A string map containing additional configuration options + * for the reader. + * * @return False if an error occured. 
*/ - bool Init(string source, ReaderMode mode, int num_fields, const threading::Field* const* fields); + bool Init(string source, ReaderMode mode, int num_fields, const threading::Field* const* fields, std::map config); /** * Finishes reading from this input stream in a regular fashion. Must @@ -130,7 +133,7 @@ protected: * provides accessor methods to get them later, and they are passed * in here only for convinience. */ - virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields) = 0; + virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields, const std::map config) = 0; /** * Reader-specific method implementing input finalization at diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index a9a4c778dd..ec1630cd88 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -12,13 +12,13 @@ class InitMessage : public threading::InputMessage { public: InitMessage(ReaderBackend* backend, const string source, ReaderMode mode, - const int num_fields, const threading::Field* const* fields) + const int num_fields, const threading::Field* const* fields, const std::map config) : threading::InputMessage("Init", backend), - source(source), mode(mode), num_fields(num_fields), fields(fields) { } + source(source), mode(mode), num_fields(num_fields), fields(fields), config(config) { } virtual bool Process() { - return Object()->Init(source, mode, num_fields, fields); + return Object()->Init(source, mode, num_fields, fields, config); } private: @@ -26,6 +26,7 @@ private: const ReaderMode mode; const int num_fields; const threading::Field* const* fields; + const std::map config; }; class UpdateMessage : public threading::InputMessage @@ -64,7 +65,7 @@ ReaderFrontend::~ReaderFrontend() } void ReaderFrontend::Init(string arg_source, ReaderMode mode, const int num_fields, - const threading::Field* const* fields) + const threading::Field* const* fields, const std::map config) { if ( disabled ) return; @@ -75,7 +76,7 @@ void ReaderFrontend::Init(string arg_source, ReaderMode mode, const int num_fiel source = arg_source; initialized = true; - backend->SendIn(new InitMessage(backend, arg_source, mode, num_fields, fields)); + backend->SendIn(new InitMessage(backend, arg_source, mode, num_fields, fields, config)); } void ReaderFrontend::Update() diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index c61b194e24..1240831ee6 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -52,7 +52,7 @@ public: * * This method must only be called from the main thread. */ - void Init(string arg_source, ReaderMode mode, const int arg_num_fields, const threading::Field* const* fields); + void Init(string arg_source, ReaderMode mode, const int arg_num_fields, const threading::Field* const* fields, const std::map config); /** * Force an update of the current input source. 
Actual action depends diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 186d765d21..47bbe2a207 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -83,7 +83,7 @@ void Ascii::DoClose() } } -bool Ascii::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields) +bool Ascii::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields, const std::map config) { mtime = 0; diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index 335616abfb..c17c5220ed 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -38,7 +38,7 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Ascii(frontend); } protected: - virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); + virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields, const std::map config); virtual void DoClose(); virtual bool DoUpdate(); virtual bool DoHeartbeat(double network_time, double current_time); diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index 5e4ef090f7..37888b095f 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -36,7 +36,7 @@ void Benchmark::DoClose() { } -bool Benchmark::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields) +bool Benchmark::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields, const std::map config) { num_lines = atoi(path.c_str()); diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index 6bb70781fd..e806b9ca4a 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -18,7 +18,7 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Benchmark(frontend); } protected: - virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); + virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields, const std::map config); virtual void DoClose(); virtual bool DoUpdate(); virtual bool DoHeartbeat(double network_time, double current_time); diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index 59899f32fc..9971aa1aa3 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -100,7 +100,7 @@ bool Raw::CloseInput() return true; } -bool Raw::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields) +bool Raw::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields, const std::map config) { fname = path; mtime = 0; diff --git a/src/input/readers/Raw.h b/src/input/readers/Raw.h index b9b45f0b20..fb6b94410b 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/Raw.h @@ -22,7 +22,7 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Raw(frontend); } protected: - virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); + virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields, const std::map config); virtual void DoClose(); virtual bool DoUpdate(); virtual bool DoHeartbeat(double network_time, double current_time); diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.event/out b/testing/btest/Baseline/scripts.base.frameworks.input.event/out index 
bb3b6d0a9e..5ccc9c0d1e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.event/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.event/out @@ -4,6 +4,8 @@ print A::description; print A::tpe; print A::i; print A::b; +}, config={ + }] Input::EVENT_NEW 1 @@ -14,6 +16,8 @@ print A::description; print A::tpe; print A::i; print A::b; +}, config={ + }] Input::EVENT_NEW 2 @@ -24,6 +28,8 @@ print A::description; print A::tpe; print A::i; print A::b; +}, config={ + }] Input::EVENT_NEW 3 @@ -34,6 +40,8 @@ print A::description; print A::tpe; print A::i; print A::b; +}, config={ + }] Input::EVENT_NEW 4 @@ -44,6 +52,8 @@ print A::description; print A::tpe; print A::i; print A::b; +}, config={ + }] Input::EVENT_NEW 5 @@ -54,6 +64,8 @@ print A::description; print A::tpe; print A::i; print A::b; +}, config={ + }] Input::EVENT_NEW 6 @@ -64,6 +76,8 @@ print A::description; print A::tpe; print A::i; print A::b; +}, config={ + }] Input::EVENT_NEW 7 diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out index a38f3fce84..51543e143c 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out @@ -4,6 +4,8 @@ print outfile, description; print outfile, tpe; print outfile, s; close(outfile); +}, config={ + }] Input::EVENT_NEW 8 ../input.log diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.raw/out b/testing/btest/Baseline/scripts.base.frameworks.input.raw/out index 55e7610e1e..611e5ec378 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.raw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.raw/out @@ -3,6 +3,8 @@ print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF @@ -11,6 +13,8 @@ sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF @@ -19,6 +23,8 @@ DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW q3r3057fdf @@ -27,6 +33,8 @@ q3r3057fdf print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW sdfs\d @@ -35,6 +43,8 @@ sdfs\d print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW @@ -43,6 +53,8 @@ Input::EVENT_NEW print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW dfsdf @@ -51,6 +63,8 @@ dfsdf print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW sdf @@ -59,6 +73,8 @@ sdf print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. 
diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out index 5cce15f6c7..8b55ced2ac 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out @@ -46,6 +46,8 @@ print A::outfile, A::typ; print A::outfile, A::left; print A::outfile, A::right; return (T); +}, config={ + }] Type Input::EVENT_NEW @@ -139,6 +141,8 @@ print A::outfile, A::typ; print A::outfile, A::left; print A::outfile, A::right; return (T); +}, config={ + }] Type Input::EVENT_NEW @@ -244,6 +248,8 @@ print A::outfile, A::typ; print A::outfile, A::left; print A::outfile, A::right; return (T); +}, config={ + }] Type Input::EVENT_CHANGED @@ -469,6 +475,8 @@ print A::outfile, A::typ; print A::outfile, A::left; print A::outfile, A::right; return (T); +}, config={ + }] Type Input::EVENT_NEW @@ -592,6 +600,8 @@ print A::outfile, A::typ; print A::outfile, A::left; print A::outfile, A::right; return (T); +}, config={ + }] Type Input::EVENT_NEW @@ -715,6 +725,8 @@ print A::outfile, A::typ; print A::outfile, A::left; print A::outfile, A::right; return (T); +}, config={ + }] Type Input::EVENT_NEW @@ -838,6 +850,8 @@ print A::outfile, A::typ; print A::outfile, A::left; print A::outfile, A::right; return (T); +}, config={ + }] Type Input::EVENT_NEW @@ -961,6 +975,8 @@ print A::outfile, A::typ; print A::outfile, A::left; print A::outfile, A::right; return (T); +}, config={ + }] Type Input::EVENT_NEW diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out index 9d62fdbef4..7dc81ba80d 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out @@ -3,6 +3,8 @@ print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF @@ -11,6 +13,8 @@ sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF @@ -19,6 +23,8 @@ DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW q3r3057fdf @@ -27,6 +33,8 @@ q3r3057fdf print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW sdfs\d @@ -35,6 +43,8 @@ sdfs\d print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW @@ -43,6 +53,8 @@ Input::EVENT_NEW print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW dfsdf @@ -51,6 +63,8 @@ dfsdf print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW sdf @@ -59,6 +73,8 @@ sdf print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. 
@@ -67,6 +83,8 @@ Input::EVENT_NEW print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF @@ -75,6 +93,8 @@ sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF @@ -83,6 +103,8 @@ DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW q3r3057fdf @@ -91,6 +113,8 @@ q3r3057fdf print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW sdfs\d @@ -99,6 +123,8 @@ sdfs\d print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW @@ -107,6 +133,8 @@ Input::EVENT_NEW print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW dfsdf @@ -115,6 +143,8 @@ dfsdf print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW sdf @@ -123,6 +153,8 @@ sdf print A::description; print A::tpe; print A::s; +}, config={ + }] Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out index 07a3ffdba5..1bf8d4cfef 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out @@ -10,6 +10,8 @@ close(A::outfile); Input::remove(input); } +}, config={ + }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF @@ -25,6 +27,8 @@ close(A::outfile); Input::remove(input); } +}, config={ + }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF @@ -40,6 +44,8 @@ close(A::outfile); Input::remove(input); } +}, config={ + }] Input::EVENT_NEW q3r3057fdf @@ -55,6 +61,8 @@ close(A::outfile); Input::remove(input); } +}, config={ + }] Input::EVENT_NEW sdfs\d @@ -70,6 +78,8 @@ close(A::outfile); Input::remove(input); } +}, config={ + }] Input::EVENT_NEW @@ -85,6 +95,8 @@ close(A::outfile); Input::remove(input); } +}, config={ + }] Input::EVENT_NEW dfsdf @@ -100,6 +112,8 @@ close(A::outfile); Input::remove(input); } +}, config={ + }] Input::EVENT_NEW sdf @@ -115,6 +129,8 @@ close(A::outfile); Input::remove(input); } +}, config={ + }] Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. 
diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out index a1bbb9bbe4..28bf77f057 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out @@ -12,7 +12,9 @@ print description; print tpe; print left; print right; -}, pred=] +}, pred=, config={ + +}] Input::EVENT_NEW [i=1] T @@ -30,7 +32,9 @@ print description; print tpe; print left; print right; -}, pred=] +}, pred=, config={ + +}] Input::EVENT_NEW [i=2] T @@ -48,7 +52,9 @@ print description; print tpe; print left; print right; -}, pred=] +}, pred=, config={ + +}] Input::EVENT_NEW [i=3] F @@ -66,7 +72,9 @@ print description; print tpe; print left; print right; -}, pred=] +}, pred=, config={ + +}] Input::EVENT_NEW [i=4] F @@ -84,7 +92,9 @@ print description; print tpe; print left; print right; -}, pred=] +}, pred=, config={ + +}] Input::EVENT_NEW [i=5] F @@ -102,7 +112,9 @@ print description; print tpe; print left; print right; -}, pred=] +}, pred=, config={ + +}] Input::EVENT_NEW [i=6] F @@ -120,7 +132,9 @@ print description; print tpe; print left; print right; -}, pred=] +}, pred=, config={ + +}] Input::EVENT_NEW [i=7] T From 0e48fda6ffa0be4cec2d763305a1394e19b32778 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 29 Jun 2012 12:50:57 -0500 Subject: [PATCH 443/651] Updating input framework unit tests. Generally tried to make them more reliable and execute quicker. They all now load the listen script as a trick to make sure input sources are fully read, but also terminate() at appropriate times so that they don't take more time than needed. They're also all serialized with the 'comm' group so listening on a port doesn't interfere with the communication tests. 
--- .../scripts.base.frameworks.input.event/out | 119 ++++++--- .../out | 1 + .../scripts.base.frameworks.input.raw/out | 120 ++++++--- .../scripts.base.frameworks.input.repeat/out | 64 ++--- .../out | 240 +++++++++++++----- .../out | 33 ++- .../out | 119 ++++++--- .../event.out | 4 + .../fin.out | 30 +++ .../out | 172 ------------- .../pred1.out | 45 ++++ .../pred2.out | 15 ++ testing/btest/coverage/bare-mode-errors.test | 2 + .../scripts/base/frameworks/input/basic.bro | 26 +- .../base/frameworks/input/emptyvals.bro | 25 +- .../scripts/base/frameworks/input/event.bro | 36 ++- .../base/frameworks/input/executeraw.bro | 16 +- .../frameworks/input/onecolumn-norecord.bro | 25 +- .../frameworks/input/onecolumn-record.bro | 25 +- .../base/frameworks/input/optional.bro | 25 +- .../scripts/base/frameworks/input/port.bro | 39 ++- .../frameworks/input/predicate-stream.bro | 67 +++-- .../base/frameworks/input/predicate.bro | 56 ++-- .../base/frameworks/input/predicatemodify.bro | 37 ++- .../input/predicatemodifyandreread.bro | 30 ++- .../scripts/base/frameworks/input/raw.bro | 34 ++- .../scripts/base/frameworks/input/repeat.bro | 40 ++- .../scripts/base/frameworks/input/reread.bro | 27 +- .../base/frameworks/input/rereadraw.bro | 34 ++- .../scripts/base/frameworks/input/stream.bro | 23 +- .../base/frameworks/input/streamraw.bro | 22 +- .../base/frameworks/input/tableevent.bro | 37 ++- .../base/frameworks/input/twotables.bro | 88 ++++--- 33 files changed, 1045 insertions(+), 631 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.twotables/event.out create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.twotables/fin.out delete mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.twotables/out create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.twotables/pred1.out create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.twotables/pred2.out diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.event/out b/testing/btest/Baseline/scripts.base.frameworks.input.event/out index bb3b6d0a9e..d02cda5e33 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.event/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.event/out @@ -1,69 +1,118 @@ -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::i; -print A::b; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::i; +print outfile, A::b; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW 1 T -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::i; -print A::b; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::i; +print outfile, A::b; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW 2 T -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::i; -print 
A::b; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::i; +print outfile, A::b; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW 3 F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::i; -print A::b; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::i; +print outfile, A::b; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW 4 F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::i; -print A::b; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::i; +print outfile, A::b; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW 5 F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::i; -print A::b; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::i; +print outfile, A::b; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW 6 F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::i; -print A::b; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::i; +print outfile, A::b; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW 7 diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out index a38f3fce84..61f179780c 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.executeraw/out @@ -4,6 +4,7 @@ print outfile, description; print outfile, tpe; print outfile, s; close(outfile); +terminate(); }] Input::EVENT_NEW 8 ../input.log diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.raw/out b/testing/btest/Baseline/scripts.base.frameworks.input.raw/out index 55e7610e1e..0d380047fb 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.raw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.raw/out @@ -1,64 +1,120 @@ -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (8 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line 
+[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (8 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (8 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW q3r3057fdf -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (8 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW sdfs\d -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (8 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (8 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW dfsdf -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (8 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW sdf -[source=input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::STREAM, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (8 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. 
diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.repeat/out b/testing/btest/Baseline/scripts.base.frameworks.input.repeat/out index 71de0d2570..12a8c5f581 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.repeat/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.repeat/out @@ -1,160 +1,160 @@ input0 -input.log +../input.log { [1] = T } input1 -input.log +../input.log { [1] = T } input2 -input.log +../input.log { [1] = T } input3 -input.log +../input.log { [1] = T } input4 -input.log +../input.log { [1] = T } input5 -input.log +../input.log { [1] = T } input6 -input.log +../input.log { [1] = T } input7 -input.log +../input.log { [1] = T } input8 -input.log +../input.log { [1] = T } input9 -input.log +../input.log { [1] = T } input10 -input.log +../input.log { [1] = T } input11 -input.log +../input.log { [1] = T } input12 -input.log +../input.log { [1] = T } input13 -input.log +../input.log { [1] = T } input14 -input.log +../input.log { [1] = T } input15 -input.log +../input.log { [1] = T } input16 -input.log +../input.log { [1] = T } input17 -input.log +../input.log { [1] = T } input18 -input.log +../input.log { [1] = T } input19 -input.log +../input.log { [1] = T } input20 -input.log +../input.log { [1] = T } input21 -input.log +../input.log { [1] = T } input22 -input.log +../input.log { [1] = T } input23 -input.log +../input.log { [1] = T } input24 -input.log +../input.log { [1] = T } input25 -input.log +../input.log { [1] = T } input26 -input.log +../input.log { [1] = T } input27 -input.log +../input.log { [1] = T } input28 -input.log +../input.log { [1] = T } input29 -input.log +../input.log { [1] = T } input30 -input.log +../input.log { [1] = T } input31 -input.log +../input.log { [1] = T } diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out index 9d62fdbef4..7c75913bc3 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.rereadraw/out @@ -1,128 +1,240 @@ -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW q3r3057fdf 
-[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW sdfs\d -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW dfsdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW sdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. 
-[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW q3r3057fdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW sdfs\d -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW dfsdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW sdf -[source=input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line +[source=../input.log, reader=Input::READER_RAW, mode=Input::REREAD, name=input, fields=, want_record=F, ev=line { -print A::description; -print A::tpe; -print A::s; +print outfile, A::description; +print outfile, 
A::tpe; +print outfile, A::s; +try = try + 1; +if (16 == try) +{ +close(outfile); +terminate(); +} + }] Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out b/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out index 07a3ffdba5..b934e34768 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.streamraw/out @@ -3,11 +3,13 @@ print A::outfile, A::description; print A::outfile, A::tpe; print A::outfile, A::s; -if (3 == A::try) +A::try = A::try + 1; +if (8 == A::try) { print A::outfile, done; close(A::outfile); Input::remove(input); +terminate(); } }] @@ -18,11 +20,13 @@ sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF print A::outfile, A::description; print A::outfile, A::tpe; print A::outfile, A::s; -if (3 == A::try) +A::try = A::try + 1; +if (8 == A::try) { print A::outfile, done; close(A::outfile); Input::remove(input); +terminate(); } }] @@ -33,11 +37,13 @@ DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF print A::outfile, A::description; print A::outfile, A::tpe; print A::outfile, A::s; -if (3 == A::try) +A::try = A::try + 1; +if (8 == A::try) { print A::outfile, done; close(A::outfile); Input::remove(input); +terminate(); } }] @@ -48,11 +54,13 @@ q3r3057fdf print A::outfile, A::description; print A::outfile, A::tpe; print A::outfile, A::s; -if (3 == A::try) +A::try = A::try + 1; +if (8 == A::try) { print A::outfile, done; close(A::outfile); Input::remove(input); +terminate(); } }] @@ -63,11 +71,13 @@ sdfs\d print A::outfile, A::description; print A::outfile, A::tpe; print A::outfile, A::s; -if (3 == A::try) +A::try = A::try + 1; +if (8 == A::try) { print A::outfile, done; close(A::outfile); Input::remove(input); +terminate(); } }] @@ -78,11 +88,13 @@ Input::EVENT_NEW print A::outfile, A::description; print A::outfile, A::tpe; print A::outfile, A::s; -if (3 == A::try) +A::try = A::try + 1; +if (8 == A::try) { print A::outfile, done; close(A::outfile); Input::remove(input); +terminate(); } }] @@ -93,11 +105,13 @@ dfsdf print A::outfile, A::description; print A::outfile, A::tpe; print A::outfile, A::s; -if (3 == A::try) +A::try = A::try + 1; +if (8 == A::try) { print A::outfile, done; close(A::outfile); Input::remove(input); +terminate(); } }] @@ -108,13 +122,16 @@ sdf print A::outfile, A::description; print A::outfile, A::tpe; print A::outfile, A::s; -if (3 == A::try) +A::try = A::try + 1; +if (8 == A::try) { print A::outfile, done; close(A::outfile); Input::remove(input); +terminate(); } }] Input::EVENT_NEW 3rw43wRRERLlL#RWERERERE. 
+done diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out index a1bbb9bbe4..43d000676d 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.tableevent/out @@ -1,4 +1,4 @@ -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -8,15 +8,22 @@ [3] = F }, idx=, val=, want_record=F, ev=line { -print description; -print tpe; -print left; -print right; +print outfile, description; +print outfile, tpe; +print outfile, left; +print outfile, right; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }, pred=] Input::EVENT_NEW [i=1] T -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -26,15 +33,22 @@ T [3] = F }, idx=, val=, want_record=F, ev=line { -print description; -print tpe; -print left; -print right; +print outfile, description; +print outfile, tpe; +print outfile, left; +print outfile, right; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }, pred=] Input::EVENT_NEW [i=2] T -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -44,15 +58,22 @@ T [3] = F }, idx=, val=, want_record=F, ev=line { -print description; -print tpe; -print left; -print right; +print outfile, description; +print outfile, tpe; +print outfile, left; +print outfile, right; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }, pred=] Input::EVENT_NEW [i=3] F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -62,15 +83,22 @@ F [3] = F }, idx=, val=, want_record=F, ev=line { -print description; -print tpe; -print left; -print right; +print outfile, description; +print outfile, tpe; +print outfile, left; +print outfile, right; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }, pred=] Input::EVENT_NEW [i=4] F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -80,15 +108,22 @@ F [3] = F }, idx=, val=, want_record=F, ev=line { -print description; -print tpe; -print left; -print right; +print outfile, description; +print outfile, tpe; +print outfile, left; +print outfile, right; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }, pred=] Input::EVENT_NEW [i=5] F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -98,15 +133,22 @@ F [3] = F }, idx=, val=, want_record=F, ev=line { -print description; -print tpe; -print left; -print right; +print outfile, description; +print outfile, tpe; +print outfile, left; +print outfile, right; +try = try + 1; +if (7 
== try) +{ +close(outfile); +terminate(); +} + }, pred=] Input::EVENT_NEW [i=6] F -[source=input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ +[source=../input.log, reader=Input::READER_ASCII, mode=Input::MANUAL, name=input, destination={ [2] = T, [4] = F, [6] = F, @@ -116,10 +158,17 @@ F [3] = F }, idx=, val=, want_record=F, ev=line { -print description; -print tpe; -print left; -print right; +print outfile, description; +print outfile, tpe; +print outfile, left; +print outfile, right; +try = try + 1; +if (7 == try) +{ +close(outfile); +terminate(); +} + }, pred=] Input::EVENT_NEW [i=7] diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/event.out b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/event.out new file mode 100644 index 0000000000..ebf210031f --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/event.out @@ -0,0 +1,4 @@ +============EVENT============ +============EVENT============ +============EVENT============ +============EVENT============ diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/fin.out b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/fin.out new file mode 100644 index 0000000000..b7e1031867 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/fin.out @@ -0,0 +1,30 @@ +==========SERVERS============ +==========SERVERS============ +==========SERVERS============ +done +{ +[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]], +[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out deleted file mode 100644 index e9e03add3a..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/out +++ /dev/null @@ -1,172 +0,0 @@ -============PREDICATE============ -Input::EVENT_NEW -[i=-42] -[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============PREDICATE 2============ -Input::EVENT_NEW -[i=-43] -[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -==========SERVERS============ -{ -[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -} -============EVENT============ -==========SERVERS============ -{ -[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-42] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, 
d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -} -============PREDICATE============ -Input::EVENT_NEW -[i=-44] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============PREDICATE============ -Input::EVENT_REMOVED -[i=-42] -[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -============EVENT============ -==========SERVERS============ -{ -[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -} -done -{ -[-43] = [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]], -[-44] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -} diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/pred1.out b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/pred1.out new file mode 100644 index 0000000000..84d1465428 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/pred1.out @@ -0,0 +1,45 @@ +============PREDICATE============ +Input::EVENT_NEW +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_NEW +[i=-44] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.twotables/pred2.out b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/pred2.out new file mode 100644 index 0000000000..ef38fa3210 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.twotables/pred2.out @@ -0,0 +1,15 @@ +============PREDICATE 2============ +Input::EVENT_NEW +[i=-43] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] diff --git a/testing/btest/coverage/bare-mode-errors.test b/testing/btest/coverage/bare-mode-errors.test index 9fd18308ce..21e7d4f4a9 100644 --- a/testing/btest/coverage/bare-mode-errors.test +++ b/testing/btest/coverage/bare-mode-errors.test @@ -5,6 +5,8 @@ # Commonly, this test may fail if 
one forgets to @load some base/ scripts # when writing a new bro scripts. # +# @TEST-SERIALIZE: comm +# # @TEST-EXEC: test -d $DIST/scripts # @TEST-EXEC: for script in `find $DIST/scripts -name \*\.bro -not -path '*/site/*'`; do echo $script; if echo "$script" | egrep -q 'communication/listen|controllee'; then rm -rf load_attempt .bgprocs; btest-bg-run load_attempt bro -b $script; btest-bg-wait -k 2; cat load_attempt/.stderr >>allerrors; else bro -b $script 2>>allerrors; fi done || exit 0 # @TEST-EXEC: cat allerrors | grep -v "received termination signal" | sort | uniq > unique_errors diff --git a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro index 8d4028a12e..df2ab676b8 100644 --- a/testing/btest/scripts/base/frameworks/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/input/basic.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -10,6 +13,11 @@ T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} @TEST-END-FILE +@load base/protocols/ssh +@load frameworks/communication/listen + +global outfile: file; + redef InputAscii::empty_field = "EMPTY"; module A; @@ -39,12 +47,16 @@ type Val: record { global servers: table[int] of Val = table(); event bro_init() -{ + { + outfile = open("../out"); # first read in the old stuff into the table... - Input::add_table([$source="input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); Input::remove("ssh"); -} + } -event Input::update_finished(name: string, source:string) { - print servers; -} +event Input::update_finished(name: string, source:string) + { + print outfile, servers; + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/emptyvals.bro b/testing/btest/scripts/base/frameworks/input/emptyvals.bro index 77659d13ec..a2a9ba3070 100644 --- a/testing/btest/scripts/base/frameworks/input/emptyvals.bro +++ b/testing/btest/scripts/base/frameworks/input/emptyvals.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -11,6 +14,10 @@ T 1 - 2 @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; + redef InputAscii::empty_field = "EMPTY"; module A; @@ -26,12 +33,16 @@ type Val: record { global servers: table[int] of Val = table(); event bro_init() -{ + { + outfile = open("../out"); # first read in the old stuff into the table... 
- Input::add_table([$source="input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); Input::remove("ssh"); -} + } -event Input::update_finished(name: string, source:string) { - print servers; -} +event Input::update_finished(name: string, source:string) + { + print outfile, servers; + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/event.bro b/testing/btest/scripts/base/frameworks/input/event.bro index dca75334d0..d275cee59c 100644 --- a/testing/btest/scripts/base/frameworks/input/event.bro +++ b/testing/btest/scripts/base/frameworks/input/event.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -16,6 +19,10 @@ 7 T @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; +global try: count; module A; @@ -24,15 +31,24 @@ type Val: record { b: bool; }; -event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool) { - print description; - print tpe; - print i; - print b; -} +event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool) + { + print outfile, description; + print outfile, tpe; + print outfile, i; + print outfile, b; + try = try + 1; + if ( try == 7 ) + { + close(outfile); + terminate(); + } + } event bro_init() -{ - Input::add_event([$source="input.log", $name="input", $fields=Val, $ev=line]); + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line]); Input::remove("input"); -} + } diff --git a/testing/btest/scripts/base/frameworks/input/executeraw.bro b/testing/btest/scripts/base/frameworks/input/executeraw.bro index 6df28d08ea..222b4256d1 100644 --- a/testing/btest/scripts/base/frameworks/input/executeraw.bro +++ b/testing/btest/scripts/base/frameworks/input/executeraw.bro @@ -1,6 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait -k 1 +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: cat out.tmp | sed 's/^ *//g' >out # @TEST-EXEC: btest-diff out @@ -23,16 +25,18 @@ type Val: record { s: string; }; -event line(description: Input::EventDescription, tpe: Input::Event, s: string) { +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { print outfile, description; print outfile, tpe; print outfile, s; close(outfile); -} + terminate(); + } event bro_init() -{ - outfile = open ("../out.tmp"); + { + outfile = open("../out.tmp"); Input::add_event([$source="wc -l ../input.log |", $reader=Input::READER_RAW, $name="input", $fields=Val, $ev=line]); Input::remove("input"); -} + } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro index d6c81cb2db..9707af7f94 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). 
+# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -10,6 +13,10 @@ T -42 @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; + redef InputAscii::empty_field = "EMPTY"; module A; @@ -25,12 +32,16 @@ type Val: record { global servers: table[int] of Val = table(); event bro_init() -{ - Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); + { + outfile = open("../out"); + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); Input::remove("input"); -} + } -event Input::update_finished(name: string, source: string) { - print servers; -} +event Input::update_finished(name: string, source: string) + { + print outfile, servers; + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro index ca1e956f35..18349f1515 100644 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -10,6 +13,10 @@ T -42 @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; + redef InputAscii::empty_field = "EMPTY"; module A; @@ -25,12 +32,16 @@ type Val: record { global servers: table[int] of Val = table(); event bro_init() -{ - Input::add_table([$name="input", $source="input.log", $idx=Idx, $val=Val, $destination=servers]); + { + outfile = open("../out"); + Input::add_table([$name="input", $source="../input.log", $idx=Idx, $val=Val, $destination=servers]); Input::remove("input"); -} + } -event Input::update_finished(name: string, source: string) { - print servers; -} +event Input::update_finished(name: string, source: string) + { + print outfile, servers; + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/optional.bro b/testing/btest/scripts/base/frameworks/input/optional.bro index c354f7c3ab..23e0b1e4d1 100644 --- a/testing/btest/scripts/base/frameworks/input/optional.bro +++ b/testing/btest/scripts/base/frameworks/input/optional.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -16,6 +19,10 @@ 7 T @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; + redef InputAscii::empty_field = "EMPTY"; module A; @@ -32,14 +39,18 @@ type Val: record { global servers: table[int] of Val = table(); event bro_init() -{ + { + outfile = open("../out"); # first read in the old stuff into the table... 
- Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $pred(typ: Input::Event, left: Idx, right: Val) = { right$notb = !right$b; return T; } ]); Input::remove("input"); -} + } -event Input::update_finished(name: string, source: string) { - print servers; -} +event Input::update_finished(name: string, source: string) + { + print outfile, servers; + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/port.bro b/testing/btest/scripts/base/frameworks/input/port.bro index 88e86eb5dc..2f061e9507 100644 --- a/testing/btest/scripts/base/frameworks/input/port.bro +++ b/testing/btest/scripts/base/frameworks/input/port.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -9,6 +12,10 @@ 1.2.3.6 30 unknown @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; + redef InputAscii::empty_field = "EMPTY"; module A; @@ -24,17 +31,23 @@ type Val: record { global servers: table[addr] of Val = table(); event bro_init() -{ - Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers]); - print servers[1.2.3.4]; - print servers[1.2.3.5]; - print servers[1.2.3.6]; + { + outfile = open("../out"); + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers]); + if ( 1.2.3.4 in servers ) + print outfile, servers[1.2.3.4]; + if ( 1.2.3.5 in servers ) + print outfile, servers[1.2.3.5]; + if ( 1.2.3.6 in servers ) + print outfile, servers[1.2.3.6]; Input::remove("input"); -} - -event Input::update_finished(name: string, source: string) { - print servers[1.2.3.4]; - print servers[1.2.3.5]; - print servers[1.2.3.6]; -} + } +event Input::update_finished(name: string, source: string) + { + print outfile, servers[1.2.3.4]; + print outfile, servers[1.2.3.5]; + print outfile, servers[1.2.3.6]; + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/predicate-stream.bro b/testing/btest/scripts/base/frameworks/input/predicate-stream.bro index 20c69131cb..8cf927e346 100644 --- a/testing/btest/scripts/base/frameworks/input/predicate-stream.bro +++ b/testing/btest/scripts/base/frameworks/input/predicate-stream.bro @@ -1,9 +1,13 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out # # only difference from predicate.bro is, that this one uses a stream source. 
-# the reason is, that the code-paths are quite different, because then the ascii reader uses the put and not the sendevent interface +# the reason is, that the code-paths are quite different, because then the +# ascii reader uses the put and not the sendevent interface @TEST-START-FILE input.log #separator \x09 @@ -19,6 +23,10 @@ 7 T @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; + redef InputAscii::empty_field = "EMPTY"; module A; @@ -34,47 +42,38 @@ type Val: record { global servers: table[int] of Val = table(); global ct: int; -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: bool) { +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: bool) + { ct = ct + 1; - if ( ct < 3 ) { + if ( ct < 3 ) return; - } - if ( ct > 3 ) { - print "Too many events"; - return; - } - if ( 1 in servers ) { - print "VALID"; + if ( 1 in servers ) + print outfile, "VALID"; + if ( 2 in servers ) + print outfile, "VALID"; + if ( !(3 in servers) ) + print outfile, "VALID"; + if ( !(4 in servers) ) + print outfile, "VALID"; + if ( !(5 in servers) ) + print outfile, "VALID"; + if ( !(6 in servers) ) + print outfile, "VALID"; + if ( 7 in servers ) + print outfile, "VALID"; + close(outfile); + terminate(); } - if ( 2 in servers ) { - print "VALID"; - } - if ( !(3 in servers) ) { - print "VALID"; - } - if ( !(4 in servers) ) { - print "VALID"; - } - if ( !(5 in servers) ) { - print "VALID"; - } - if ( !(6 in servers) ) { - print "VALID"; - } - if ( 7 in servers ) { - print "VALID"; - } -} event bro_init() -{ + { + outfile = open("../out"); ct = 0; # first read in the old stuff into the table... - Input::add_table([$source="input.log", $mode=Input::STREAM, $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $ev=line, + Input::add_table([$source="../input.log", $mode=Input::STREAM, $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $ev=line, $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } ]); Input::remove("input"); - -} + } diff --git a/testing/btest/scripts/base/frameworks/input/predicate.bro b/testing/btest/scripts/base/frameworks/input/predicate.bro index 278ac7418e..2cda6f5fb9 100644 --- a/testing/btest/scripts/base/frameworks/input/predicate.bro +++ b/testing/btest/scripts/base/frameworks/input/predicate.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -16,6 +19,10 @@ 7 T @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; + redef InputAscii::empty_field = "EMPTY"; module A; @@ -31,34 +38,31 @@ type Val: record { global servers: table[int] of Val = table(); event bro_init() -{ + { + outfile = open("../out"); # first read in the old stuff into the table... 
- Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } ]); Input::remove("input"); -} + } -event Input::update_finished(name: string, source: string) { - if ( 1 in servers ) { - print "VALID"; +event Input::update_finished(name: string, source: string) + { + if ( 1 in servers ) + print outfile, "VALID"; + if ( 2 in servers ) + print outfile, "VALID"; + if ( !(3 in servers) ) + print outfile, "VALID"; + if ( !(4 in servers) ) + print outfile, "VALID"; + if ( !(5 in servers) ) + print outfile, "VALID"; + if ( !(6 in servers) ) + print outfile, "VALID"; + if ( 7 in servers ) + print outfile, "VALID"; + close(outfile); + terminate(); } - if ( 2 in servers ) { - print "VALID"; - } - if ( !(3 in servers) ) { - print "VALID"; - } - if ( !(4 in servers) ) { - print "VALID"; - } - if ( !(5 in servers) ) { - print "VALID"; - } - if ( !(6 in servers) ) { - print "VALID"; - } - if ( 7 in servers ) { - print "VALID"; - } -} diff --git a/testing/btest/scripts/base/frameworks/input/predicatemodify.bro b/testing/btest/scripts/base/frameworks/input/predicatemodify.bro index c3198d8483..1d6a54fe38 100644 --- a/testing/btest/scripts/base/frameworks/input/predicatemodify.bro +++ b/testing/btest/scripts/base/frameworks/input/predicatemodify.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -11,6 +14,10 @@ 2 T test2 idx2 @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; + redef InputAscii::empty_field = "EMPTY"; module A; @@ -28,23 +35,25 @@ type Val: record { global servers: table[int, string] of Val = table(); event bro_init() -{ - # first read in the old stuff into the table... - Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, - $pred(typ: Input::Event, left: Idx, right: Val) = { - if ( left$i == 1 ) { - right$s = "testmodified"; - } + { + outfile = open("../out"); - if ( left$i == 2 ) { + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, + $pred(typ: Input::Event, left: Idx, right: Val) = { + if ( left$i == 1 ) + right$s = "testmodified"; + if ( left$i == 2 ) left$ss = "idxmodified"; - } return T; } ]); Input::remove("input"); -} + } -event Input::update_finished(name: string, source: string) { - print servers; -} +event Input::update_finished(name: string, source: string) + { + print outfile, servers; + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro b/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro index 1606ff6a27..9b8758bf3f 100644 --- a/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro +++ b/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro @@ -1,6 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). 
+# @TEST-SERIALIZE: comm # # @TEST-EXEC: cp input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro %INPUT +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT # @TEST-EXEC: sleep 2 # @TEST-EXEC: cp input2.log input.log # @TEST-EXEC: sleep 2 @@ -9,7 +11,7 @@ # @TEST-EXEC: cp input4.log input.log # @TEST-EXEC: sleep 2 # @TEST-EXEC: cp input5.log input.log -# @TEST-EXEC: btest-bg-wait -k 3 +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out # @@ -77,31 +79,31 @@ global outfile: file; global try: count; event bro_init() -{ + { try = 0; - outfile = open ("../out"); + outfile = open("../out"); # first read in the old stuff into the table... Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $mode=Input::REREAD, $pred(typ: Input::Event, left: Idx, right: Val) = { - if ( left$i == 1 ) { + if ( left$i == 1 ) right$s = "testmodified"; - } - - if ( left$i == 2 ) { + if ( left$i == 2 ) left$ss = "idxmodified"; - } return T; } ]); -} + } -event Input::update_finished(name: string, source: string) { +event Input::update_finished(name: string, source: string) + { try = try + 1; print outfile, fmt("Update_finished for %s, try %d", name, try); print outfile, servers; - if ( try == 5 ) { - close (outfile); + if ( try == 5 ) + { + close(outfile); Input::remove("input"); + terminate(); + } } -} diff --git a/testing/btest/scripts/base/frameworks/input/raw.bro b/testing/btest/scripts/base/frameworks/input/raw.bro index 8ec6c12a78..cb19213173 100644 --- a/testing/btest/scripts/base/frameworks/input/raw.bro +++ b/testing/btest/scripts/base/frameworks/input/raw.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -13,6 +16,10 @@ sdf 3rw43wRRERLlL#RWERERERE. @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; +global try: count; module A; @@ -20,14 +27,23 @@ type Val: record { s: string; }; -event line(description: Input::EventDescription, tpe: Input::Event, s: string) { - print description; - print tpe; - print s; -} +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, description; + print outfile, tpe; + print outfile, s; + try = try + 1; + if ( try == 8 ) + { + close(outfile); + terminate(); + } + } event bro_init() -{ - Input::add_event([$source="input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line]); + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line]); Input::remove("input"); -} + } diff --git a/testing/btest/scripts/base/frameworks/input/repeat.bro b/testing/btest/scripts/base/frameworks/input/repeat.bro index 58ce9a1675..a5a914932c 100644 --- a/testing/btest/scripts/base/frameworks/input/repeat.bro +++ b/testing/btest/scripts/base/frameworks/input/repeat.bro @@ -1,6 +1,9 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). 
+# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out @TEST-START-FILE input.log #separator \x09 @@ -10,6 +13,11 @@ 1 T @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; +global try: count; + redef InputAscii::empty_field = "EMPTY"; module A; @@ -27,15 +35,25 @@ global destination: table[int] of Val = table(); const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; event bro_init() -{ - for ( i in one_to_32 ) { - Input::add_table([$source="input.log", $name=fmt("input%d", i), $idx=Idx, $val=Val, $destination=destination, $want_record=F]); + { + try = 0; + outfile = open("../out"); + for ( i in one_to_32 ) + { + Input::add_table([$source="../input.log", $name=fmt("input%d", i), $idx=Idx, $val=Val, $destination=destination, $want_record=F]); Input::remove(fmt("input%d", i)); + } } -} -event Input::update_finished(name: string, source: string) { - print name; - print source; - print destination; -} +event Input::update_finished(name: string, source: string) + { + print outfile, name; + print outfile, source; + print outfile, destination; + try = try + 1; + if ( try == 32 ) + { + close(outfile); + terminate(); + } + } diff --git a/testing/btest/scripts/base/frameworks/input/reread.bro b/testing/btest/scripts/base/frameworks/input/reread.bro index f33b060fe0..2db58fc6b0 100644 --- a/testing/btest/scripts/base/frameworks/input/reread.bro +++ b/testing/btest/scripts/base/frameworks/input/reread.bro @@ -1,6 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # # @TEST-EXEC: cp input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro %INPUT +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT # @TEST-EXEC: sleep 2 # @TEST-EXEC: cp input2.log input.log # @TEST-EXEC: sleep 2 @@ -9,7 +11,7 @@ # @TEST-EXEC: cp input4.log input.log # @TEST-EXEC: sleep 2 # @TEST-EXEC: cp input5.log input.log -# @TEST-EXEC: btest-bg-wait -k 2 +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input1.log @@ -56,6 +58,7 @@ F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} @TEST-END-FILE +@load base/protocols/ssh @load frameworks/communication/listen redef InputAscii::empty_field = "EMPTY"; @@ -90,7 +93,8 @@ global outfile: file; global try: count; -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) { +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) + { print outfile, "============EVENT============"; print outfile, "Description"; print outfile, description; @@ -100,11 +104,11 @@ event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, r print outfile, left; print outfile, "Right"; print outfile, right; -} + } event bro_init() -{ - outfile = open ("../out"); + { + outfile = open("../out"); try = 0; # first read in the old stuff into the table... 
Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, @@ -116,17 +120,20 @@ event bro_init() return T; } ]); -} + } -event Input::update_finished(name: string, source: string) { +event Input::update_finished(name: string, source: string) + { print outfile, "==========SERVERS============"; print outfile, servers; try = try + 1; - if ( try == 5 ) { + if ( try == 5 ) + { print outfile, "done"; close(outfile); Input::remove("input"); + terminate(); + } } -} diff --git a/testing/btest/scripts/base/frameworks/input/rereadraw.bro b/testing/btest/scripts/base/frameworks/input/rereadraw.bro index 33361ad27e..1051351c2b 100644 --- a/testing/btest/scripts/base/frameworks/input/rereadraw.bro +++ b/testing/btest/scripts/base/frameworks/input/rereadraw.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -13,6 +16,10 @@ sdf 3rw43wRRERLlL#RWERERERE. @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; +global try: count; module A; @@ -20,15 +27,24 @@ type Val: record { s: string; }; -event line(description: Input::EventDescription, tpe: Input::Event, s: string) { - print description; - print tpe; - print s; -} +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, description; + print outfile, tpe; + print outfile, s; + try = try + 1; + if ( try == 16 ) + { + close(outfile); + terminate(); + } + } event bro_init() -{ - Input::add_event([$source="input.log", $reader=Input::READER_RAW, $mode=Input::REREAD, $name="input", $fields=Val, $ev=line]); + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::REREAD, $name="input", $fields=Val, $ev=line]); Input::force_update("input"); Input::remove("input"); -} + } diff --git a/testing/btest/scripts/base/frameworks/input/stream.bro b/testing/btest/scripts/base/frameworks/input/stream.bro index 571a2273c1..1ecd8a2eb0 100644 --- a/testing/btest/scripts/base/frameworks/input/stream.bro +++ b/testing/btest/scripts/base/frameworks/input/stream.bro @@ -1,11 +1,13 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). 
+# @TEST-SERIALIZE: comm # # @TEST-EXEC: cp input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro %INPUT +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT # @TEST-EXEC: sleep 3 # @TEST-EXEC: cat input2.log >> input.log # @TEST-EXEC: sleep 3 # @TEST-EXEC: cat input3.log >> input.log -# @TEST-EXEC: btest-bg-wait -k 3 +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input1.log @@ -22,6 +24,7 @@ T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} @TEST-END-FILE +@load base/protocols/ssh @load frameworks/communication/listen redef InputAscii::empty_field = "EMPTY"; @@ -56,7 +59,8 @@ global outfile: file; global try: count; -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) { +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) + { print outfile, "============EVENT============"; print outfile, tpe; print outfile, left; @@ -66,18 +70,19 @@ event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, r try = try + 1; - if ( try == 3 ) { + if ( try == 3 ) + { print outfile, "done"; close(outfile); Input::remove("input"); + terminate(); + } } -} event bro_init() -{ - outfile = open ("../out"); + { + outfile = open("../out"); try = 0; # first read in the old stuff into the table... Input::add_table([$source="../input.log", $mode=Input::STREAM, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line]); -} - + } diff --git a/testing/btest/scripts/base/frameworks/input/streamraw.bro b/testing/btest/scripts/base/frameworks/input/streamraw.bro index cc0afd5ae8..a6aba88c5f 100644 --- a/testing/btest/scripts/base/frameworks/input/streamraw.bro +++ b/testing/btest/scripts/base/frameworks/input/streamraw.bro @@ -1,3 +1,5 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # # @TEST-EXEC: cp input1.log input.log # @TEST-EXEC: btest-bg-run bro bro -b %INPUT @@ -5,7 +7,7 @@ # @TEST-EXEC: cat input2.log >> input.log # @TEST-EXEC: sleep 3 # @TEST-EXEC: cat input3.log >> input.log -# @TEST-EXEC: btest-bg-wait -k 3 +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input1.log @@ -36,21 +38,25 @@ type Val: record { global try: count; global outfile: file; -event line(description: Input::EventDescription, tpe: Input::Event, s: string) { +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { print outfile, description; print outfile, tpe; print outfile, s; - - if ( try == 3 ) { + + try = try + 1; + if ( try == 8 ) + { print outfile, "done"; close(outfile); Input::remove("input"); + terminate(); + } } -} event bro_init() -{ - outfile = open ("../out"); + { + outfile = open("../out"); try = 0; Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line]); -} + } diff --git a/testing/btest/scripts/base/frameworks/input/tableevent.bro b/testing/btest/scripts/base/frameworks/input/tableevent.bro index e40485dd12..723e519237 100644 --- a/testing/btest/scripts/base/frameworks/input/tableevent.bro +++ b/testing/btest/scripts/base/frameworks/input/tableevent.bro @@ -1,5 +1,8 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). 
+# @TEST-SERIALIZE: comm # -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 # @TEST-EXEC: btest-diff out @TEST-START-FILE input.log @@ -16,6 +19,11 @@ 7 T @TEST-END-FILE +@load frameworks/communication/listen + +global outfile: file; +global try: count; + redef InputAscii::empty_field = "EMPTY"; type Idx: record { @@ -28,15 +36,24 @@ type Val: record { global destination: table[int] of Val = table(); -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: bool) { - print description; - print tpe; - print left; - print right; -} +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: bool) + { + print outfile, description; + print outfile, tpe; + print outfile, left; + print outfile, right; + try = try + 1; + if ( try == 7 ) + { + close(outfile); + terminate(); + } + } event bro_init() -{ - Input::add_table([$source="input.log", $name="input", $idx=Idx, $val=Val, $destination=destination, $want_record=F,$ev=line]); + { + try = 0; + outfile = open("../out"); + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=destination, $want_record=F,$ev=line]); Input::remove("input"); -} + } diff --git a/testing/btest/scripts/base/frameworks/input/twotables.bro b/testing/btest/scripts/base/frameworks/input/twotables.bro index 1413275e63..f404416049 100644 --- a/testing/btest/scripts/base/frameworks/input/twotables.bro +++ b/testing/btest/scripts/base/frameworks/input/twotables.bro @@ -1,10 +1,15 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm # # @TEST-EXEC: cp input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro %INPUT -# @TEST-EXEC: sleep 2 +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: sleep 5 # @TEST-EXEC: cp input3.log input.log -# @TEST-EXEC: btest-bg-wait -k 2 -# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-diff event.out +# @TEST-EXEC: btest-diff pred1.out +# @TEST-EXEC: btest-diff pred2.out +# @TEST-EXEC: btest-diff fin.out @TEST-START-FILE input1.log #separator \x09 @@ -28,6 +33,7 @@ T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz F -44 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} @TEST-END-FILE +@load base/protocols/ssh @load frameworks/communication/listen redef InputAscii::empty_field = "EMPTY"; @@ -58,59 +64,71 @@ type Val: record { global servers: table[int] of Val = table(); -global outfile: file; +global event_out: file; +global pred1_out: file; +global pred2_out: file; +global fin_out: file; global try: count; -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) { - print outfile, "============EVENT============"; -# print outfile, "Description"; -# print outfile, description; -# print outfile, "Type"; -# print outfile, tpe; -# print outfile, "Left"; -# print outfile, left; -# print outfile, "Right"; -# print outfile, right; -} +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) + { + print event_out, "============EVENT============"; +# print event_out, "Description"; +# print event_out, description; +# print event_out, "Type"; +# print event_out, tpe; +# print event_out, "Left"; +# print event_out, left; +# print event_out, "Right"; +# 
print event_out, right; + } event bro_init() -{ - outfile = open ("../out"); + { + event_out = open ("../event.out"); + pred1_out = open ("../pred1.out"); + pred2_out = open ("../pred2.out"); + fin_out = open ("../fin.out"); try = 0; # first read in the old stuff into the table... Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, $pred(typ: Input::Event, left: Idx, right: Val) = { - print outfile, "============PREDICATE============"; - print outfile, typ; - print outfile, left; - print outfile, right; + print pred1_out, "============PREDICATE============"; + print pred1_out, typ; + print pred1_out, left; + print pred1_out, right; return T; } ]); Input::add_table([$source="../input2.log", $mode=Input::REREAD, $name="ssh2", $idx=Idx, $val=Val, $destination=servers, $ev=line, $pred(typ: Input::Event, left: Idx, right: Val) = { - print outfile, "============PREDICATE 2============"; - print outfile, typ; - print outfile, left; - print outfile, right; + print pred2_out, "============PREDICATE 2============"; + print pred2_out, typ; + print pred2_out, left; + print pred2_out, right; return T; } ]); -} + } -event Input::update_finished(name: string, source: string) { - print outfile, "==========SERVERS============"; - print outfile, servers; +event Input::update_finished(name: string, source: string) + { + print fin_out, "==========SERVERS============"; + #print fin_out, servers; try = try + 1; - if ( try == 3 ) { - print outfile, "done"; - print outfile, servers; - close(outfile); + if ( try == 3 ) + { + print fin_out, "done"; + print fin_out, servers; + close(event_out); + close(pred1_out); + close(pred2_out); + close(fin_out); Input::remove("input"); Input::remove("input2"); terminate(); + } } -} From 34ead91f992cbc40dcb81053343e2ef60a3aff61 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 29 Jun 2012 16:24:31 -0500 Subject: [PATCH 444/651] Fix inconsistencies in random number generation. The srand()/rand() interface was being intermixed with the srandom()/random() one. The later is now used throughout. Changed the srand() and rand() BIFs to work deterministically if Bro was given a seed file (addresses #825). They also now wrap the system's srandom() and random() instead of srand() and rand() as per the above. --- src/bro.bif | 12 ++++++------ src/input/readers/Benchmark.cc | 10 +++++----- src/util.cc | 14 +++++++++++--- src/util.h | 4 ++++ testing/btest/Baseline/bifs.rand/out | 12 ++++++------ testing/btest/Baseline/bifs.rand/out.2 | 6 ++++++ testing/btest/bifs/rand.bro | 9 +++++++-- 7 files changed, 45 insertions(+), 22 deletions(-) create mode 100644 testing/btest/Baseline/bifs.rand/out.2 diff --git a/src/bro.bif b/src/bro.bif index 1feccb8639..a2c6ecd7c8 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -972,12 +972,12 @@ function sha256_hash_finish%(index: any%): string ## ## .. note:: ## -## This function is a wrapper about the function ``rand`` provided by -## the OS. +## This function is a wrapper about the function ``random`` +## provided by the OS. function rand%(max: count%): count %{ int result; - result = bro_uint_t(double(max) * double(rand()) / (RAND_MAX + 1.0)); + result = bro_uint_t(double(max) * double(bro_random()) / (RAND_MAX + 1.0)); return new Val(result, TYPE_COUNT); %} @@ -989,11 +989,11 @@ function rand%(max: count%): count ## ## .. note:: ## -## This function is a wrapper about the function ``srand`` provided -## by the OS. 
+## This function is a wrapper about the function ``srandom`` +## provided by the OS. function srand%(seed: count%): any %{ - srand(seed); + bro_srandom(seed); return 0; %} diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index 5e4ef090f7..a55a69dd60 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -59,7 +59,7 @@ string Benchmark::RandomString(const int len) "abcdefghijklmnopqrstuvwxyz"; for (int i = 0; i < len; ++i) - s[i] = values[rand() / (RAND_MAX / sizeof(values))]; + s[i] = values[random() / (RAND_MAX / sizeof(values))]; return s; } @@ -134,7 +134,7 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) break; case TYPE_INT: - val->val.int_val = rand(); + val->val.int_val = random(); break; case TYPE_TIME: @@ -148,11 +148,11 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) case TYPE_COUNT: case TYPE_COUNTER: - val->val.uint_val = rand(); + val->val.uint_val = random(); break; case TYPE_PORT: - val->val.port_val.port = rand() / (RAND_MAX / 60000); + val->val.port_val.port = random() / (RAND_MAX / 60000); val->val.port_val.proto = TRANSPORT_UNKNOWN; break; @@ -175,7 +175,7 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) // Then - common stuff { // how many entries do we have... - unsigned int length = rand() / (RAND_MAX / 15); + unsigned int length = random() / (RAND_MAX / 15); Value** lvals = new Value* [length]; diff --git a/src/util.cc b/src/util.cc index 798be400d1..85aa18ef0d 100644 --- a/src/util.cc +++ b/src/util.cc @@ -633,12 +633,20 @@ static bool write_random_seeds(const char* write_file, uint32 seed, static bool bro_rand_determistic = false; static unsigned int bro_rand_state = 0; -static void bro_srand(unsigned int seed, bool deterministic) +static void bro_srandom(unsigned int seed, bool deterministic) { bro_rand_state = seed; bro_rand_determistic = deterministic; - srand(seed); + srandom(seed); + } + +void bro_srandom(unsigned int seed) + { + if ( bro_rand_determistic ) + bro_rand_state = seed; + else + srandom(seed); } void init_random_seed(uint32 seed, const char* read_file, const char* write_file) @@ -705,7 +713,7 @@ void init_random_seed(uint32 seed, const char* read_file, const char* write_file seeds_done = true; } - bro_srand(seed, seeds_done); + bro_srandom(seed, seeds_done); if ( ! hmac_key_set ) { diff --git a/src/util.h b/src/util.h index 6b237edfd8..9ab8a58760 100644 --- a/src/util.h +++ b/src/util.h @@ -159,6 +159,10 @@ extern bool have_random_seed(); // predictable PRNG. long int bro_random(); +// Calls the system srandom() function with the given seed if not running +// in deterministic mode, else it updates the state of the deterministic PRNG +void bro_srandom(unsigned int seed); + extern uint64 rand64bit(); // Each event source that may generate events gets an internally unique ID. 
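The util.cc/util.h change above replaces the mixed srand()/rand() usage with srandom()/random() plus a Bro-internal wrapper that stays deterministic once a seed has been supplied. The standalone sketch below illustrates only that general pattern; the names demo_srandom/demo_random and the LCG constants are placeholders and are not Bro's actual generator.

// Standalone sketch of a seedable RNG wrapper (illustrative only; not Bro's generator).
#include <cstdio>
#include <cstdlib>

static bool demo_deterministic = false;  // true once an explicit seed was given
static unsigned int demo_state = 0;      // state of the deterministic fallback PRNG

// Seed the generator. With deterministic=true every later demo_random()
// call comes from the private generator, so runs are repeatable.
void demo_srandom(unsigned int seed, bool deterministic)
	{
	demo_deterministic = deterministic;
	demo_state = seed;

	if ( ! deterministic )
		srandom(seed);
	}

// Next value: placeholder LCG when deterministic, system random() otherwise.
long int demo_random()
	{
	if ( ! demo_deterministic )
		return random();

	demo_state = demo_state * 1103515245u + 12345u;
	return (long int)(demo_state & 0x7fffffffu);
	}

int main()
	{
	demo_srandom(575, true);  // same seed => same three numbers on every run

	for ( int i = 0; i < 3; ++i )
		printf("%ld\n", demo_random() % 1000);

	return 0;
	}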
diff --git a/testing/btest/Baseline/bifs.rand/out b/testing/btest/Baseline/bifs.rand/out index 367833f80a..a016eb6f15 100644 --- a/testing/btest/Baseline/bifs.rand/out +++ b/testing/btest/Baseline/bifs.rand/out @@ -1,6 +1,6 @@ -185 -236 -805 -47 -996 -498 +985 +474 +738 +4 +634 +473 diff --git a/testing/btest/Baseline/bifs.rand/out.2 b/testing/btest/Baseline/bifs.rand/out.2 new file mode 100644 index 0000000000..2cd43d985c --- /dev/null +++ b/testing/btest/Baseline/bifs.rand/out.2 @@ -0,0 +1,6 @@ +985 +474 +738 +974 +371 +638 diff --git a/testing/btest/bifs/rand.bro b/testing/btest/bifs/rand.bro index 229645944e..caf3f16031 100644 --- a/testing/btest/bifs/rand.bro +++ b/testing/btest/bifs/rand.bro @@ -1,6 +1,10 @@ # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: bro -b %INPUT do_seed=F >out.2 # @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff out.2 + +const do_seed = T &redef; event bro_init() { @@ -12,7 +16,8 @@ event bro_init() print b; print c; - srand(575); + if ( do_seed ) + srand(575); local d = rand(1000); local e = rand(1000); From 3559a39d59b43b040d5fb6cd5a6c081990e902c0 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 2 Jul 2012 10:03:28 -0700 Subject: [PATCH 445/651] introduce reader-info struct analogous to writer-info. All tests still pass. --- src/input/Manager.cc | 18 +++++---- src/input/ReaderBackend.cc | 53 +++++++++++++++++++++++--- src/input/ReaderBackend.h | 68 +++++++++++++++++++++++++--------- src/input/ReaderFrontend.cc | 25 +++++++------ src/input/ReaderFrontend.h | 24 +++++++++--- src/input/readers/Ascii.cc | 22 +++++------ src/input/readers/Ascii.h | 2 +- src/input/readers/Benchmark.cc | 6 +-- src/input/readers/Benchmark.h | 2 +- src/input/readers/Raw.cc | 10 ++--- src/input/readers/Raw.h | 2 +- 11 files changed, 162 insertions(+), 70 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index f9979fbe6e..1f5f17bba8 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -71,7 +71,7 @@ declare(PDict, InputHash); class Manager::Stream { public: string name; - string source; + ReaderBackend::ReaderInfo info; bool removed; ReaderMode mode; @@ -81,7 +81,6 @@ public: EnumVal* type; ReaderFrontend* reader; TableVal* config; - std::map configmap; RecordVal* description; @@ -330,8 +329,11 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) info->reader = reader_obj; info->type = reader->AsEnumVal(); // ref'd by lookupwithdefault info->name = name; - info->source = source; info->config = config->AsTableVal(); // ref'd by LookupWithDefault + + ReaderBackend::ReaderInfo readerinfo; + readerinfo.source = source; + Ref(description); info->description = description; @@ -345,13 +347,15 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) ListVal* index = info->config->RecoverIndex(k); string key = index->Index(0)->AsString()->CheckString(); string value = v->Value()->AsString()->CheckString(); - info->configmap.insert(std::make_pair(key, value)); + info->info.config.insert(std::make_pair(key, value)); Unref(index); delete k; } } + info->info = readerinfo; + DBG_LOG(DBG_INPUT, "Successfully created new input stream %s", name.c_str()); @@ -477,7 +481,7 @@ bool Manager::CreateEventStream(RecordVal* fval) assert(stream->reader); - stream->reader->Init(stream->source, stream->mode, stream->num_fields, logf, stream->configmap ); + stream->reader->Init(stream->info, stream->mode, stream->num_fields, logf ); readers[stream->reader] = stream; @@ -654,7 +658,7 @@ bool 
Manager::CreateTableStream(RecordVal* fval) assert(stream->reader); - stream->reader->Init(stream->source, stream->mode, fieldsV.size(), fields, stream->configmap ); + stream->reader->Init(stream->info, stream->mode, fieldsV.size(), fields ); readers[stream->reader] = stream; @@ -1234,7 +1238,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) #endif // Send event that the current update is indeed finished. - SendEvent(update_finished, 2, new StringVal(i->name.c_str()), new StringVal(i->source.c_str())); + SendEvent(update_finished, 2, new StringVal(i->name.c_str()), new StringVal(i->info.source.c_str())); } void Manager::Put(ReaderFrontend* reader, Value* *vals) diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 276b5d25b0..6ed70bced0 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -140,6 +140,49 @@ public: } }; +using namespace logging; + +bool ReaderBackend::ReaderInfo::Read(SerializationFormat* fmt) + { + int size; + + if ( ! (fmt->Read(&source, "source") && + fmt->Read(&size, "config_size")) ) + return false; + + config.clear(); + + while ( size ) + { + string value; + string key; + + if ( ! (fmt->Read(&value, "config-value") && fmt->Read(&value, "config-key")) ) + return false; + + config.insert(std::make_pair(value, key)); + } + + return true; + } + + +bool ReaderBackend::ReaderInfo::Write(SerializationFormat* fmt) const + { + int size = config.size(); + + if ( ! (fmt->Write(source, "source") && + fmt->Write(size, "config_size")) ) + return false; + + for ( config_map::const_iterator i = config.begin(); i != config.end(); ++i ) + { + if ( ! (fmt->Write(i->first, "config-value") && fmt->Write(i->second, "config-key")) ) + return false; + } + + return true; + } ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() { @@ -183,18 +226,18 @@ void ReaderBackend::SendEntry(Value* *vals) SendOut(new SendEntryMessage(frontend, vals)); } -bool ReaderBackend::Init(string arg_source, ReaderMode arg_mode, const int arg_num_fields, - const threading::Field* const* arg_fields, const std::map config) +bool ReaderBackend::Init(const ReaderInfo& arg_info, ReaderMode arg_mode, const int arg_num_fields, + const threading::Field* const* arg_fields) { - source = arg_source; + info = arg_info; mode = arg_mode; num_fields = arg_num_fields; fields = arg_fields; - SetName("InputReader/"+source); + SetName("InputReader/"+info.source); // disable if DoInit returns error. - int success = DoInit(arg_source, mode, arg_num_fields, arg_fields, config); + int success = DoInit(arg_info, mode, arg_num_fields, arg_fields); if ( ! success ) { diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index c23c68bf7e..d7d022d5fa 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -7,6 +7,8 @@ #include "threading/SerialTypes.h" #include "threading/MsgThread.h" +class RemoteSerializer; + namespace input { @@ -65,6 +67,35 @@ public: */ virtual ~ReaderBackend(); + /** + * A struct passing information to the reader at initialization time. + */ + struct ReaderInfo + { + typedef std::map config_map; + + /** + * A string left to the interpretation of the reader + * implementation; it corresponds to the value configured on + * the script-level for the logging filter. + */ + string source; + + /** + * A map of key/value pairs corresponding to the relevant + * filter's "config" table. 
+ */ + config_map config; + + private: + friend class ::RemoteSerializer; + + // Note, these need to be adapted when changing the struct's + // fields. They serialize/deserialize the struct. + bool Read(SerializationFormat* fmt); + bool Write(SerializationFormat* fmt) const; + }; + /** * One-time initialization of the reader to define the input source. * @@ -84,7 +115,7 @@ public: * * @return False if an error occured. */ - bool Init(string source, ReaderMode mode, int num_fields, const threading::Field* const* fields, std::map config); + bool Init(const ReaderInfo& info, ReaderMode mode, int num_fields, const threading::Field* const* fields); /** * Finishes reading from this input stream in a regular fashion. Must @@ -112,6 +143,22 @@ public: */ void DisableFrontend(); + /** + * Returns the log fields as passed into the constructor. + */ + const threading::Field* const * Fields() const { return fields; } + + /** + * Returns the additional reader information into the constructor. + */ + const ReaderInfo& Info() const { return info; } + + /** + * Returns the number of log fields as passed into the constructor. + */ + int NumFields() const { return num_fields; } + + protected: // Methods that have to be overwritten by the individual readers @@ -133,7 +180,7 @@ protected: * provides accessor methods to get them later, and they are passed * in here only for convinience. */ - virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields, const std::map config) = 0; + virtual bool DoInit(const ReaderInfo& info, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields) = 0; /** * Reader-specific method implementing input finalization at @@ -162,26 +209,11 @@ protected: */ virtual bool DoUpdate() = 0; - /** - * Returns the input source as passed into Init()/. - */ - const string Source() const { return source; } - /** * Returns the reader mode as passed into Init(). */ const ReaderMode Mode() const { return mode; } - /** - * Returns the number of log fields as passed into Init(). - */ - unsigned int NumFields() const { return num_fields; } - - /** - * Returns the log fields as passed into Init(). - */ - const threading::Field* const * Fields() const { return fields; } - /** * Method allowing a reader to send a specified Bro event. Vals must * match the values expected by the bro event. @@ -282,7 +314,7 @@ private: // from this class, it's running in a different thread! 
ReaderFrontend* frontend; - string source; + ReaderInfo info; ReaderMode mode; unsigned int num_fields; const threading::Field* const * fields; // raw mapping diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index ec1630cd88..f92a8ec80c 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -11,22 +11,21 @@ namespace input { class InitMessage : public threading::InputMessage { public: - InitMessage(ReaderBackend* backend, const string source, ReaderMode mode, - const int num_fields, const threading::Field* const* fields, const std::map config) + InitMessage(ReaderBackend* backend, const ReaderBackend::ReaderInfo& info, ReaderMode mode, + const int num_fields, const threading::Field* const* fields) : threading::InputMessage("Init", backend), - source(source), mode(mode), num_fields(num_fields), fields(fields), config(config) { } + info(info), mode(mode), num_fields(num_fields), fields(fields) { } virtual bool Process() { - return Object()->Init(source, mode, num_fields, fields, config); + return Object()->Init(info, mode, num_fields, fields); } private: - const string source; + const ReaderBackend::ReaderInfo info; const ReaderMode mode; const int num_fields; const threading::Field* const* fields; - const std::map config; }; class UpdateMessage : public threading::InputMessage @@ -64,8 +63,8 @@ ReaderFrontend::~ReaderFrontend() { } -void ReaderFrontend::Init(string arg_source, ReaderMode mode, const int num_fields, - const threading::Field* const* fields, const std::map config) +void ReaderFrontend::Init(const ReaderBackend::ReaderInfo& arg_info, ReaderMode mode, const int arg_num_fields, + const threading::Field* const* arg_fields) { if ( disabled ) return; @@ -73,10 +72,12 @@ void ReaderFrontend::Init(string arg_source, ReaderMode mode, const int num_fiel if ( initialized ) reporter->InternalError("reader initialize twice"); - source = arg_source; + info = arg_info; + num_fields = arg_num_fields; + fields = arg_fields; initialized = true; - backend->SendIn(new InitMessage(backend, arg_source, mode, num_fields, fields, config)); + backend->SendIn(new InitMessage(backend, info, mode, num_fields, fields)); } void ReaderFrontend::Update() @@ -110,10 +111,10 @@ void ReaderFrontend::Close() string ReaderFrontend::Name() const { - if ( source.size() ) + if ( info.source.size() ) return ty_name; - return ty_name + "/" + source; + return ty_name + "/" + info.source; } } diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index 1240831ee6..fadf2cddb5 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -52,7 +52,7 @@ public: * * This method must only be called from the main thread. */ - void Init(string arg_source, ReaderMode mode, const int arg_num_fields, const threading::Field* const* fields, const std::map config); + void Init(const ReaderBackend::ReaderInfo& info, ReaderMode mode, const int arg_num_fields, const threading::Field* const* fields); /** * Force an update of the current input source. Actual action depends @@ -102,13 +102,23 @@ public: */ string Name() const; -protected: - friend class Manager; + /** + * Returns the additional reader information into the constructor. + */ + const ReaderBackend::ReaderInfo& Info() const { return info; } /** - * Returns the source as passed into the constructor. + * Returns the number of log fields as passed into the constructor. 
*/ - const string& Source() const { return source; }; + int NumFields() const { return num_fields; } + + /** + * Returns the log fields as passed into the constructor. + */ + const threading::Field* const * Fields() const { return fields; } + +protected: + friend class Manager; /** * Returns the name of the backend's type. @@ -117,7 +127,9 @@ protected: private: ReaderBackend* backend; // The backend we have instanatiated. - string source; + ReaderBackend::ReaderInfo info; // Meta information as passed to Init(). + const threading::Field* const* fields; // The log fields. + int num_fields; // Information as passed to init(); string ty_name; // Backend type, set by manager. bool disabled; // True if disabled. bool initialized; // True if initialized. diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 47bbe2a207..9e3ad28f9c 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -83,14 +83,14 @@ void Ascii::DoClose() } } -bool Ascii::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields, const std::map config) +bool Ascii::DoInit(const ReaderInfo& info, ReaderMode mode, int num_fields, const Field* const* fields) { mtime = 0; - file = new ifstream(path.c_str()); + file = new ifstream(info.source.c_str()); if ( ! file->is_open() ) { - Error(Fmt("Init: cannot open %s", path.c_str())); + Error(Fmt("Init: cannot open %s", info.source.c_str())); delete(file); file = 0; return false; @@ -98,7 +98,7 @@ bool Ascii::DoInit(string path, ReaderMode mode, int num_fields, const Field* co if ( ReadHeader(false) == false ) { - Error(Fmt("Init: cannot open %s; headers are incorrect", path.c_str())); + Error(Fmt("Init: cannot open %s; headers are incorrect", info.source.c_str())); file->close(); delete(file); file = 0; @@ -147,7 +147,7 @@ bool Ascii::ReadHeader(bool useCached) //printf("Updating fields from description %s\n", line.c_str()); columnMap.clear(); - for ( unsigned int i = 0; i < NumFields(); i++ ) + for ( int i = 0; i < NumFields(); i++ ) { const Field* field = Fields()[i]; @@ -164,7 +164,7 @@ bool Ascii::ReadHeader(bool useCached) } Error(Fmt("Did not find requested field %s in input data file %s.", - field->name.c_str(), Source().c_str())); + field->name.c_str(), Info().source.c_str())); return false; } @@ -367,9 +367,9 @@ bool Ascii::DoUpdate() { // check if the file has changed struct stat sb; - if ( stat(Source().c_str(), &sb) == -1 ) + if ( stat(Info().source.c_str(), &sb) == -1 ) { - Error(Fmt("Could not get stat for %s", Source().c_str())); + Error(Fmt("Could not get stat for %s", Info().source.c_str())); return false; } @@ -403,10 +403,10 @@ bool Ascii::DoUpdate() file = 0; } - file = new ifstream(Source().c_str()); + file = new ifstream(Info().source.c_str()); if ( ! 
file->is_open() ) { - Error(Fmt("cannot open %s", Source().c_str())); + Error(Fmt("cannot open %s", Info().source.c_str())); return false; } @@ -490,7 +490,7 @@ bool Ascii::DoUpdate() } //printf("fpos: %d, second.num_fields: %d\n", fpos, (*it).second.num_fields); - assert ( (unsigned int) fpos == NumFields() ); + assert ( fpos == NumFields() ); if ( Mode() == MODE_STREAM ) Put(fields); diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index c17c5220ed..bb7e7a1ce2 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -38,7 +38,7 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Ascii(frontend); } protected: - virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields, const std::map config); + virtual bool DoInit(const ReaderInfo& info, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); virtual bool DoUpdate(); virtual bool DoHeartbeat(double network_time, double current_time); diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index 37888b095f..1b4d39ddf1 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -36,9 +36,9 @@ void Benchmark::DoClose() { } -bool Benchmark::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields, const std::map config) +bool Benchmark::DoInit(const ReaderInfo& info, ReaderMode mode, int num_fields, const Field* const* fields) { - num_lines = atoi(path.c_str()); + num_lines = atoi(info.source.c_str()); if ( autospread != 0.0 ) autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); @@ -80,7 +80,7 @@ bool Benchmark::DoUpdate() for ( int i = 0; i < linestosend; i++ ) { Value** field = new Value*[NumFields()]; - for (unsigned int j = 0; j < NumFields(); j++ ) + for (int j = 0; j < NumFields(); j++ ) field[j] = EntryToVal(Fields()[j]->type, Fields()[j]->subtype); if ( Mode() == MODE_STREAM ) diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index e806b9ca4a..0f940873e4 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -18,7 +18,7 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Benchmark(frontend); } protected: - virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields, const std::map config); + virtual bool DoInit(const ReaderInfo& info, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); virtual bool DoUpdate(); virtual bool DoHeartbeat(double network_time, double current_time); diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index 9971aa1aa3..2fb7e92c40 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -100,15 +100,15 @@ bool Raw::CloseInput() return true; } -bool Raw::DoInit(string path, ReaderMode mode, int num_fields, const Field* const* fields, const std::map config) +bool Raw::DoInit(const ReaderInfo& info, ReaderMode mode, int num_fields, const Field* const* fields) { - fname = path; + fname = info.source; mtime = 0; execute = false; firstrun = true; bool result; - if ( path.length() == 0 ) + if ( info.source.length() == 0 ) { Error("No source path provided"); return false; @@ -129,11 +129,11 @@ bool Raw::DoInit(string path, ReaderMode mode, int num_fields, const Field* cons } // do Initialization - char last = path[path.length()-1]; + char last = 
info.source[info.source.length()-1]; if ( last == '|' ) { execute = true; - fname = path.substr(0, fname.length() - 1); + fname = info.source.substr(0, fname.length() - 1); if ( (mode != MODE_MANUAL) ) { diff --git a/src/input/readers/Raw.h b/src/input/readers/Raw.h index fb6b94410b..7d1351e728 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/Raw.h @@ -22,7 +22,7 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Raw(frontend); } protected: - virtual bool DoInit(string path, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields, const std::map config); + virtual bool DoInit(const ReaderInfo& info, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); virtual bool DoUpdate(); virtual bool DoHeartbeat(double network_time, double current_time); From 7f83f157fcfe9c56ffb4a88065add8b303a99875 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 2 Jul 2012 10:41:02 -0700 Subject: [PATCH 446/651] add mode to readerinfo - no need to have it separately everywhere anymore. Disable remoteserialization of readerinfo - in contrast to the logging framework this is not needed here (I think). --- src/input/Manager.cc | 15 +++++++-------- src/input/ReaderBackend.cc | 11 ++++++++--- src/input/ReaderBackend.h | 26 +++++++++++--------------- src/input/ReaderFrontend.cc | 11 +++++------ src/input/ReaderFrontend.h | 2 +- src/input/readers/Ascii.cc | 12 ++++++------ src/input/readers/Ascii.h | 2 +- src/input/readers/Benchmark.cc | 8 ++++---- src/input/readers/Benchmark.h | 2 +- src/input/readers/Raw.cc | 14 +++++++------- src/input/readers/Raw.h | 2 +- 11 files changed, 52 insertions(+), 53 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 1f5f17bba8..985e67302a 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -74,8 +74,6 @@ public: ReaderBackend::ReaderInfo info; bool removed; - ReaderMode mode; - StreamType stream_type; // to distinguish between event and table streams EnumVal* type; @@ -305,19 +303,21 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) EnumVal* mode = description->LookupWithDefault(rtype->FieldOffset("mode"))->AsEnumVal(); Val* config = description->LookupWithDefault(rtype->FieldOffset("config")); + + ReaderBackend::ReaderInfo readerinfo; switch ( mode->InternalInt() ) { case 0: - info->mode = MODE_MANUAL; + readerinfo.mode = MODE_MANUAL; break; case 1: - info->mode = MODE_REREAD; + readerinfo.mode = MODE_REREAD; break; case 2: - info->mode = MODE_STREAM; + readerinfo.mode = MODE_STREAM; break; default: @@ -331,7 +331,6 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) info->name = name; info->config = config->AsTableVal(); // ref'd by LookupWithDefault - ReaderBackend::ReaderInfo readerinfo; readerinfo.source = source; Ref(description); @@ -481,7 +480,7 @@ bool Manager::CreateEventStream(RecordVal* fval) assert(stream->reader); - stream->reader->Init(stream->info, stream->mode, stream->num_fields, logf ); + stream->reader->Init(stream->info, stream->num_fields, logf ); readers[stream->reader] = stream; @@ -658,7 +657,7 @@ bool Manager::CreateTableStream(RecordVal* fval) assert(stream->reader); - stream->reader->Init(stream->info, stream->mode, fieldsV.size(), fields ); + stream->reader->Init(stream->info, fieldsV.size(), fields ); readers[stream->reader] = stream; diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 6ed70bced0..94120100ab 100644 --- a/src/input/ReaderBackend.cc +++ 
b/src/input/ReaderBackend.cc @@ -142,6 +142,10 @@ public: using namespace logging; +/* + * I don't think the input framework needs remote serialization. If it doesn't, kill this. If it does add ReaderMode. + + bool ReaderBackend::ReaderInfo::Read(SerializationFormat* fmt) { int size; @@ -184,6 +188,8 @@ bool ReaderBackend::ReaderInfo::Write(SerializationFormat* fmt) const return true; } + */ + ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() { disabled = true; // disabled will be set correcty in init. @@ -226,18 +232,17 @@ void ReaderBackend::SendEntry(Value* *vals) SendOut(new SendEntryMessage(frontend, vals)); } -bool ReaderBackend::Init(const ReaderInfo& arg_info, ReaderMode arg_mode, const int arg_num_fields, +bool ReaderBackend::Init(const ReaderInfo& arg_info, const int arg_num_fields, const threading::Field* const* arg_fields) { info = arg_info; - mode = arg_mode; num_fields = arg_num_fields; fields = arg_fields; SetName("InputReader/"+info.source); // disable if DoInit returns error. - int success = DoInit(arg_info, mode, arg_num_fields, arg_fields); + int success = DoInit(arg_info, arg_num_fields, arg_fields); if ( ! success ) { diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index d7d022d5fa..fd7ac769f2 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -7,8 +7,6 @@ #include "threading/SerialTypes.h" #include "threading/MsgThread.h" -class RemoteSerializer; - namespace input { @@ -87,6 +85,12 @@ public: */ config_map config; + /** + * The opening mode for the input source. + */ + ReaderMode mode; +/* + * I don't think the input framework needs remote serialization. If it doesn't, kill this. If it does add ReaderMode. private: friend class ::RemoteSerializer; @@ -94,16 +98,14 @@ public: // fields. They serialize/deserialize the struct. bool Read(SerializationFormat* fmt); bool Write(SerializationFormat* fmt) const; + + */ }; /** * One-time initialization of the reader to define the input source. * - * @param source A string left to the interpretation of the - * reader implementation; it corresponds to the value configured on - * the script-level for the input stream. - * - * @param mode The opening mode for the input source. + * @param @param info Meta information for the writer. * * @param num_fields Number of fields contained in \a fields. * @@ -115,7 +117,7 @@ public: * * @return False if an error occured. */ - bool Init(const ReaderInfo& info, ReaderMode mode, int num_fields, const threading::Field* const* fields); + bool Init(const ReaderInfo& info, int num_fields, const threading::Field* const* fields); /** * Finishes reading from this input stream in a regular fashion. Must @@ -180,7 +182,7 @@ protected: * provides accessor methods to get them later, and they are passed * in here only for convinience. */ - virtual bool DoInit(const ReaderInfo& info, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields) = 0; + virtual bool DoInit(const ReaderInfo& info, int arg_num_fields, const threading::Field* const* fields) = 0; /** * Reader-specific method implementing input finalization at @@ -209,11 +211,6 @@ protected: */ virtual bool DoUpdate() = 0; - /** - * Returns the reader mode as passed into Init(). - */ - const ReaderMode Mode() const { return mode; } - /** * Method allowing a reader to send a specified Bro event. Vals must * match the values expected by the bro event. 
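Taken together with the previous patch, a reader backend now receives a single ReaderInfo value (source string, per-stream config map, and, as of this patch, the open mode) instead of a growing list of separate Init() arguments. The sketch below shows the general shape of that pattern only; DemoReaderInfo and DemoInit are placeholder names, not the actual input-framework API.

// Illustrative sketch of passing reader parameters via one info struct.
#include <iostream>
#include <map>
#include <string>

enum DemoMode { DEMO_MANUAL, DEMO_REREAD, DEMO_STREAM };

struct DemoReaderInfo
	{
	typedef std::map<std::string, std::string> config_map;

	std::string source;  // input source, e.g. a file name
	config_map config;   // per-stream key/value options
	DemoMode mode;       // how the source is read
	};

// The backend only needs the one struct; adding another per-stream
// option later does not change this signature.
bool DemoInit(const DemoReaderInfo& info, int num_fields)
	{
	std::cout << "source=" << info.source
	          << " mode=" << info.mode
	          << " fields=" << num_fields << "\n";

	for ( DemoReaderInfo::config_map::const_iterator i = info.config.begin();
	      i != info.config.end(); ++i )
		std::cout << "  option " << i->first << "=" << i->second << "\n";

	return true;
	}

int main()
	{
	DemoReaderInfo info;
	info.source = "../input.log";
	info.mode = DEMO_REREAD;
	info.config["empty_field"] = "EMPTY";

	return DemoInit(info, 2) ? 0 : 1;
	}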
@@ -315,7 +312,6 @@ private: ReaderFrontend* frontend; ReaderInfo info; - ReaderMode mode; unsigned int num_fields; const threading::Field* const * fields; // raw mapping diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index f92a8ec80c..2c5d522c2f 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -11,19 +11,18 @@ namespace input { class InitMessage : public threading::InputMessage { public: - InitMessage(ReaderBackend* backend, const ReaderBackend::ReaderInfo& info, ReaderMode mode, + InitMessage(ReaderBackend* backend, const ReaderBackend::ReaderInfo& info, const int num_fields, const threading::Field* const* fields) : threading::InputMessage("Init", backend), - info(info), mode(mode), num_fields(num_fields), fields(fields) { } + info(info), num_fields(num_fields), fields(fields) { } virtual bool Process() { - return Object()->Init(info, mode, num_fields, fields); + return Object()->Init(info, num_fields, fields); } private: const ReaderBackend::ReaderInfo info; - const ReaderMode mode; const int num_fields; const threading::Field* const* fields; }; @@ -63,7 +62,7 @@ ReaderFrontend::~ReaderFrontend() { } -void ReaderFrontend::Init(const ReaderBackend::ReaderInfo& arg_info, ReaderMode mode, const int arg_num_fields, +void ReaderFrontend::Init(const ReaderBackend::ReaderInfo& arg_info, const int arg_num_fields, const threading::Field* const* arg_fields) { if ( disabled ) @@ -77,7 +76,7 @@ void ReaderFrontend::Init(const ReaderBackend::ReaderInfo& arg_info, ReaderMode fields = arg_fields; initialized = true; - backend->SendIn(new InitMessage(backend, info, mode, num_fields, fields)); + backend->SendIn(new InitMessage(backend, info, num_fields, fields)); } void ReaderFrontend::Update() diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index fadf2cddb5..35235ee2bc 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -52,7 +52,7 @@ public: * * This method must only be called from the main thread. */ - void Init(const ReaderBackend::ReaderInfo& info, ReaderMode mode, const int arg_num_fields, const threading::Field* const* fields); + void Init(const ReaderBackend::ReaderInfo& info, const int arg_num_fields, const threading::Field* const* fields); /** * Force an update of the current input source. 
Actual action depends diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 9e3ad28f9c..1731bba872 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -83,7 +83,7 @@ void Ascii::DoClose() } } -bool Ascii::DoInit(const ReaderInfo& info, ReaderMode mode, int num_fields, const Field* const* fields) +bool Ascii::DoInit(const ReaderInfo& info, int num_fields, const Field* const* fields) { mtime = 0; @@ -362,7 +362,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) // read the entire file and send appropriate thingies back to InputMgr bool Ascii::DoUpdate() { - switch ( Mode() ) { + switch ( Info().mode ) { case MODE_REREAD: { // check if the file has changed @@ -389,7 +389,7 @@ bool Ascii::DoUpdate() // - this is not that bad) if ( file && file->is_open() ) { - if ( Mode() == MODE_STREAM ) + if ( Info().mode == MODE_STREAM ) { file->clear(); // remove end of file evil bits if ( !ReadHeader(true) ) @@ -492,13 +492,13 @@ bool Ascii::DoUpdate() //printf("fpos: %d, second.num_fields: %d\n", fpos, (*it).second.num_fields); assert ( fpos == NumFields() ); - if ( Mode() == MODE_STREAM ) + if ( Info().mode == MODE_STREAM ) Put(fields); else SendEntry(fields); } - if ( Mode () != MODE_STREAM ) + if ( Info().mode != MODE_STREAM ) EndCurrentSend(); return true; @@ -508,7 +508,7 @@ bool Ascii::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); - switch ( Mode() ) { + switch ( Info().mode ) { case MODE_MANUAL: // yay, we do nothing :) break; diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index bb7e7a1ce2..e1506cbe82 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -38,7 +38,7 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Ascii(frontend); } protected: - virtual bool DoInit(const ReaderInfo& info, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); + virtual bool DoInit(const ReaderInfo& info, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); virtual bool DoUpdate(); virtual bool DoHeartbeat(double network_time, double current_time); diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index 1b4d39ddf1..d8dcb543f4 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -36,7 +36,7 @@ void Benchmark::DoClose() { } -bool Benchmark::DoInit(const ReaderInfo& info, ReaderMode mode, int num_fields, const Field* const* fields) +bool Benchmark::DoInit(const ReaderInfo& info, int num_fields, const Field* const* fields) { num_lines = atoi(info.source.c_str()); @@ -83,7 +83,7 @@ bool Benchmark::DoUpdate() for (int j = 0; j < NumFields(); j++ ) field[j] = EntryToVal(Fields()[j]->type, Fields()[j]->subtype); - if ( Mode() == MODE_STREAM ) + if ( Info().mode == MODE_STREAM ) // do not do tracking, spread out elements over the second that we have... 
Put(field); else @@ -109,7 +109,7 @@ bool Benchmark::DoUpdate() } - if ( Mode() != MODE_STREAM ) + if ( Info().mode != MODE_STREAM ) EndCurrentSend(); return true; @@ -227,7 +227,7 @@ bool Benchmark::DoHeartbeat(double network_time, double current_time) num_lines += add; heartbeatstarttime = CurrTime(); - switch ( Mode() ) { + switch ( Info().mode ) { case MODE_MANUAL: // yay, we do nothing :) break; diff --git a/src/input/readers/Benchmark.h b/src/input/readers/Benchmark.h index 0f940873e4..bab564b12a 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/Benchmark.h @@ -18,7 +18,7 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Benchmark(frontend); } protected: - virtual bool DoInit(const ReaderInfo& info, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); + virtual bool DoInit(const ReaderInfo& info, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); virtual bool DoUpdate(); virtual bool DoHeartbeat(double network_time, double current_time); diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index 2fb7e92c40..d4a761a931 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -66,7 +66,7 @@ bool Raw::OpenInput() // This is defined in input/fdstream.h in = new boost::fdistream(fileno(file)); - if ( execute && Mode() == MODE_STREAM ) + if ( execute && Info().mode == MODE_STREAM ) fcntl(fileno(file), F_SETFL, O_NONBLOCK); return true; @@ -100,7 +100,7 @@ bool Raw::CloseInput() return true; } -bool Raw::DoInit(const ReaderInfo& info, ReaderMode mode, int num_fields, const Field* const* fields) +bool Raw::DoInit(const ReaderInfo& info, int num_fields, const Field* const* fields) { fname = info.source; mtime = 0; @@ -135,10 +135,10 @@ bool Raw::DoInit(const ReaderInfo& info, ReaderMode mode, int num_fields, const execute = true; fname = info.source.substr(0, fname.length() - 1); - if ( (mode != MODE_MANUAL) ) + if ( (info.mode != MODE_MANUAL) ) { Error(Fmt("Unsupported read mode %d for source %s in execution mode", - mode, fname.c_str())); + info.mode, fname.c_str())); return false; } @@ -187,7 +187,7 @@ bool Raw::DoUpdate() else { - switch ( Mode() ) { + switch ( Info().mode ) { case MODE_REREAD: { // check if the file has changed @@ -210,7 +210,7 @@ bool Raw::DoUpdate() case MODE_MANUAL: case MODE_STREAM: - if ( Mode() == MODE_STREAM && file != NULL && in != NULL ) + if ( Info().mode == MODE_STREAM && file != NULL && in != NULL ) { //fpurge(file); in->clear(); // remove end of file evil bits @@ -254,7 +254,7 @@ bool Raw::DoHeartbeat(double network_time, double current_time) { ReaderBackend::DoHeartbeat(network_time, current_time); - switch ( Mode() ) { + switch ( Info().mode ) { case MODE_MANUAL: // yay, we do nothing :) break; diff --git a/src/input/readers/Raw.h b/src/input/readers/Raw.h index 7d1351e728..48912b70a7 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/Raw.h @@ -22,7 +22,7 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Raw(frontend); } protected: - virtual bool DoInit(const ReaderInfo& info, ReaderMode mode, int arg_num_fields, const threading::Field* const* fields); + virtual bool DoInit(const ReaderInfo& info, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); virtual bool DoUpdate(); virtual bool DoHeartbeat(double network_time, double current_time); From f65e3f5b9f55c3009ec81b2b9636074887f551d0 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 2 Jul 
2012 11:07:50 -0700 Subject: [PATCH 447/651] fix small bug - now configuration actually is passed. --- src/input/Manager.cc | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 985e67302a..7b356fc05e 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -304,20 +304,18 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) EnumVal* mode = description->LookupWithDefault(rtype->FieldOffset("mode"))->AsEnumVal(); Val* config = description->LookupWithDefault(rtype->FieldOffset("config")); - ReaderBackend::ReaderInfo readerinfo; - switch ( mode->InternalInt() ) { case 0: - readerinfo.mode = MODE_MANUAL; + info->info.mode = MODE_MANUAL; break; case 1: - readerinfo.mode = MODE_REREAD; + info->info.mode = MODE_REREAD; break; case 2: - readerinfo.mode = MODE_STREAM; + info->info.mode = MODE_STREAM; break; default: @@ -331,7 +329,7 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) info->name = name; info->config = config->AsTableVal(); // ref'd by LookupWithDefault - readerinfo.source = source; + info->info.source = source; Ref(description); info->description = description; @@ -353,9 +351,6 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) } - info->info = readerinfo; - - DBG_LOG(DBG_INPUT, "Successfully created new input stream %s", name.c_str()); From ef3da87b3f798b0beceb7212868d4ea28698a462 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 2 Jul 2012 16:02:21 -0700 Subject: [PATCH 448/651] Updating submodule(s). [nomail] --- aux/broctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broctl b/aux/broctl index 880f3e48d3..27c6e97619 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 880f3e48d33bb28d17184656f858a4a0e2e1574c +Subproject commit 27c6e97619ddfd4edc987de7c081f92dbfc58148 From ff73f3a0408f9549cd4acb61e8d2a6f641d66937 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 2 Jul 2012 16:11:42 -0700 Subject: [PATCH 449/651] Fixing merge relicts. --- src/input/ReaderFrontend.h | 2 +- src/logging/WriterFrontend.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index 4467c3f608..93e416e65b 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -128,7 +128,7 @@ protected: /** * Sets the name of the backend's type. */ - void SetTypeName(const string& name) const { ty_name = name; } + void SetTypeName(const string& name) { ty_name = name; } private: ReaderBackend* backend; // The backend we have instanatiated. diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 9d57892ca3..4f98356a44 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -21,7 +21,7 @@ public: info(info), num_fields(num_fields), fields(fields), frontend_name(frontend_name) { } - virtual bool Process() { return Object()->Init(info, num_fields, fields); } + virtual bool Process() { return Object()->Init(info, num_fields, fields, frontend_name); } private: WriterBackend::WriterInfo info; From e64822f2f9aea83d27bf78233582544f5b7504c7 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 2 Jul 2012 16:12:09 -0700 Subject: [PATCH 450/651] Updating NEWS. 
--- NEWS | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 4377049813..d9410e1c7c 100644 --- a/NEWS +++ b/NEWS @@ -3,8 +3,9 @@ Release Notes ============= This document summarizes the most important changes in the current Bro -release. For a complete list of changes, see the ``CHANGES`` file. - +release. For a complete list of changes, see the ``CHANGES`` file +(note that submodules, such as BroControl and Broccoli, come with +their own CHANGES.) Bro 2.1 Beta ------------ @@ -62,6 +63,21 @@ New Functionality DataSeries is developed and maintained at HP Labs. See doc/logging-dataseries for more information. +- BroControl now has built-in support for host-based load-balancing + when using either PF_RING, Myricom cards, or individual interfaces. + Instead of adding a separate worker entry in node.cfg for each Bro + worker process on each worker host, it is now possible to just + specify the number of worker processes on each host and BroControl + configures everything correctly (including any neccessary enviroment + variables for the balancers). + + This change adds three new keywords to the node.cfg file (to be used + with worker entries): lb_procs (specifies number of workers on a + host), lb_method (specifies what type of load balancing to use: + pf_ring, myricom, or interfaces), and lb_interfaces (used only with + "lb_method=interfaces" to specify which interfaces to load-balance + on). + Changed Functionality ~~~~~~~~~~~~~~~~~~~~~ From 5ede1418fc9c0623bef73a34ce111843225eaadb Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 2 Jul 2012 16:12:21 -0700 Subject: [PATCH 451/651] Updating baselines. --- .../coverage.bare-load-baseline/canonified_loaded_scripts.log | 1 + .../coverage.default-load-baseline/canonified_loaded_scripts.log | 1 + 2 files changed, 2 insertions(+) diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index a7b369e337..0f12ce4ead 100644 --- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -20,6 +20,7 @@ scripts/base/init-bare.bro scripts/base/frameworks/logging/./postprocessors/./sftp.bro scripts/base/frameworks/logging/./writers/ascii.bro scripts/base/frameworks/logging/./writers/dataseries.bro + scripts/base/frameworks/logging/./writers/none.bro scripts/base/frameworks/input/__load__.bro scripts/base/frameworks/input/./main.bro build/src/base/input.bif.bro diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index 9414e9bd41..f1f9791fc3 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -20,6 +20,7 @@ scripts/base/init-bare.bro scripts/base/frameworks/logging/./postprocessors/./sftp.bro scripts/base/frameworks/logging/./writers/ascii.bro scripts/base/frameworks/logging/./writers/dataseries.bro + scripts/base/frameworks/logging/./writers/none.bro scripts/base/frameworks/input/__load__.bro scripts/base/frameworks/input/./main.bro build/src/base/input.bif.bro From b3155b7b4bdc163e112709cb3937165e6b62d3d6 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 2 Jul 2012 16:14:24 -0700 Subject: [PATCH 
452/651] Moving make target update-doc-sources from top-level Makefile to btest Makefile. --- Makefile | 3 --- doc/scripts/DocSourcesList.cmake | 1 + testing/btest/Makefile | 3 +++ testing/btest/coverage/doc.test | 6 ++---- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 8633c736a4..455fa6ed88 100644 --- a/Makefile +++ b/Makefile @@ -41,9 +41,6 @@ broxygen: configured broxygenclean: configured $(MAKE) -C $(BUILD) $@ -update-doc-sources: - ./doc/scripts/genDocSourcesList.sh ./doc/scripts/DocSourcesList.cmake - dist: @rm -rf $(VERSION_FULL) $(VERSION_FULL).tgz @rm -rf $(VERSION_MIN) $(VERSION_MIN).tgz diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake index 650982f9bb..c5eb3d724b 100644 --- a/doc/scripts/DocSourcesList.cmake +++ b/doc/scripts/DocSourcesList.cmake @@ -42,6 +42,7 @@ rest_target(${psd} base/frameworks/logging/postprocessors/scp.bro) rest_target(${psd} base/frameworks/logging/postprocessors/sftp.bro) rest_target(${psd} base/frameworks/logging/writers/ascii.bro) rest_target(${psd} base/frameworks/logging/writers/dataseries.bro) +rest_target(${psd} base/frameworks/logging/writers/none.bro) rest_target(${psd} base/frameworks/metrics/cluster.bro) rest_target(${psd} base/frameworks/metrics/main.bro) rest_target(${psd} base/frameworks/metrics/non-cluster.bro) diff --git a/testing/btest/Makefile b/testing/btest/Makefile index 257146daa0..93ccc8d5ec 100644 --- a/testing/btest/Makefile +++ b/testing/btest/Makefile @@ -21,4 +21,7 @@ cleanup: @rm -f $(DIAG) @rm -f .tmp/script-coverage* +update-doc-sources: + ../../doc/scripts/genDocSourcesList.sh ../../doc/scripts/DocSourcesList.cmake + .PHONY: all btest-verbose brief btest-brief coverage cleanup diff --git a/testing/btest/coverage/doc.test b/testing/btest/coverage/doc.test index d99122575d..074e397d88 100644 --- a/testing/btest/coverage/doc.test +++ b/testing/btest/coverage/doc.test @@ -1,10 +1,8 @@ # This tests that we're generating bro script documentation for all the # available bro scripts. If this fails, then the genDocSources.sh needs # to be run to produce a new DocSourcesList.cmake or genDocSources.sh needs -# to be updated to blacklist undesired scripts. To update, run the -# top-level Makefile: -# -# make update-doc-sources +# to be updated to blacklist undesired scripts. To update, run +# "make update-doc-sources" # # @TEST-EXEC: $DIST/doc/scripts/genDocSourcesList.sh # @TEST-EXEC: cmp $DIST/doc/scripts/DocSourcesList.cmake ./DocSourcesList.cmake From 3fcece44cb79660e602289e6df7b6e6d8f751644 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 2 Jul 2012 16:36:09 -0700 Subject: [PATCH 453/651] Tiny bugfix for returning writer name. --- src/logging/WriterFrontend.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 4f98356a44..21bde0d43c 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -137,7 +137,7 @@ WriterFrontend::~WriterFrontend() string WriterFrontend::Name() const { - if ( info.path.size() ) + if ( ! info.path.size() ) return ty_name; return ty_name + "/" + info.path; From d26a96bd470e8c58c5377ffecb9771519d83b594 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 2 Jul 2012 16:57:16 -0700 Subject: [PATCH 454/651] Bugfix. Using a custom rotate function was broken. 
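The fix below defers creating the WriterFrontend until after the matching filter has been looked up, so a filter's rotation interval and custom postprocessor take effect again. For reference, this is roughly how a custom rotation policy is attached from script-land (a minimal sketch assuming the stock 2.x Log::Filter fields $interv and $postprocessor; the postprocessor body is illustrative only):

    function my_rotate(info: Log::RotationInfo): bool
        {
        # Illustrative only: report which file was just rotated.
        print fmt("rotated %s", info$fname);
        return T;
        }

    event bro_init()
        {
        # Rotate conn.log hourly and run the custom postprocessor on each rotated file.
        Log::add_filter(Conn::LOG, [$name="custom-rotate", $path="conn",
                                    $interv=1 hr, $postprocessor=my_rotate]);
        }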
--- src/logging/Manager.cc | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 23b6f070a1..8cefd1b2ec 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -993,12 +993,9 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer // return it. return w->second->writer; - WriterFrontend* writer_obj = new WriterFrontend(id, writer, local, remote); - assert(writer_obj); - WriterInfo* winfo = new WriterInfo; winfo->type = writer->Ref()->AsEnumVal(); - winfo->writer = writer_obj; + winfo->writer = 0; winfo->open_time = network_time; winfo->rotation_timer = 0; winfo->interval = 0; @@ -1015,7 +1012,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer { Filter* f = *it; if ( f->writer->AsEnum() == writer->AsEnum() && - f->path == winfo->writer->info.path ) + f->path == info.path ) { found_filter_match = true; winfo->interval = f->interval; @@ -1031,8 +1028,6 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer winfo->interval = id->ID_Val()->AsInterval(); } - InstallRotationTimer(winfo); - stream->writers.insert( Stream::WriterMap::value_type(Stream::WriterPathPair(writer->AsEnum(), info.path), winfo)); @@ -1045,9 +1040,12 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer winfo->info.rotation_interval = winfo->interval; winfo->info.rotation_base = parse_rotate_base_time(base_time); - writer_obj->Init(winfo->info, num_fields, fields); + winfo->writer = new WriterFrontend(id, writer, local, remote); + winfo->writer->Init(winfo->info, num_fields, fields); - return writer_obj; + InstallRotationTimer(winfo); + + return winfo->writer; } void Manager::DeleteVals(int num_fields, threading::Value** vals) From 8dc1e418761ce95964cfb66d6dc6128ce6ce2d90 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 3 Jul 2012 18:20:52 -0500 Subject: [PATCH 455/651] Fix minor typos in dataseries documentation --- doc/logging-dataseries.rst | 18 +++++++++--------- doc/logging.rst | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst index 554600f055..139a13f813 100644 --- a/doc/logging-dataseries.rst +++ b/doc/logging-dataseries.rst @@ -21,7 +21,7 @@ To use DataSeries, its libraries must be available at compile-time, along with the supporting *Lintel* package. Generally, both are distributed on `HP Labs' web site `_. Currently, however, you need -to use recent developments versions for both packages, which you can +to use recent development versions for both packages, which you can download from github like this:: git clone http://github.com/dataseries/Lintel @@ -76,7 +76,7 @@ tools, which its installation process installs into ``/bin``. For example, to convert a file back into an ASCII representation:: $ ds2txt conn.log - [... We skip a bunch of meta data here ...] + [... We skip a bunch of metadata here ...] 
ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes 1300475167.096535 CRCC5OdDlXe 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 1300475167.097012 o7XBsfvo3U1 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 @@ -86,13 +86,13 @@ For example, to convert a file back into an ASCII representation:: 1300475168.854837 k6T92WxgNAh 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211 [...] -(``--skip-all`` suppresses the meta data.) +(``--skip-all`` suppresses the metadata.) Note that the ASCII conversion is *not* equivalent to Bro's default output format. You can also switch only individual files over to DataSeries by adding -code like this to your ``local.bro``:: +code like this to your ``local.bro``: .. code:: bro @@ -109,7 +109,7 @@ Bro's DataSeries writer comes with a few tuning options, see Working with DataSeries ======================= -Here are few examples of using DataSeries command line tools to work +Here are a few examples of using DataSeries command line tools to work with the output files. * Printing CSV:: @@ -147,7 +147,7 @@ with the output files. * Calculate some statistics: - Mean/stdev/min/max over a column:: + Mean/stddev/min/max over a column:: $ dsstatgroupby '*' basic duration from conn.ds # Begin DSStatGroupByModule @@ -158,7 +158,7 @@ with the output files. Quantiles of total connection volume:: - > dsstatgroupby '*' quantile 'orig_bytes + resp_bytes' from conn.ds + $ dsstatgroupby '*' quantile 'orig_bytes + resp_bytes' from conn.ds [...] 2159 data points, mean 24616 +- 343295 [0,1.26615e+07] quantiles about every 216 data points: @@ -166,7 +166,7 @@ with the output files. tails: 90%: 1469, 95%: 7302, 99%: 242629, 99.5%: 1226262 [...] -The ``man`` pages for these tool show further options, and their +The ``man`` pages for these tools show further options, and their ``-h`` option gives some more information (either can be a bit cryptic unfortunately though). @@ -175,7 +175,7 @@ Deficiencies Due to limitations of the DataSeries format, one cannot inspect its files before they have been fully written. In other words, when using -DataSeries, it's currently it's not possible to inspect the live log +DataSeries, it's currently not possible to inspect the live log files inside the spool directory before they are rotated to their final location. It seems that this could be fixed with some effort, and we will work with DataSeries development team on that if the diff --git a/doc/logging.rst b/doc/logging.rst index 384996c28a..cc6cb1e54d 100644 --- a/doc/logging.rst +++ b/doc/logging.rst @@ -377,7 +377,7 @@ uncommon to need to delete that data before the end of the connection. Other Writers ------------- -Bro support the following output formats other than ASCII: +Bro supports the following output formats other than ASCII: .. toctree:: :maxdepth: 1 From cee78f8f5d7cc2f21e28deb93cc2d7c49d3db58d Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 5 Jul 2012 12:59:19 -0500 Subject: [PATCH 456/651] Fix minor typos in input framework doc Also simplified the opening paragraph, and reformatted input text to fit on 80-column display for better readability. 
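The corrected text that follows introduces table streams, re-read modes, change events and predicates one piece at a time; pulled together, a complete (hypothetical) blacklist reader using the Idx/Val records from that documentation could look like this sketch:

    type Idx: record {
        ip: addr;
    };

    type Val: record {
        timestamp: time;
        reason: string;
    };

    global blacklist: table[addr] of Val = table();

    event entry(description: Input::TableDescription, tpe: Input::Event,
                left: Idx, right: Val)
        {
        print fmt("blacklist %s: %s (%s)", tpe, left$ip, right$reason);
        }

    event bro_init()
        {
        # Re-read the file on every change and raise 'entry' for each added,
        # changed or removed element.
        Input::add_table([$source="blacklist.file", $name="blacklist",
                          $idx=Idx, $val=Val, $destination=blacklist,
                          $mode=Input::REREAD, $ev=entry]);
        }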
--- doc/input.rst | 308 +++++++++++++++++++++++++++----------------------- 1 file changed, 167 insertions(+), 141 deletions(-) diff --git a/doc/input.rst b/doc/input.rst index 7d62d485b9..6a089c0635 100644 --- a/doc/input.rst +++ b/doc/input.rst @@ -4,19 +4,13 @@ Loading Data into Bro with the Input Framework .. rst-class:: opening - Bro now features a flexible input frameworks that allows users + Bro now features a flexible input framework that allows users to import data into Bro. Data is either read into Bro tables or converted to events which can then be handled by scripts. - -The input framework is merged into the git master and we -will give a short summary on how to use it. -The input framework is automatically compiled and installed -together with Bro. The interface to it is exposed via the -scripting layer. - -This document gives the most common examples. For more complex -scenarios it is worthwhile to take a look at the unit tests in -``testing/btest/scripts/base/frameworks/input/``. + This document gives an overview of how to use the input framework + with some examples. For more complex scenarios it is + worthwhile to take a look at the unit tests in + ``testing/btest/scripts/base/frameworks/input/``. .. contents:: @@ -66,11 +60,12 @@ The two records are defined as: reason: string; }; -ote that the names of the fields in the record definitions have to correspond to -the column names listed in the '#fields' line of the log file, in this case 'ip', -'timestamp', and 'reason'. +Note that the names of the fields in the record definitions have to correspond +to the column names listed in the '#fields' line of the log file, in this +case 'ip', 'timestamp', and 'reason'. -The log file is read into the table with a simple call of the add_table function: +The log file is read into the table with a simple call of the ``add_table`` +function: .. code:: bro @@ -80,7 +75,7 @@ The log file is read into the table with a simple call of the add_table function Input::remove("blacklist"); With these three lines we first create an empty table that should contain the -blacklist data and then instruct the Input framework to open an input stream +blacklist data and then instruct the input framework to open an input stream named ``blacklist`` to read the data into the table. The third line removes the input stream again, because we do not need it any more after the data has been read. @@ -91,20 +86,20 @@ This thread opens the input data file, converts the data into a Bro format and sends it back to the main Bro thread. Because of this, the data is not immediately accessible. Depending on the -size of the data source it might take from a few milliseconds up to a few seconds -until all data is present in the table. Please note that this means that when Bro -is running without an input source or on very short captured files, it might terminate -before the data is present in the system (because Bro already handled all packets -before the import thread finished). +size of the data source it might take from a few milliseconds up to a few +seconds until all data is present in the table. Please note that this means +that when Bro is running without an input source or on very short captured +files, it might terminate before the data is present in the system (because +Bro already handled all packets before the import thread finished). -Subsequent calls to an input source are queued until the previous action has been -completed. 
Because of this, it is, for example, possible to call ``add_table`` and -``remove`` in two subsequent lines: the ``remove`` action will remain queued until -the first read has been completed. +Subsequent calls to an input source are queued until the previous action has +been completed. Because of this, it is, for example, possible to call +``add_table`` and ``remove`` in two subsequent lines: the ``remove`` action +will remain queued until the first read has been completed. -Once the input framework finishes reading from a data source, it fires the ``update_finished`` -event. Once this event has been received all data from the input file is available -in the table. +Once the input framework finishes reading from a data source, it fires +the ``update_finished`` event. Once this event has been received all data +from the input file is available in the table. .. code:: bro @@ -113,10 +108,10 @@ in the table. print blacklist; } -The table can also already be used while the data is still being read - it just might -not contain all lines in the input file when the event has not yet fired. After it has -been populated it can be used like any other Bro table and blacklist entries easily be -tested: +The table can also already be used while the data is still being read - it +just might not contain all lines in the input file when the event has not +yet fired. After it has been populated it can be used like any other Bro +table and blacklist entries can easily be tested: .. code:: bro @@ -128,13 +123,14 @@ Re-reading and streaming data ----------------------------- For many data sources, like for many blacklists, the source data is continually -changing. For this cases, the Bro input framework supports several ways to +changing. For these cases, the Bro input framework supports several ways to deal with changing data files. -The first, very basic method is an explicit refresh of an input stream. When an input -stream is open, the function ``force_update`` can be called. This will trigger -a complete refresh of the table; any changed elements from the file will be updated. -After the update is finished the ``update_finished`` event will be raised. +The first, very basic method is an explicit refresh of an input stream. When +an input stream is open, the function ``force_update`` can be called. This +will trigger a complete refresh of the table; any changed elements from the +file will be updated. After the update is finished the ``update_finished`` +event will be raised. In our example the call would look like: @@ -142,25 +138,26 @@ In our example the call would look like: Input::force_update("blacklist"); -The input framework also supports two automatic refresh mode. The first mode +The input framework also supports two automatic refresh modes. The first mode continually checks if a file has been changed. If the file has been changed, it -is re-read and the data in the Bro table is updated to reflect the current state. -Each time a change has been detected and all the new data has been read into the -table, the ``update_finished`` event is raised. +is re-read and the data in the Bro table is updated to reflect the current +state. Each time a change has been detected and all the new data has been +read into the table, the ``update_finished`` event is raised. -The second mode is a streaming mode. This mode assumes that the source data file -is an append-only file to which new data is continually appended. 
Bro continually -checks for new data at the end of the file and will add the new data to the table. -If newer lines in the file have the same index as previous lines, they will overwrite -the values in the output table. -Because of the nature of streaming reads (data is continually added to the table), +The second mode is a streaming mode. This mode assumes that the source data +file is an append-only file to which new data is continually appended. Bro +continually checks for new data at the end of the file and will add the new +data to the table. If newer lines in the file have the same index as previous +lines, they will overwrite the values in the output table. Because of the +nature of streaming reads (data is continually added to the table), the ``update_finished`` event is never raised when using streaming reads. -The reading mode can be selected by setting the ``mode`` option of the add_table call. -Valid values are ``MANUAL`` (the default), ``REREAD`` and ``STREAM``. +The reading mode can be selected by setting the ``mode`` option of the +add_table call. Valid values are ``MANUAL`` (the default), ``REREAD`` +and ``STREAM``. -Hence, when using adding ``$mode=Input::REREAD`` to the previous example, the blacklists -table will always reflect the state of the blacklist input file. +Hence, when adding ``$mode=Input::REREAD`` to the previous example, the +blacklist table will always reflect the state of the blacklist input file. .. code:: bro @@ -169,11 +166,11 @@ table will always reflect the state of the blacklist input file. Receiving change events ----------------------- -When re-reading files, it might be interesting to know exactly which lines in the source -files have changed. +When re-reading files, it might be interesting to know exactly which lines in +the source files have changed. -For this reason, the input framework can raise an event each time when a data item is added to, -removed from or changed in a table. +For this reason, the input framework can raise an event each time when a data +item is added to, removed from or changed in a table. The event definition looks like this: @@ -189,34 +186,42 @@ The event has to be specified in ``$ev`` in the ``add_table`` call: Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD, $ev=entry]); -The ``description`` field of the event contains the arguments that were originally supplied to the add_table call. -Hence, the name of the stream can, for example, be accessed with ``description$name``. ``tpe`` is an enum containing -the type of the change that occurred. +The ``description`` field of the event contains the arguments that were +originally supplied to the add_table call. Hence, the name of the stream can, +for example, be accessed with ``description$name``. ``tpe`` is an enum +containing the type of the change that occurred. -It will contain ``Input::EVENT_NEW``, when a line that was not previously been -present in the table has been added. In this case ``left`` contains the Index of the added table entry and ``right`` contains -the values of the added entry. +If a line that was not previously present in the table has been added, +then ``tpe`` will contain ``Input::EVENT_NEW``. In this case ``left`` contains +the index of the added table entry and ``right`` contains the values of the +added entry. -If a table entry that already was present is altered during the re-reading or streaming read of a file, ``tpe`` will contain -``Input::EVENT_CHANGED``. 
In this case ``left`` contains the Index of the changed table entry and ``right`` contains the -values of the entry before the change. The reason for this is, that the table already has been updated when the event is -raised. The current value in the table can be ascertained by looking up the current table value. Hence it is possible to compare -the new and the old value of the table. +If a table entry that already was present is altered during the re-reading or +streaming read of a file, ``tpe`` will contain ``Input::EVENT_CHANGED``. In +this case ``left`` contains the index of the changed table entry and ``right`` +contains the values of the entry before the change. The reason for this is +that the table already has been updated when the event is raised. The current +value in the table can be ascertained by looking up the current table value. +Hence it is possible to compare the new and the old values of the table. -``tpe`` contains ``Input::REMOVED``, when a table element is removed because it was no longer present during a re-read. -In this case ``left`` contains the index and ``right`` the values of the removed element. +If a table element is removed because it was no longer present during a +re-read, then ``tpe`` will contain ``Input::REMOVED``. In this case ``left`` +contains the index and ``right`` the values of the removed element. Filtering data during import ---------------------------- -The input framework also allows a user to filter the data during the import. To this end, predicate functions are used. A predicate -function is called before a new element is added/changed/removed from a table. The predicate can either accept or veto -the change by returning true for an accepted change and false for an rejected change. Furthermore, it can alter the data +The input framework also allows a user to filter the data during the import. +To this end, predicate functions are used. A predicate function is called +before a new element is added/changed/removed from a table. The predicate +can either accept or veto the change by returning true for an accepted +change and false for a rejected change. Furthermore, it can alter the data before it is written to the table. -The following example filter will reject to add entries to the table when they were generated over a month ago. It -will accept all changes and all removals of values that are already present in the table. +The following example filter will reject to add entries to the table when +they were generated over a month ago. It will accept all changes and all +removals of values that are already present in the table. .. code:: bro @@ -228,34 +233,43 @@ will accept all changes and all removals of values that are already present in t return ( ( current_time() - right$timestamp ) < (30 day) ); }]); -To change elements while they are being imported, the predicate function can manipulate ``left`` and ``right``. Note -that predicate functions are called before the change is committed to the table. Hence, when a table element is changed ( ``tpe`` -is ``INPUT::EVENT_CHANGED`` ), ``left`` and ``right`` contain the new values, but the destination (``blacklist`` in our example) -still contains the old values. This allows predicate functions to examine the changes between the old and the new version before -deciding if they should be allowed. +To change elements while they are being imported, the predicate function can +manipulate ``left`` and ``right``. Note that predicate functions are called +before the change is committed to the table. 
Hence, when a table element is +changed (``tpe`` is ``INPUT::EVENT_CHANGED``), ``left`` and ``right`` +contain the new values, but the destination (``blacklist`` in our example) +still contains the old values. This allows predicate functions to examine +the changes between the old and the new version before deciding if they +should be allowed. Different readers ----------------- -The input framework supports different kinds of readers for different kinds of source data files. At the moment, the default -reader reads ASCII files formatted in the Bro log-file-format (tab-separated values). At the moment, Bro comes with two -other readers. The ``RAW`` reader reads a file that is split by a specified record separator (usually newline). The contents -are returned line-by-line as strings; it can, for example, be used to read configuration files and the like and is probably +The input framework supports different kinds of readers for different kinds +of source data files. At the moment, the default reader reads ASCII files +formatted in the Bro log file format (tab-separated values). At the moment, +Bro comes with two other readers. The ``RAW`` reader reads a file that is +split by a specified record separator (usually newline). The contents are +returned line-by-line as strings; it can, for example, be used to read +configuration files and the like and is probably only useful in the event mode and not for reading data to tables. -Another included reader is the ``BENCHMARK`` reader, which is being used to optimize the speed of the input framework. It -can generate arbitrary amounts of semi-random data in all Bro data types supported by the input framework. +Another included reader is the ``BENCHMARK`` reader, which is being used +to optimize the speed of the input framework. It can generate arbitrary +amounts of semi-random data in all Bro data types supported by the input +framework. -In the future, the input framework will get support for new data sources like, for example, different databases. +In the future, the input framework will get support for new data sources +like, for example, different databases. Add_table options ----------------- -This section lists all possible options that can be used for the add_table function and gives -a short explanation of their use. Most of the options already have been discussed in the -previous sections. +This section lists all possible options that can be used for the add_table +function and gives a short explanation of their use. Most of the options +already have been discussed in the previous sections. -The possible fields that can be set for an table stream are: +The possible fields that can be set for a table stream are: ``source`` A mandatory string identifying the source of the data. @@ -266,51 +280,57 @@ The possible fields that can be set for an table stream are: to manipulate it further. ``idx`` - Record type that defines the index of the table + Record type that defines the index of the table. ``val`` - Record type that defines the values of the table + Record type that defines the values of the table. - ``reader`` + ``reader`` The reader used for this stream. Default is ``READER_ASCII``. ``mode`` - The mode in which the stream is opened. Possible values are ``MANUAL``, ``REREAD`` and ``STREAM``. - Default is ``MANUAL``. - ``MANUAL`` means, that the files is not updated after it has been read. Changes to the file will not - be reflected in the data Bro knows. - ``REREAD`` means that the whole file is read again each time a change is found. 
This should be used for - files that are mapped to a table where individual lines can change. - ``STREAM`` means that the data from the file is streamed. Events / table entries will be generated as new - data is added to the file. + The mode in which the stream is opened. Possible values are + ``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``. + ``MANUAL`` means that the file is not updated after it has + been read. Changes to the file will not be reflected in the + data Bro knows. ``REREAD`` means that the whole file is read + again each time a change is found. This should be used for + files that are mapped to a table where individual lines can + change. ``STREAM`` means that the data from the file is + streamed. Events / table entries will be generated as new + data is appended to the file. ``destination`` - The destination table + The destination table. ``ev`` - Optional event that is raised, when values are added to, changed in or deleted from the table. - Events are passed an Input::Event description as the first argument, the index record as the second argument - and the values as the third argument. + Optional event that is raised, when values are added to, + changed in, or deleted from the table. Events are passed an + Input::Event description as the first argument, the index + record as the second argument and the values as the third + argument. ``pred`` - Optional predicate, that can prevent entries from being added to the table and events from being sent. + Optional predicate, that can prevent entries from being added + to the table and events from being sent. ``want_record`` - Boolean value, that defines if the event wants to receive the fields inside of - a single record value, or individually (default). - This can be used, if ``val`` is a record containing only one type. In this case, - if ``want_record`` is set to false, the table will contain elements of the type + Boolean value, that defines if the event wants to receive the + fields inside of a single record value, or individually + (default). This can be used if ``val`` is a record + containing only one type. In this case, if ``want_record`` is + set to false, the table will contain elements of the type contained in ``val``. -Reading data to events +Reading Data to Events ====================== -The second supported mode of the input framework is reading data to Bro events instead -of reading them to a table using event streams. +The second supported mode of the input framework is reading data to Bro +events instead of reading them to a table using event streams. -Event streams work very similarly to table streams that were already discussed in much -detail. To read the blacklist of the previous example into an event stream, the following -Bro code could be used: +Event streams work very similarly to table streams that were already +discussed in much detail. To read the blacklist of the previous example +into an event stream, the following Bro code could be used: .. code:: bro @@ -329,14 +349,15 @@ Bro code could be used: } -The main difference in the declaration of the event stream is, that an event stream needs no -separate index and value declarations -- instead, all source data types are provided in a single -record definition. +The main difference in the declaration of the event stream is, that an event +stream needs no separate index and value declarations -- instead, all source +data types are provided in a single record definition. 
-Apart from this, event streams work exactly the same as table streams and support most of the options -that are also supported for table streams. +Apart from this, event streams work exactly the same as table streams and +support most of the options that are also supported for table streams. -The options that can be set for when creating an event stream with ``add_event`` are: +The options that can be set when creating an event stream with +``add_event`` are: ``source`` A mandatory string identifying the source of the data. @@ -347,35 +368,40 @@ The options that can be set for when creating an event stream with ``add_event`` to remove it. ``fields`` - Name of a record type containing the fields, which should be retrieved from - the input stream. + Name of a record type containing the fields, which should be + retrieved from the input stream. ``ev`` - The event which is fired, after a line has been read from the input source. - The first argument that is passed to the event is an Input::Event structure, - followed by the data, either inside of a record (if ``want_record is set``) or as - individual fields. - The Input::Event structure can contain information, if the received line is ``NEW``, has - been ``CHANGED`` or ``DELETED``. Singe the ASCII reader cannot track this information - for event filters, the value is always ``NEW`` at the moment. + The event which is fired, after a line has been read from the + input source. The first argument that is passed to the event + is an Input::Event structure, followed by the data, either + inside of a record (if ``want_record is set``) or as + individual fields. The Input::Event structure can contain + information, if the received line is ``NEW``, has been + ``CHANGED`` or ``DELETED``. Since the ASCII reader cannot + track this information for event filters, the value is + always ``NEW`` at the moment. ``mode`` - The mode in which the stream is opened. Possible values are ``MANUAL``, ``REREAD`` and ``STREAM``. - Default is ``MANUAL``. - ``MANUAL`` means, that the files is not updated after it has been read. Changes to the file will not - be reflected in the data Bro knows. - ``REREAD`` means that the whole file is read again each time a change is found. This should be used for - files that are mapped to a table where individual lines can change. - ``STREAM`` means that the data from the file is streamed. Events / table entries will be generated as new - data is added to the file. + The mode in which the stream is opened. Possible values are + ``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``. + ``MANUAL`` means that the file is not updated after it has + been read. Changes to the file will not be reflected in the + data Bro knows. ``REREAD`` means that the whole file is read + again each time a change is found. This should be used for + files that are mapped to a table where individual lines can + change. ``STREAM`` means that the data from the file is + streamed. Events / table entries will be generated as new + data is appended to the file. ``reader`` The reader used for this stream. Default is ``READER_ASCII``. ``want_record`` - Boolean value, that defines if the event wants to receive the fields inside of - a single record value, or individually (default). If this is set to true, the - event will receive a single record of the type provided in ``fields``. + Boolean value, that defines if the event wants to receive the + fields inside of a single record value, or individually + (default). 
If this is set to true, the event will receive a + single record of the type provided in ``fields``. From 11bc88e41a3138808f9c0d11341c561a88e7503b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 5 Jul 2012 12:33:57 -0700 Subject: [PATCH 457/651] Updating submodule(s). [nomail] --- aux/broctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broctl b/aux/broctl index 27c6e97619..2c0407e1f2 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 27c6e97619ddfd4edc987de7c081f92dbfc58148 +Subproject commit 2c0407e1f2ec2012a615de9fcc1d85d46dc5d176 From 658d1d08043f1eb3daf0911f832b20f4cdf01f4b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 5 Jul 2012 12:57:56 -0700 Subject: [PATCH 458/651] Updating submodule(s). [nomail] --- CHANGES | 4 ++-- VERSION | 2 +- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- aux/btest | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index f513eeeec1..ba39388080 100644 --- a/CHANGES +++ b/CHANGES @@ -1,8 +1,8 @@ -2.0-746 | 2012-07-05 12:28:54 -0700 +2.1-beta | 2012-07-05 12:57:56 -0700 * Fix typos in input framework doc. (Daniel Thayer) - + * Fix typos in DataSeries documentation. (Daniel Thayer) * Bugfix making custom rotate functions work again. (Robin Sommer) diff --git a/VERSION b/VERSION index fa1ad2daa8..0fb956a360 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-746 +2.1-beta diff --git a/aux/binpac b/aux/binpac index 6f43a8115d..4ad8d15b63 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 6f43a8115d8e6483a50957c5d21c5d69270ab3aa +Subproject commit 4ad8d15b6395925c9875c9d2912a6cc3b4918e0a diff --git a/aux/bro-aux b/aux/bro-aux index c6391412e9..c691c01e9c 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit c6391412e902e896836450ab98910309b2ca2d9b +Subproject commit c691c01e9cefae5a79bcd4b0f84ca387c8c587a7 diff --git a/aux/broccoli b/aux/broccoli index f1b0a395ab..d52337aeed 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit f1b0a395ab32388d8375ab72ec263b6029833f96 +Subproject commit d52337aeed44e46c24340b55fc636535aea64e39 diff --git a/aux/broctl b/aux/broctl index 2c0407e1f2..6bfc0bfae0 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 2c0407e1f2ec2012a615de9fcc1d85d46dc5d176 +Subproject commit 6bfc0bfae0406deddf207475582bf7a17f1787af diff --git a/aux/btest b/aux/btest index 5856453712..44441a6c91 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 585645371256e8ec028cabae24c5f4a2108546d2 +Subproject commit 44441a6c912c7c9f8d4771e042306ec5f44e461d From 818c76243faef2e9daf4e9383803fe7ac4aa042e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 5 Jul 2012 13:00:35 -0700 Subject: [PATCH 459/651] Updating submodule(s). [nomail] --- CHANGES | 2 +- aux/broccoli | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index ba39388080..15eb46a893 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,5 @@ -2.1-beta | 2012-07-05 12:57:56 -0700 +2.1-beta | 2012-07-05 13:00:35 -0700 * Fix typos in input framework doc. 
(Daniel Thayer) diff --git a/aux/broccoli b/aux/broccoli index d52337aeed..bd9d698f70 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit d52337aeed44e46c24340b55fc636535aea64e39 +Subproject commit bd9d698f708908f7258211b534c91467d486983b From 1b8673f4b2622a2aec73ca148aab88bde834eb3b Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 5 Jul 2012 17:58:44 -0500 Subject: [PATCH 460/651] Remove a non-portable test case --- testing/btest/bifs/system.bro | 5 ----- 1 file changed, 5 deletions(-) diff --git a/testing/btest/bifs/system.bro b/testing/btest/bifs/system.bro index b73aed4d79..ab2642319c 100644 --- a/testing/btest/bifs/system.bro +++ b/testing/btest/bifs/system.bro @@ -7,9 +7,4 @@ event bro_init() local a = system("echo thistest > out"); if ( a != 0 ) exit(1); - - local b = system(""); - if ( b == 0 ) - exit(1); - } From 84e91b8b8d18e310c0f61372a19434c19dfdd709 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Mon, 9 Jul 2012 16:38:05 -0400 Subject: [PATCH 461/651] Bringing elasticsearch branch up to date with master. --- scripts/base/frameworks/logging/__load__.bro | 3 +- .../logging/writers/elasticsearch.bro | 2 +- src/logging.bif | 2 +- src/logging/writers/ElasticSearch.cc | 221 ++++++++++++------ src/logging/writers/ElasticSearch.h | 22 +- 5 files changed, 167 insertions(+), 83 deletions(-) diff --git a/scripts/base/frameworks/logging/__load__.bro b/scripts/base/frameworks/logging/__load__.bro index 2c2a6d2f59..b65cb1dea3 100644 --- a/scripts/base/frameworks/logging/__load__.bro +++ b/scripts/base/frameworks/logging/__load__.bro @@ -2,4 +2,5 @@ @load ./postprocessors @load ./writers/ascii @load ./writers/dataseries -@load ./writers/elasticsearch@load ./writers/none +@load ./writers/elasticsearch +@load ./writers/none diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro index b262201c85..93c6c98705 100644 --- a/scripts/base/frameworks/logging/writers/elasticsearch.bro +++ b/scripts/base/frameworks/logging/writers/elasticsearch.bro @@ -11,7 +11,7 @@ export { const server_port = 9200 &redef; ## Name of the ES index - const index_name = "bro" &redef; + const index_prefix = "bro" &redef; ## The ES type prefix comes before the name of the related log. ## e.g. prefix = "bro_" would create types of bro_dns, bro_software, etc. 
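Together with the renamed index_prefix, the writer's other tunables (cluster_name, server_host, server_port, type_prefix, max_batch_size, max_batch_interval) are exported the same way, so a minimal deployment sketch could look like the following — the host value and the Log::WRITER_ELASTICSEARCH enum name are assumptions based on how writers are normally registered:

    redef LogElasticSearch::server_host = "127.0.0.1";
    redef LogElasticSearch::server_port = 9200;
    redef LogElasticSearch::index_prefix = "bro";
    redef LogElasticSearch::max_batch_interval = 30 sec;

    event bro_init()
        {
        # Send conn.log to ElasticSearch in addition to the default ASCII logs.
        Log::add_filter(Conn::LOG,
                        [$name="es-conn", $writer=Log::WRITER_ELASTICSEARCH]);
        }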
diff --git a/src/logging.bif b/src/logging.bif index 23b9378b26..3cdb414d80 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -89,7 +89,7 @@ module LogElasticSearch; const cluster_name: string; const server_host: string; const server_port: count; -const index_name: string; +const index_prefix: string; const type_prefix: string; const max_batch_size: count; const max_batch_interval: interval; diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 75a4e0514f..6d2f8363cc 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -30,8 +30,17 @@ ElasticSearch::ElasticSearch(WriterFrontend* frontend) : WriterBackend(frontend) memcpy(cluster_name, BifConst::LogElasticSearch::cluster_name->Bytes(), cluster_name_len); cluster_name[cluster_name_len] = 0; + index_prefix = string((const char*) BifConst::LogElasticSearch::index_prefix->Bytes(), BifConst::LogElasticSearch::index_prefix->Len()); + + es_server = string(Fmt("http://%s:%d", BifConst::LogElasticSearch::server_host->Bytes(), + (int) BifConst::LogElasticSearch::server_port)); + bulk_url = string(Fmt("%s/_bulk", es_server.c_str())); + + http_headers = curl_slist_append(NULL, "Content-Type: text/json; charset=utf-8"); buffer.Clear(); counter = 0; + current_index = string(); + prev_index = string(); last_send = current_time(); curl_handle = HTTPSetup(); @@ -42,67 +51,84 @@ ElasticSearch::~ElasticSearch() delete [] cluster_name; } -bool ElasticSearch::DoInit(string path, int num_fields, const Field* const * fields) +bool ElasticSearch::DoInit(const WriterInfo& info, int num_fields, const threading::Field* const* fields) { - //TODO: Determine what, if anything, needs to be done here. return true; } bool ElasticSearch::DoFlush() { + BatchIndex(); return true; } bool ElasticSearch::DoFinish() { BatchIndex(); + curl_slist_free_all(http_headers); curl_easy_cleanup(curl_handle); return WriterBackend::DoFinish(); } bool ElasticSearch::BatchIndex() { - HTTPSend(); + curl_easy_reset(curl_handle); + curl_easy_setopt(curl_handle, CURLOPT_URL, bulk_url.c_str()); + curl_easy_setopt(curl_handle, CURLOPT_POST, 1); + curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)buffer.Len()); + curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, buffer.Bytes()); + HTTPSend(curl_handle); + buffer.Clear(); counter = 0; last_send = current_time(); + return true; } -bool ElasticSearch::AddFieldValueToBuffer(Value* val, const Field* field) +bool ElasticSearch::AddValueToBuffer(ODesc* b, Value* val) { switch ( val->type ) { // ES treats 0 as false and any other value as true so bool types go here. case TYPE_BOOL: case TYPE_INT: - buffer.Add(val->val.int_val); + b->Add(val->val.int_val); break; case TYPE_COUNT: case TYPE_COUNTER: - buffer.Add(val->val.uint_val); + { + // ElasticSearch doesn't seem to support unsigned 64bit ints. 
+ if ( val->val.uint_val >= INT64_MAX ) + { + Error(Fmt("count value too large: %" PRIu64, val->val.uint_val)); + b->AddRaw("null", 4); + } + else + b->Add(val->val.uint_val); break; + } case TYPE_PORT: - buffer.Add(val->val.port_val.port); + b->Add(val->val.port_val.port); break; case TYPE_SUBNET: - buffer.AddRaw("\"", 1); - buffer.Add(Render(val->val.subnet_val)); - buffer.AddRaw("\"", 1); + b->AddRaw("\"", 1); + b->Add(Render(val->val.subnet_val)); + b->AddRaw("\"", 1); break; case TYPE_ADDR: - buffer.AddRaw("\"", 1); - buffer.Add(Render(val->val.addr_val)); - buffer.AddRaw("\"", 1); + b->AddRaw("\"", 1); + b->Add(Render(val->val.addr_val)); + b->AddRaw("\"", 1); break; case TYPE_DOUBLE: case TYPE_INTERVAL: - buffer.Add(val->val.double_val); + b->Add(val->val.double_val); break; case TYPE_TIME: @@ -113,10 +139,10 @@ bool ElasticSearch::AddFieldValueToBuffer(Value* val, const Field* field) if ( ts >= INT64_MAX ) { Error(Fmt("time value too large: %" PRIu64, ts)); - buffer.AddRaw("null", 4); + b->AddRaw("null", 4); } else - buffer.Add(ts); + b->Add(ts); break; } @@ -125,51 +151,48 @@ bool ElasticSearch::AddFieldValueToBuffer(Value* val, const Field* field) case TYPE_FILE: case TYPE_FUNC: { - buffer.AddRaw("\"", 1); + b->AddRaw("\"", 1); for ( uint i = 0; i < val->val.string_val->size(); ++i ) { char c = val->val.string_val->data()[i]; - // HTML entity encode special characters. + // 2byte Unicode escape special characters. if ( c < 32 || c > 126 || c == '\n' || c == '"' || c == '\'' || c == '\\' || c == '&' ) { static const char hex_chars[] = "0123456789abcdef"; - buffer.AddRaw("\\u00", 4); - buffer.AddRaw(&hex_chars[(c & 0xf0) >> 4], 1); - buffer.AddRaw(&hex_chars[c & 0x0f], 1); - //buffer.AddRaw("&#//", 2); - //buffer.Add((uint8_t) c); - //buffer.AddRaw(";", 1); + b->AddRaw("\\u00", 4); + b->AddRaw(&hex_chars[(c & 0xf0) >> 4], 1); + b->AddRaw(&hex_chars[c & 0x0f], 1); } else - buffer.AddRaw(&c, 1); + b->AddRaw(&c, 1); } - buffer.AddRaw("\"", 1); + b->AddRaw("\"", 1); break; } case TYPE_TABLE: { - buffer.AddRaw("[", 1); + b->AddRaw("[", 1); for ( int j = 0; j < val->val.set_val.size; j++ ) { if ( j > 0 ) - buffer.AddRaw(",", 1); - AddFieldValueToBuffer(val->val.set_val.vals[j], field); + b->AddRaw(",", 1); + AddValueToBuffer(b, val->val.set_val.vals[j]); } - buffer.AddRaw("]", 1); + b->AddRaw("]", 1); break; } case TYPE_VECTOR: { - buffer.AddRaw("[", 1); + b->AddRaw("[", 1); for ( int j = 0; j < val->val.vector_val.size; j++ ) { if ( j > 0 ) - buffer.AddRaw(",", 1); - AddFieldValueToBuffer(val->val.vector_val.vals[j], field); + b->AddRaw(",", 1); + AddValueToBuffer(b, val->val.vector_val.vals[j]); } - buffer.AddRaw("]", 1); + b->AddRaw("]", 1); break; } @@ -179,38 +202,37 @@ bool ElasticSearch::AddFieldValueToBuffer(Value* val, const Field* field) return true; } -bool ElasticSearch::AddFieldToBuffer(Value* val, const Field* field) +bool ElasticSearch::AddFieldToBuffer(ODesc *b, Value* val, const Field* field) { if ( ! 
val->present ) return false; - buffer.AddRaw("\"", 1); - buffer.Add(field->name); - buffer.AddRaw("\":", 2); - AddFieldValueToBuffer(val, field); + b->AddRaw("\"", 1); + b->Add(field->name); + b->AddRaw("\":", 2); + AddValueToBuffer(b, val); return true; } bool ElasticSearch::DoWrite(int num_fields, const Field* const * fields, Value** vals) { + if ( current_index.empty() ) + UpdateIndex(network_time, Info().rotation_interval, Info().rotation_base); + // Our action line looks like: - // {"index":{"_index":"$index_name","_type":"$type_prefix$path"}}\n buffer.AddRaw("{\"index\":{\"_index\":\"", 20); - buffer.AddN((const char*) BifConst::LogElasticSearch::index_name->Bytes(), - BifConst::LogElasticSearch::index_name->Len()); + buffer.Add(current_index); buffer.AddRaw("\",\"_type\":\"", 11); - buffer.AddN((const char*) BifConst::LogElasticSearch::type_prefix->Bytes(), - BifConst::LogElasticSearch::type_prefix->Len()); - buffer.Add(Path()); - buffer.AddRaw("\"}\n", 3); + buffer.Add(Info().path); + buffer.AddRaw("\"}}\n", 4); buffer.AddRaw("{", 1); for ( int i = 0; i < num_fields; i++ ) { if ( i > 0 && buffer.Bytes()[buffer.Len()] != ',' && vals[i]->present ) buffer.AddRaw(",", 1); - AddFieldToBuffer(vals[i], fields[i]); + AddFieldToBuffer(&buffer, vals[i], fields[i]); } buffer.AddRaw("}\n", 2); @@ -221,10 +243,63 @@ bool ElasticSearch::DoWrite(int num_fields, const Field* const * fields, return true; } + +bool ElasticSearch::UpdateIndex(double now, double rinterval, double rbase) + { + if ( rinterval == 0 ) + { + // if logs aren't being rotated, don't use a rotation oriented index name. + current_index = index_prefix; + } + else + { + double nr = calc_next_rotate(now, rinterval, rbase); + double interval_beginning = now - (rinterval - nr); + + struct tm tm; + char buf[128]; + time_t teatime = (time_t)interval_beginning; + gmtime_r(&teatime, &tm); + strftime(buf, sizeof(buf), "%Y%m%d%H%M", &tm); + + prev_index = current_index; + current_index = index_prefix + "-" + buf; + } + + //printf("%s - prev:%s current:%s\n", Info().path.c_str(), prev_index.c_str(), current_index.c_str()); + return true; + } + bool ElasticSearch::DoRotate(string rotated_path, double open, double close, bool terminating) { - //TODO: Determine what, if anything, needs to be done here. + // Update the currently used index to the new rotation interval. + UpdateIndex(close, Info().rotation_interval, Info().rotation_base); + + // Only do this stuff if there was a previous index. + if ( ! prev_index.empty() ) + { + // FIXME: I think this section is taking too long and causing the thread to die. + + // Compress the previous index + //curl_easy_reset(curl_handle); + //curl_easy_setopt(curl_handle, CURLOPT_URL, Fmt("%s/%s/_settings", es_server.c_str(), prev_index.c_str())); + //curl_easy_setopt(curl_handle, CURLOPT_CUSTOMREQUEST, "PUT"); + //curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, "{\"index\":{\"store.compress.stored\":\"true\"}}"); + //curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t) 42); + //HTTPSend(curl_handle); + + // Optimize the previous index. + // TODO: make this into variables. + //curl_easy_reset(curl_handle); + //curl_easy_setopt(curl_handle, CURLOPT_URL, Fmt("%s/%s/_optimize?max_num_segments=1&wait_for_merge=false", es_server.c_str(), prev_index.c_str())); + //HTTPSend(curl_handle); + } + + //if ( ! 
FinishedRotation(current_index, prev_index, open, close, terminating) ) + // { + // Error(Fmt("error rotating %s to %s", prev_index.c_str(), current_index.c_str())); + // } return true; } @@ -237,7 +312,7 @@ bool ElasticSearch::DoSetBuf(bool enabled) bool ElasticSearch::DoHeartbeat(double network_time, double current_time) { - if ( last_send > 0 && + if ( last_send > 0 && buffer.Len() > 0 && current_time-last_send > BifConst::LogElasticSearch::max_batch_interval ) { BatchIndex(); @@ -247,31 +322,15 @@ bool ElasticSearch::DoHeartbeat(double network_time, double current_time) } -// HTTP Functions start here. - CURL* ElasticSearch::HTTPSetup() { - const char *URL = fmt("http://%s:%d/_bulk", BifConst::LogElasticSearch::server_host->CheckString(), - (int) BifConst::LogElasticSearch::server_port);; - CURL* handle; - struct curl_slist *headers=NULL; - - handle = curl_easy_init(); + CURL* handle = curl_easy_init(); if ( ! handle ) - return handle; + { + Error("cURL did not initialize correctly."); + return 0; + } - //sprintf(URL, "http://%s:%d/_bulk", BifConst::LogElasticSearch::server_host->CheckString(), (int) BifConst::LogElasticSearch::server_port); - curl_easy_setopt(handle, CURLOPT_URL, URL); - - headers = curl_slist_append(NULL, "Content-Type: text/json; charset=utf-8"); - curl_easy_setopt(handle, CURLOPT_HTTPHEADER, headers); - - curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, &logging::writer::ElasticSearch::HTTPReceive); // This gets called with the result. - curl_easy_setopt(handle, CURLOPT_POST, 1); // All requests are POSTs - - // HTTP 1.1 likes to use chunked encoded transfers, which aren't good for speed. The best (only?) way to disable that is to - // just use HTTP 1.0 - curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); return handle; } @@ -281,14 +340,16 @@ bool ElasticSearch::HTTPReceive(void* ptr, int size, int nmemb, void* userdata) return true; } -bool ElasticSearch::HTTPSend() +bool ElasticSearch::HTTPSend(CURL *handle) { - CURLcode return_code; + curl_easy_setopt(handle, CURLOPT_HTTPHEADER, http_headers); + curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, &logging::writer::ElasticSearch::HTTPReceive); // This gets called with the result. + // HTTP 1.1 likes to use chunked encoded transfers, which aren't good for speed. + // The best (only?) way to disable that is to just use HTTP 1.0 + curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, buffer.Len()); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, buffer.Bytes()); + CURLcode return_code = curl_easy_perform(handle); - return_code = curl_easy_perform(curl_handle); switch ( return_code ) { case CURLE_COULDNT_CONNECT: @@ -296,6 +357,16 @@ bool ElasticSearch::HTTPSend() case CURLE_WRITE_ERROR: return false; + case CURLE_OK: + { + uint http_code = 0; + curl_easy_getinfo(curl_handle, CURLINFO_RESPONSE_CODE, &http_code); + if ( http_code != 200 ) + Error(Fmt("Received a non-successful status code back from ElasticSearch server.")); + + return true; + } + default: return true; } diff --git a/src/logging/writers/ElasticSearch.h b/src/logging/writers/ElasticSearch.h index bd1351214b..375845b002 100644 --- a/src/logging/writers/ElasticSearch.h +++ b/src/logging/writers/ElasticSearch.h @@ -22,8 +22,8 @@ public: protected: // Overidden from WriterBackend. 
- virtual bool DoInit(string path, int num_fields, - const threading::Field* const * fields); + virtual bool DoInit(const WriterInfo& info, int num_fields, + const threading::Field* const* fields); virtual bool DoWrite(int num_fields, const threading::Field* const* fields, threading::Value** vals); @@ -35,18 +35,22 @@ protected: virtual bool DoHeartbeat(double network_time, double current_time); private: - bool AddFieldToBuffer(threading::Value* val, const threading::Field* field); - bool AddFieldValueToBuffer(threading::Value* val, const threading::Field* field); + bool AddFieldToBuffer(ODesc *b, threading::Value* val, const threading::Field* field); + bool AddValueToBuffer(ODesc *b, threading::Value* val); bool BatchIndex(); + bool SendMappings(); + bool UpdateIndex(double now, double rinterval, double rbase); CURL* HTTPSetup(); bool HTTPReceive(void* ptr, int size, int nmemb, void* userdata); - bool HTTPSend(); + bool HTTPSend(CURL *handle); // Buffers, etc. ODesc buffer; uint64 counter; double last_send; + string current_index; + string prev_index; CURL* curl_handle; @@ -54,6 +58,14 @@ private: char* cluster_name; int cluster_name_len; + string es_server; + string bulk_url; + + struct curl_slist *http_headers; + + string path; + string index_prefix; + uint64 batch_size; }; From 9b70ee8799ec9b52528eb750abfd34bed2278422 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Mon, 9 Jul 2012 16:50:42 -0400 Subject: [PATCH 462/651] Tiny updates. --- doc/logging-elasticsearch.rst | 3 +-- src/logging/writers/ElasticSearch.cc | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst index 26b49f3a0b..b6d22cf5fa 100644 --- a/doc/logging-elasticsearch.rst +++ b/doc/logging-elasticsearch.rst @@ -78,7 +78,7 @@ Bro's ElasticSearch writer comes with a few configuration options:: - server_port: What port to send the data to. Default 9200. -- index_name: ElasticSearch indexes are like databases in a standard DB model. +- index_prefix: ElasticSearch indexes are like databases in a standard DB model. This is the name of the index to which to send the data. Default bro. - type_prefix: ElasticSearch types are like tables in a standard DB model. This is a prefix that gets prepended to Bro log names. Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. Default: none. @@ -92,5 +92,4 @@ Lots. - Perform multicast discovery for server. - Better error detection. -- Dynamic index names. - Better defaults (don't index loaded-plugins, for instance). diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 6d2f8363cc..4461508083 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -266,7 +266,7 @@ bool ElasticSearch::UpdateIndex(double now, double rinterval, double rbase) current_index = index_prefix + "-" + buf; } - //printf("%s - prev:%s current:%s\n", Info().path.c_str(), prev_index.c_str(), current_index.c_str()); + //printf("%s - prev:%s current:%s\n", Info().path.c_str(), prev_index.c_str(), current_index.c_str()); return true; } From c0bbd78ee1c856ea62687fe3f10d867e8dd760c4 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 10 Jul 2012 11:15:48 -0500 Subject: [PATCH 463/651] Fix segfault when there's an error/timeout resolving DNS requests. Addresses #846. 
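The one-line change below makes sure the TTL is only read when the resolver actually returned a result. At the script level this code path is exercised by Bro's asynchronous lookups, e.g. (hypothetical handler; lookup_addr() and the timeout branch are the relevant pieces):

    event connection_established(c: connection)
        {
        # Asynchronous reverse lookup; failures and timeouts are handled by DNS_Mgr.
        when ( local name = lookup_addr(c$id$orig_h) )
            {
            print fmt("%s resolves to %s", c$id$orig_h, name);
            }
        timeout 5 sec
            {
            print fmt("lookup of %s timed out", c$id$orig_h);
            }
        }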
--- src/DNS_Mgr.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/DNS_Mgr.cc b/src/DNS_Mgr.cc index 9e65d3c9a9..6b0f18f459 100644 --- a/src/DNS_Mgr.cc +++ b/src/DNS_Mgr.cc @@ -693,7 +693,7 @@ Val* DNS_Mgr::BuildMappingVal(DNS_Mapping* dm) void DNS_Mgr::AddResult(DNS_Mgr_Request* dr, struct nb_dns_result* r) { struct hostent* h = (r && r->host_errno == 0) ? r->hostent : 0; - u_int32_t ttl = r->ttl; + u_int32_t ttl = (r && r->host_errno == 0) ? r->ttl : 0; DNS_Mapping* new_dm; DNS_Mapping* prev_dm; From 7f4b0b52f8de12466125b0bf2ab44cfc1b4ea77e Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 10 Jul 2012 15:39:05 -0500 Subject: [PATCH 464/651] Fix compiler warnings These changes eliminate 405 of 571 warnings seen on OS X 10.7.4 with clang. --- src/EventHandler.cc | 2 +- src/Func.cc | 2 +- src/RemoteSerializer.h | 4 ++-- src/SMB.cc | 2 +- src/SerialObj.cc | 2 +- src/Sessions.h | 2 +- src/Type.cc | 2 +- src/input/readers/Ascii.cc | 4 ++-- src/logging/Manager.cc | 2 +- src/ssl-analyzer.pac | 4 ++-- src/threading/SerialTypes.h | 4 ++-- 11 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/EventHandler.cc b/src/EventHandler.cc index 2867b63437..5598f93f98 100644 --- a/src/EventHandler.cc +++ b/src/EventHandler.cc @@ -96,7 +96,7 @@ EventHandler* EventHandler::Unserialize(UnserialInfo* info) { char* name; if ( ! UNSERIALIZE_STR(&name, 0) ) - return false; + return 0; EventHandler* h = event_registry->Lookup(name); if ( ! h ) diff --git a/src/Func.cc b/src/Func.cc index 30689d4c26..582de1d9bb 100644 --- a/src/Func.cc +++ b/src/Func.cc @@ -100,7 +100,7 @@ Func* Func::Unserialize(UnserialInfo* info) if ( ! (id->HasVal() && id->ID_Val()->Type()->Tag() == TYPE_FUNC) ) { info->s->Error(fmt("ID %s is not a built-in", name)); - return false; + return 0; } Unref(f); diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index 1d7feef585..5ff7fff8d6 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -17,8 +17,8 @@ class IncrementalSendTimer; namespace threading { - class Field; - class Value; + struct Field; + struct Value; } // This class handles the communication done in Bro's main loop. diff --git a/src/SMB.cc b/src/SMB.cc index edce2a69b8..a06707328a 100644 --- a/src/SMB.cc +++ b/src/SMB.cc @@ -368,7 +368,7 @@ int SMB_Session::ParseSetupAndx(int is_orig, binpac::SMB::SMB_header const& hdr, // The binpac type depends on the negotiated server settings - // possibly we can just pick the "right" format here, and use that? - if ( hdr.flags2() && 0x0800 ) + if ( hdr.flags2() & 0x0800 ) { binpac::SMB::SMB_setup_andx_ext msg(hdr.unicode()); msg.Parse(body.data(), body.data() + body.length()); diff --git a/src/SerialObj.cc b/src/SerialObj.cc index a8ab969f5e..73cab275c2 100644 --- a/src/SerialObj.cc +++ b/src/SerialObj.cc @@ -163,7 +163,7 @@ SerialObj* SerialObj::Unserialize(UnserialInfo* info, SerialType type) if ( ! result ) { DBG_POP(DBG_SERIAL); - return false; + return 0; } DBG_POP(DBG_SERIAL); diff --git a/src/Sessions.h b/src/Sessions.h index a7d7b1272f..25065012e6 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -18,7 +18,7 @@ struct pcap_pkthdr; class EncapsulationStack; class Connection; -class ConnID; +struct ConnID; class OSFingerprint; class ConnCompressor; diff --git a/src/Type.cc b/src/Type.cc index caba0c9fa0..414c07d3d7 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -910,7 +910,7 @@ Val* RecordType::FieldDefault(int field) const const TypeDecl* td = FieldDecl(field); if ( ! 
td->attrs ) - return false; + return 0; const Attr* def_attr = td->attrs->FindAttr(ATTR_DEFAULT); diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 1731bba872..dd1e742e5e 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -232,7 +232,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) { Error(Fmt("Field: %s Invalid value for boolean: %s", field.name.c_str(), s.c_str())); - return false; + return 0; } break; @@ -262,7 +262,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) if ( pos == s.npos ) { Error(Fmt("Invalid value for subnet: %s", s.c_str())); - return false; + return 0; } int width = atoi(s.substr(pos+1).c_str()); diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 511fedc984..0fea3d577d 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -983,7 +983,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer if ( ! stream ) // Don't know this stream. - return false; + return 0; Stream::WriterMap::iterator w = stream->writers.find(Stream::WriterPathPair(writer->AsEnum(), info.path)); diff --git a/src/ssl-analyzer.pac b/src/ssl-analyzer.pac index bf9cf1e0ba..bd4b76ee98 100644 --- a/src/ssl-analyzer.pac +++ b/src/ssl-analyzer.pac @@ -295,7 +295,7 @@ refine connection SSL_Conn += { for ( int k = 0; k < num_ext; ++k ) { unsigned char *pBuffer = 0; - uint length = 0; + int length = 0; X509_EXTENSION* ex = X509_get_ext(pTemp, k); if (ex) @@ -303,7 +303,7 @@ refine connection SSL_Conn += { ASN1_STRING *pString = X509_EXTENSION_get_data(ex); length = ASN1_STRING_to_UTF8(&pBuffer, pString); //i2t_ASN1_OBJECT(&pBuffer, length, obj) - // printf("extension length: %u\n", length); + // printf("extension length: %d\n", length); // -1 indicates an error. if ( length < 0 ) continue; diff --git a/src/threading/SerialTypes.h b/src/threading/SerialTypes.h index 9ce53c7cb1..283d88bf4c 100644 --- a/src/threading/SerialTypes.h +++ b/src/threading/SerialTypes.h @@ -2,8 +2,6 @@ #ifndef THREADING_SERIALIZATIONTYPES_H #define THREADING_SERIALIZATIONTYPES_H -using namespace std; - #include #include #include @@ -11,6 +9,8 @@ using namespace std; #include "Type.h" #include "net_util.h" +using namespace std; + class SerializationFormat; namespace threading { From c4b6499d858c5799845f9f312931bc845e506e05 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 10 Jul 2012 16:27:03 -0500 Subject: [PATCH 465/651] Add sorting canonifier to rotate-custom unit test. 
(addresses #846) The output on stderr for this test is the results of many backgrounded "echo" commands, one for each rotation, so the order in which they occur may be subject to OS process scheduling and can't be relied upon --- testing/btest/scripts/base/frameworks/logging/rotate-custom.bro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro b/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro index 3f6d40adaf..8a7f16d182 100644 --- a/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro +++ b/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro @@ -2,7 +2,7 @@ #@TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT | egrep "test|test2" | sort >out # @TEST-EXEC: for i in `ls test*.log | sort`; do printf '> %s\n' $i; cat $i; done | sort | uniq >>out # @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff .stderr +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff .stderr module Test; From 6e5382da548a4d8ffbd73089a3a502778d477176 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Tue, 10 Jul 2012 23:49:31 -0400 Subject: [PATCH 466/651] Re-adding the needed call to FinishedRotation in the ES writer plugin. --- src/logging/writers/ElasticSearch.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 4461508083..1b8dfa495d 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -296,10 +296,10 @@ bool ElasticSearch::DoRotate(string rotated_path, double open, double close, boo //HTTPSend(curl_handle); } - //if ( ! FinishedRotation(current_index, prev_index, open, close, terminating) ) - // { - // Error(Fmt("error rotating %s to %s", prev_index.c_str(), current_index.c_str())); - // } + if ( ! FinishedRotation(current_index, prev_index, open, close, terminating) ) + { + Error(Fmt("error rotating %s to %s", prev_index.c_str(), current_index.c_str())); + } return true; } From b31ef8cde5c6b40a736a88ea1354f3073f99c9b1 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 11 Jul 2012 10:58:57 -0400 Subject: [PATCH 467/651] Fixing memory leak. --- src/ssl-analyzer.pac | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/ssl-analyzer.pac b/src/ssl-analyzer.pac index bd4b76ee98..d1ac470284 100644 --- a/src/ssl-analyzer.pac +++ b/src/ssl-analyzer.pac @@ -305,12 +305,12 @@ refine connection SSL_Conn += { //i2t_ASN1_OBJECT(&pBuffer, length, obj) // printf("extension length: %d\n", length); // -1 indicates an error. - if ( length < 0 ) - continue; - - StringVal* value = new StringVal(length, (char*)pBuffer); - BifEvent::generate_x509_extension(bro_analyzer(), - bro_analyzer()->Conn(), ${rec.is_orig}, value); + if ( length >= 0 ) + { + StringVal* value = new StringVal(length, (char*)pBuffer); + BifEvent::generate_x509_extension(bro_analyzer(), + bro_analyzer()->Conn(), ${rec.is_orig}, value); + } OPENSSL_free(pBuffer); } } From a44612788ee0ccee284a61e80792a645915cab5c Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 11 Jul 2012 16:53:46 -0400 Subject: [PATCH 468/651] Some small fixes to further reduce SOCKS false positive logs. 
--- scripts/base/protocols/socks/main.bro | 5 ++++- src/SOCKS.cc | 9 ++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/scripts/base/protocols/socks/main.bro b/scripts/base/protocols/socks/main.bro index ca35a3f5e4..052e666371 100644 --- a/scripts/base/protocols/socks/main.bro +++ b/scripts/base/protocols/socks/main.bro @@ -83,5 +83,8 @@ event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Addres event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=-5 { - Log::write(SOCKS::LOG, c$socks); + # This will handle the case where the analyzer failed in some way and was removed. We probably + # don't want to log these connections. + if ( "SOCKS" in c$service ) + Log::write(SOCKS::LOG, c$socks); } diff --git a/src/SOCKS.cc b/src/SOCKS.cc index 0064f6e700..02429aa208 100644 --- a/src/SOCKS.cc +++ b/src/SOCKS.cc @@ -67,7 +67,14 @@ void SOCKS_Analyzer::DeliverStream(int len, const u_char* data, bool orig) } else { - interp->NewData(orig, data, data + len); + try + { + interp->NewData(orig, data, data + len); + } + catch ( const binpac::Exception& e ) + { + ProtocolViolation(fmt("Binpac exception: %s", e.c_msg())); + } } } From 8ff8c66655fdfc2dfec703b332dfad02226be775 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 11 Jul 2012 20:10:49 -0700 Subject: [PATCH 469/651] make pthread_mutex_unlock include the reason for why the unlock fails. --- src/threading/BasicThread.cc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index e5a4dc5dbe..3dda6b5e8c 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -86,12 +86,12 @@ void BasicThread::Start() int err = pthread_mutex_init(&terminate, 0); if ( err != 0 ) - reporter->FatalError("Cannot create terminate mutex for thread %s:%s", name.c_str(), strerror(err)); + reporter->FatalError("Cannot create terminate mutex for thread %s: %s", name.c_str(), strerror(err)); // We use this like a binary semaphore and acquire it immediately. err = pthread_mutex_lock(&terminate); if ( err != 0 ) - reporter->FatalError("Cannot aquire terminate mutex for thread %s:%s", name.c_str(), strerror(err)); + reporter->FatalError("Cannot aquire terminate mutex for thread %s: %s", name.c_str(), strerror(err)); err = pthread_create(&pthread, 0, BasicThread::launcher, this); if ( err != 0 ) @@ -116,8 +116,9 @@ void BasicThread::Stop() // Signal that it's ok for the thread to exit now by unlocking the // mutex. - if ( pthread_mutex_unlock(&terminate) != 0 ) - reporter->FatalError("Failure flagging terminate condition for thread %s", name.c_str()); + int err = pthread_mutex_unlock(&terminate); + if ( err != 0 ) + reporter->FatalError("Failure flagging terminate condition for thread %s: %s", name.c_str(), strerror(err)); terminating = true; From 5607e86ad3e8349426d0205fc8867050079d24d4 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 12 Jul 2012 12:55:34 -0400 Subject: [PATCH 470/651] Reporter warnings and error now print to stderr by default. - Changed the geoip warnings to Info. 
--- scripts/base/frameworks/reporter/main.bro | 26 +++++++++++++++++++---- src/bro.bif | 4 ++-- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/scripts/base/frameworks/reporter/main.bro b/scripts/base/frameworks/reporter/main.bro index 3c19005364..8b45819442 100644 --- a/scripts/base/frameworks/reporter/main.bro +++ b/scripts/base/frameworks/reporter/main.bro @@ -11,7 +11,7 @@ module Reporter; export { ## The reporter logging stream identifier. redef enum Log::ID += { LOG }; - + ## An indicator of reporter message severity. type Level: enum { ## Informational, not needing specific attention. @@ -36,24 +36,42 @@ export { ## Not all reporter messages will have locations in them though. location: string &log &optional; }; + + ## Send reporter error messages to STDERR by default. The option to + ## turn it off is presented here in case Bro is being run by some + ## external harness and shouldn't output anything to the console. + const errors_to_stderr = T &redef; + + ## Send reporter warning messages to STDERR by default. The option to + ## turn it off is presented here in case Bro is being run by some + ## external harness and shouldn't output anything to the console. + const warnings_to_stderr = T &redef; } +global stderr: file; + event bro_init() &priority=5 { Log::create_stream(Reporter::LOG, [$columns=Info]); + + if ( errors_to_stderr || warnings_to_stderr ) + stderr = open("/dev/stderr"); } -event reporter_info(t: time, msg: string, location: string) +event reporter_info(t: time, msg: string, location: string) &priority=-5 { Log::write(Reporter::LOG, [$ts=t, $level=INFO, $message=msg, $location=location]); } -event reporter_warning(t: time, msg: string, location: string) +event reporter_warning(t: time, msg: string, location: string) &priority=-5 { Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]); } -event reporter_error(t: time, msg: string, location: string) +event reporter_error(t: time, msg: string, location: string) &priority=-5 { + if ( errors_to_stderr ) + print stderr, fmt("ERROR: %s", msg); + Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]); } diff --git a/src/bro.bif b/src/bro.bif index f18d3ba1b5..1d4aeed4d6 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -3764,7 +3764,7 @@ static GeoIP* open_geoip_db(GeoIPDBTypes type) geoip = GeoIP_open_type(type, GEOIP_MEMORY_CACHE); if ( ! geoip ) - reporter->Warning("Failed to open GeoIP database: %s", + reporter->Info("Failed to open GeoIP database: %s", GeoIPDBFileName[type]); return geoip; } @@ -3804,7 +3804,7 @@ function lookup_location%(a: addr%) : geo_location if ( ! geoip ) builtin_error("Can't initialize GeoIP City/Country database"); else - reporter->Warning("Fell back to GeoIP Country database"); + reporter->Info("Fell back to GeoIP Country database"); } else have_city_db = true; From e1bd9609264a4d067e3c58016806877f0f859c8d Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 13 Jul 2012 02:20:41 -0700 Subject: [PATCH 471/651] Unblock SIGFPE, SIGILL, SIGSEGV and SIGBUS for threads. According to POSIX, behavior is unspecified if a specific thread receives one of those signals (because of e.g. executing an invalid instruction) if the signal is blocked. This resulted in segfaults in threads not propagating to the main thread. 
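Conceptually, each worker thread keeps every asynchronous signal blocked so that only the main thread handles them, but the mask has to leave the synchronous fault signals deliverable to the thread that raises them. As a self-contained sketch of that pattern (the actual change to BasicThread::launcher is in the hunk below):

    #include <signal.h>
    #include <pthread.h>
    #include <assert.h>

    static void block_signals_except_faults()
        {
        sigset_t mask_set;
        sigfillset(&mask_set);

        // Leave SIGFPE, SIGILL, SIGSEGV and SIGBUS unblocked: POSIX leaves the
        // behavior unspecified when a thread generates one of these while it is
        // blocked, and in practice the thread hangs instead of crashing visibly.
        sigdelset(&mask_set, SIGFPE);
        sigdelset(&mask_set, SIGILL);
        sigdelset(&mask_set, SIGSEGV);
        sigdelset(&mask_set, SIGBUS);

        int res = pthread_sigmask(SIG_BLOCK, &mask_set, 0);
        assert(res == 0);
        }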
Adresses #848 --- src/threading/BasicThread.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index 3dda6b5e8c..6ce5ad5f52 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -164,6 +164,13 @@ void* BasicThread::launcher(void *arg) // process. sigset_t mask_set; sigfillset(&mask_set); + // Unblock the signals where according to POSIX the result is undefined if they are blocked + // in a thread and received by that thread. If those are not unblocked, threads will just + // hang when they crash without the user being notified. + sigdelset(&mask_set, SIGFPE); + sigdelset(&mask_set, SIGILL); + sigdelset(&mask_set, SIGSEGV); + sigdelset(&mask_set, SIGBUS); int res = pthread_sigmask(SIG_BLOCK, &mask_set, 0); assert(res == 0); // From f43576cff346bcecde12bd477f444d532e4b0632 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Fri, 13 Jul 2012 14:04:24 -0400 Subject: [PATCH 472/651] Fix some Info:Record field documentation. --- .../base/frameworks/communication/main.bro | 2 +- scripts/base/protocols/conn/main.bro | 25 +++++++++-------- scripts/base/protocols/dns/main.bro | 8 +++--- scripts/base/protocols/ftp/main.bro | 2 ++ scripts/base/protocols/http/main.bro | 2 ++ scripts/base/protocols/irc/main.bro | 2 ++ scripts/base/protocols/smtp/main.bro | 28 +++++++++++++++---- scripts/base/protocols/socks/main.bro | 8 ++++-- scripts/base/protocols/ssh/main.bro | 10 ++++--- scripts/base/protocols/ssl/main.bro | 10 ++++--- scripts/base/protocols/syslog/main.bro | 4 ++- 11 files changed, 67 insertions(+), 34 deletions(-) diff --git a/scripts/base/frameworks/communication/main.bro b/scripts/base/frameworks/communication/main.bro index ceae357f78..81f9a383b9 100644 --- a/scripts/base/frameworks/communication/main.bro +++ b/scripts/base/frameworks/communication/main.bro @@ -42,7 +42,7 @@ export { type Info: record { ## The network time at which a communication event occurred. ts: time &log; - ## The peer name (if any) for which a communication event is concerned. + ## The peer name (if any) with which a communication event is concerned. peer: string &log &optional; ## Where the communication event message originated from, that is, ## either from the scripting layer or inside the Bro process. diff --git a/scripts/base/protocols/conn/main.bro b/scripts/base/protocols/conn/main.bro index 5796c3c6b1..6cc2510027 100644 --- a/scripts/base/protocols/conn/main.bro +++ b/scripts/base/protocols/conn/main.bro @@ -17,7 +17,7 @@ export { type Info: record { ## This is the time of the first packet. ts: time &log; - ## A unique identifier of a connection. + ## A unique identifier of the connection. uid: string &log; ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; @@ -61,7 +61,7 @@ export { ## be left empty at all times. local_orig: bool &log &optional; - ## Indicates the number of bytes missed in content gaps which is + ## Indicates the number of bytes missed in content gaps, which is ## representative of packet loss. A value other than zero will ## normally cause protocol analysis to fail but some analysis may ## have been completed prior to the packet loss. @@ -83,23 +83,24 @@ export { ## i inconsistent packet (e.g. SYN+RST bits both set) ## ====== ==================================================== ## - ## If the letter is in upper case it means the event comes from the - ## originator and lower case then means the responder. - ## Also, there is compression. 
We only record one "d" in each direction, - ## for instance. I.e., we just record that data went in that direction. - ## This history is not meant to encode how much data that happened to - ## be. + ## If the event comes from the originator, the letter is in upper-case; if it comes + ## from the responder, it's in lower-case. Multiple packets of the same type will + ## only be noted once (e.g. we only record one "d" in each direction, regardless of + ## how many data packets were seen.) history: string &log &optional; - ## Number of packets the originator sent. + ## Number of packets that the originator sent. ## Only set if :bro:id:`use_conn_size_analyzer` = T orig_pkts: count &log &optional; - ## Number IP level bytes the originator sent (as seen on the wire, + ## Number of IP level bytes that the originator sent (as seen on the wire, ## taken from IP total_length header field). ## Only set if :bro:id:`use_conn_size_analyzer` = T orig_ip_bytes: count &log &optional; - ## Number of packets the responder sent. See ``orig_pkts``. + ## Number of packets that the responder sent. + ## Only set if :bro:id:`use_conn_size_analyzer` = T resp_pkts: count &log &optional; - ## Number IP level bytes the responder sent. See ``orig_pkts``. + ## Number og IP level bytes that the responder sent (as seen on the wire, + ## taken from IP total_length header field). + ## Only set if :bro:id:`use_conn_size_analyzer` = T resp_ip_bytes: count &log &optional; ## If this connection was over a tunnel, indicate the ## *uid* values for any encapsulating parent connections diff --git a/scripts/base/protocols/dns/main.bro b/scripts/base/protocols/dns/main.bro index c50a8bdc54..600de4beaf 100644 --- a/scripts/base/protocols/dns/main.bro +++ b/scripts/base/protocols/dns/main.bro @@ -45,16 +45,16 @@ export { AA: bool &log &default=F; ## The Truncation bit specifies that the message was truncated. TC: bool &log &default=F; - ## The Recursion Desired bit indicates to a name server to recursively - ## purse the query. + ## The Recursion Desired bit in a request message indicates that + ## the client wants recursive service for this query. RD: bool &log &default=F; - ## The Recursion Available bit in a response message indicates if + ## The Recursion Available bit in a response message indicates that ## the name server supports recursive queries. RA: bool &log &default=F; ## A reserved field that is currently supposed to be zero in all ## queries and responses. Z: count &log &default=0; - ## The set of resource descriptions in answer of the query. + ## The set of resource descriptions in the query answer. answers: vector of string &log &optional; ## The caching intervals of the associated RRs described by the ## ``answers`` field. diff --git a/scripts/base/protocols/ftp/main.bro b/scripts/base/protocols/ftp/main.bro index 7c5bbaefdc..d20bc92d8a 100644 --- a/scripts/base/protocols/ftp/main.bro +++ b/scripts/base/protocols/ftp/main.bro @@ -28,7 +28,9 @@ export { type Info: record { ## Time when the command was sent. ts: time &log; + ## Unique ID for the connection. uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; ## User name for the current FTP session. user: string &log &default=""; diff --git a/scripts/base/protocols/http/main.bro b/scripts/base/protocols/http/main.bro index 6571548145..f4377e03de 100644 --- a/scripts/base/protocols/http/main.bro +++ b/scripts/base/protocols/http/main.bro @@ -22,7 +22,9 @@ export { type Info: record { ## Timestamp for when the request happened. 
ts: time &log; + ## Unique ID for the connection. uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; ## Represents the pipelined depth into the connection of this ## request/response transaction. diff --git a/scripts/base/protocols/irc/main.bro b/scripts/base/protocols/irc/main.bro index 2bf2a9bbb9..1cf542b8ea 100644 --- a/scripts/base/protocols/irc/main.bro +++ b/scripts/base/protocols/irc/main.bro @@ -11,7 +11,9 @@ export { type Info: record { ## Timestamp when the command was seen. ts: time &log; + ## Unique ID for the connection. uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; ## Nick name given for the connection. nick: string &log &optional; diff --git a/scripts/base/protocols/smtp/main.bro b/scripts/base/protocols/smtp/main.bro index 513b85e342..03b3d36a24 100644 --- a/scripts/base/protocols/smtp/main.bro +++ b/scripts/base/protocols/smtp/main.bro @@ -8,33 +8,51 @@ export { redef enum Log::ID += { LOG }; type Info: record { + ## Time when the message was first seen. ts: time &log; + ## Unique ID for the connection. uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; - ## This is a number that indicates the number of messages deep into - ## this connection where this particular message was transferred. + ## A count to represent the depth of this message transaction in a single + ## connection where multiple messages were transferred. trans_depth: count &log; + ## Contents of the Helo header. helo: string &log &optional; + ## Contents of the From header. mailfrom: string &log &optional; + ## Contents of the Rcpt header. rcptto: set[string] &log &optional; + ## Contents of the Date header. date: string &log &optional; + ## Contents of the From header. from: string &log &optional; + ## Contents of the To header. to: set[string] &log &optional; + ## Contents of the ReplyTo header. reply_to: string &log &optional; + ## Contents of the MsgID header. msg_id: string &log &optional; + ## Contents of the In-Reply-To header. in_reply_to: string &log &optional; + ## Contents of the Subject header. subject: string &log &optional; + ## Contents of the X-Origininating-IP header. x_originating_ip: addr &log &optional; + ## Contents of the first Received header. first_received: string &log &optional; + ## Contents of the second Received header. second_received: string &log &optional; - ## The last message the server sent to the client. + ## The last message that the server sent to the client. last_reply: string &log &optional; + ## The message transmission path, as extracted from the headers. path: vector of addr &log &optional; + ## Value of the User-Agent header from the client. user_agent: string &log &optional; - ## Indicate if the "Received: from" headers should still be processed. + ## Indicates if the "Received: from" headers should still be processed. process_received_from: bool &default=T; - ## Indicates if client activity has been seen, but not yet logged + ## Indicates if client activity has been seen, but not yet logged. has_client_activity: bool &default=F; }; diff --git a/scripts/base/protocols/socks/main.bro b/scripts/base/protocols/socks/main.bro index 052e666371..79ae4baa19 100644 --- a/scripts/base/protocols/socks/main.bro +++ b/scripts/base/protocols/socks/main.bro @@ -9,19 +9,21 @@ export { type Info: record { ## Time when the proxy connection was first detected. 
ts: time &log; + ## Unique ID for the tunnel - may correspond to connection uid or be non-existent. uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; ## Protocol version of SOCKS. version: count &log; - ## Username for the proxy if extracted from the network. + ## Username for the proxy if extracted from the network.. user: string &log &optional; ## Server status for the attempt at using the proxy. status: string &log &optional; - ## Client requested SOCKS address. Could be an address, a name or both. + ## Client requested SOCKS address. Could be an address, a name or both. request: SOCKS::Address &log &optional; ## Client requested port. request_p: port &log &optional; - ## Server bound address. Could be an address, a name or both. + ## Server bound address. Could be an address, a name or both. bound: SOCKS::Address &log &optional; ## Server bound port. bound_p: port &log &optional; diff --git a/scripts/base/protocols/ssh/main.bro b/scripts/base/protocols/ssh/main.bro index 0d3439bb1f..cd20f4e913 100644 --- a/scripts/base/protocols/ssh/main.bro +++ b/scripts/base/protocols/ssh/main.bro @@ -26,21 +26,23 @@ export { type Info: record { ## Time when the SSH connection began. ts: time &log; + ## Unique ID for the connection. uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; ## Indicates if the login was heuristically guessed to be "success" ## or "failure". status: string &log &optional; ## Direction of the connection. If the client was a local host - ## logging into an external host, this would be OUTBOUD. INBOUND + ## logging into an external host, this would be OUTBOUND. INBOUND ## would be set for the opposite situation. # TODO: handle local-local and remote-remote better. direction: Direction &log &optional; - ## Software string given by the client. + ## Software string from the client. client: string &log &optional; - ## Software string given by the server. + ## Software string from the server. server: string &log &optional; - ## Amount of data returned from the server. This is currently + ## Amount of data returned from the server. This is currently ## the only measure of the success heuristic and it is logged to ## assist analysts looking at the logs to make their own determination ## about the success on a case-by-case basis. diff --git a/scripts/base/protocols/ssl/main.bro b/scripts/base/protocols/ssl/main.bro index b5f74d5122..f61e0d68ab 100644 --- a/scripts/base/protocols/ssl/main.bro +++ b/scripts/base/protocols/ssl/main.bro @@ -9,13 +9,15 @@ export { redef enum Log::ID += { LOG }; type Info: record { - ## Time when the SSL connection began. + ## Time when the SSL connection was first detected. ts: time &log; - uid: string &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; - ## SSL/TLS version the server offered. + ## SSL/TLS version that the server offered. version: string &log &optional; - ## SSL/TLS cipher suite the server chose. + ## SSL/TLS cipher suite that the server chose. cipher: string &log &optional; ## Value of the Server Name Indicator SSL/TLS extension. It ## indicates the server name that the client was requesting. 
diff --git a/scripts/base/protocols/syslog/main.bro b/scripts/base/protocols/syslog/main.bro index 79f89d5e71..61334e3f2b 100644 --- a/scripts/base/protocols/syslog/main.bro +++ b/scripts/base/protocols/syslog/main.bro @@ -9,9 +9,11 @@ export { redef enum Log::ID += { LOG }; type Info: record { - ## Timestamp of when the syslog message was seen. + ## Timestamp when the syslog message was seen. ts: time &log; + ## Unique ID for the connection. uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; ## Protocol over which the message was seen. proto: transport_proto &log; From 8279de25c98e54e526a7d7abacd4c69d28b3300a Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 13 Jul 2012 14:25:31 -0500 Subject: [PATCH 473/651] Remove baselines for some leak-detecting unit tests. They were drifting from the non-leak-detecting unit tests and the point of these was just leak detecting anyway, don't need the redundancy. --- .../btest/Baseline/core.leaks.ayiya/conn.log | 15 ---- .../btest/Baseline/core.leaks.ayiya/http.log | 10 --- .../Baseline/core.leaks.ayiya/tunnel.log | 11 --- .../btest/Baseline/core.leaks.teredo/conn.log | 28 ------- .../btest/Baseline/core.leaks.teredo/http.log | 11 --- .../btest/Baseline/core.leaks.teredo/output | 83 ------------------- .../Baseline/core.leaks.teredo/tunnel.log | 13 --- testing/btest/core/leaks/ayiya.test | 3 - testing/btest/core/leaks/teredo.bro | 4 - 9 files changed, 178 deletions(-) delete mode 100644 testing/btest/Baseline/core.leaks.ayiya/conn.log delete mode 100644 testing/btest/Baseline/core.leaks.ayiya/http.log delete mode 100644 testing/btest/Baseline/core.leaks.ayiya/tunnel.log delete mode 100644 testing/btest/Baseline/core.leaks.teredo/conn.log delete mode 100644 testing/btest/Baseline/core.leaks.teredo/http.log delete mode 100644 testing/btest/Baseline/core.leaks.teredo/output delete mode 100644 testing/btest/Baseline/core.leaks.teredo/tunnel.log diff --git a/testing/btest/Baseline/core.leaks.ayiya/conn.log b/testing/btest/Baseline/core.leaks.ayiya/conn.log deleted file mode 100644 index 5c23b4c404..0000000000 --- a/testing/btest/Baseline/core.leaks.ayiya/conn.log +++ /dev/null @@ -1,15 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents -#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] -1257655301.595604 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 tcp http 2.101052 2981 4665 S1 - 0 ShADad 10 3605 11 5329 k6kgXLOoSKl -1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 udp ayiya 20.879001 5129 6109 SF - 0 Dd 21 5717 13 6473 (empty) -1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 udp ayiya - - - SHR - 0 d 0 0 1 176 (empty) -1257655296.585333 FrJExwHcSal :: 135 ff02::1:ff00:2 136 icmp - - - - OTH - 0 - 1 64 0 0 k6kgXLOoSKl -1257655293.629048 arKYeMETxOg 2001:4978:f:4c::1 128 2001:4978:f:4c::2 129 icmp - 23.834987 168 56 OTH - 0 - 3 312 1 104 UWkUyAuUGXf,k6kgXLOoSKl -1257655296.585188 TEfuqmmG4bh fe80::216:cbff:fe9a:4cb9 131 ff02::1:ff00:2 130 icmp - 0.919988 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl -1257655296.585151 j4u32Pc5bif fe80::216:cbff:fe9a:4cb9 131 ff02::2:f901:d225 130 icmp - 0.719947 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl -1257655296.585034 
nQcgTWjvg4c fe80::216:cbff:fe9a:4cb9 131 ff02::1:ff9a:4cb9 130 icmp - 4.922880 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl diff --git a/testing/btest/Baseline/core.leaks.ayiya/http.log b/testing/btest/Baseline/core.leaks.ayiya/http.log deleted file mode 100644 index 7cef1a1b8e..0000000000 --- a/testing/btest/Baseline/core.leaks.ayiya/http.log +++ /dev/null @@ -1,10 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path http -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file -#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1257655301.652206 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 10102 200 OK - - - (empty) - - - text/html - - -1257655302.514424 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 2 GET ipv6.google.com /csi?v=3&s=webhp&action=&tran=undefined&e=17259,19771,21517,21766,21887,22212&ei=BUz2Su7PMJTglQfz3NzCAw&rt=prt.77,xjs.565,ol.645 http://ipv6.google.com/ Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 0 204 No Content - - - (empty) - - - - - - -1257655303.603569 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 3 GET ipv6.google.com /gen_204?atyp=i&ct=fade&cad=1254&ei=BUz2Su7PMJTglQfz3NzCAw&zx=1257655303600 http://ipv6.google.com/ Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 0 204 No Content - - - (empty) - - - - - - diff --git a/testing/btest/Baseline/core.leaks.ayiya/tunnel.log b/testing/btest/Baseline/core.leaks.ayiya/tunnel.log deleted file mode 100644 index 512f49b6ee..0000000000 --- a/testing/btest/Baseline/core.leaks.ayiya/tunnel.log +++ /dev/null @@ -1,11 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path tunnel -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type -#types time string addr port addr port enum enum -1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA -1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::DISCOVER Tunnel::AYIYA -1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA -1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::CLOSE Tunnel::AYIYA diff --git a/testing/btest/Baseline/core.leaks.teredo/conn.log b/testing/btest/Baseline/core.leaks.teredo/conn.log deleted file mode 100644 index 151230886b..0000000000 --- a/testing/btest/Baseline/core.leaks.teredo/conn.log +++ /dev/null @@ -1,28 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path conn -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes parents -#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] -1210953047.736921 arKYeMETxOg 192.168.2.16 1576 
75.126.130.163 80 tcp - 0.000357 0 0 SHR - 0 fA 1 40 1 40 (empty) -1210953050.867067 k6kgXLOoSKl 192.168.2.16 1577 75.126.203.78 80 tcp - 0.000387 0 0 SHR - 0 fA 1 40 1 40 (empty) -1210953057.833364 5OKnoww6xl4 192.168.2.16 1577 75.126.203.78 80 tcp - 0.079208 0 0 SH - 0 Fa 1 40 1 40 (empty) -1210953058.007081 VW0XPVINV8a 192.168.2.16 1576 75.126.130.163 80 tcp - - - - RSTOS0 - 0 R 1 40 0 0 (empty) -1210953057.834454 3PKsZ2Uye21 192.168.2.16 1578 75.126.203.78 80 tcp http 0.407908 790 171 RSTO - 0 ShADadR 6 1038 4 335 (empty) -1210953058.350065 fRFu0wcOle6 192.168.2.16 1920 192.168.2.1 53 udp dns 0.223055 66 438 SF - 0 Dd 2 122 2 494 (empty) -1210953058.577231 qSsw6ESzHV4 192.168.2.16 137 192.168.2.255 137 udp dns 1.499261 150 0 S0 - 0 D 3 234 0 0 (empty) -1210953074.264819 Tw8jXtpTGu6 192.168.2.16 1920 192.168.2.1 53 udp dns 0.297723 123 598 SF - 0 Dd 3 207 3 682 (empty) -1210953061.312379 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 tcp http 12.810848 1675 10467 S1 - 0 ShADad 10 2279 12 11191 GSxOnSLghOa -1210953076.058333 EAr0uf4mhq 192.168.2.16 1578 75.126.203.78 80 tcp - - - - RSTRH - 0 r 0 0 1 40 (empty) -1210953074.055744 h5DsfNtYzi1 192.168.2.16 1577 75.126.203.78 80 tcp - - - - RSTRH - 0 r 0 0 1 40 (empty) -1210953074.057124 P654jzLoe3a 192.168.2.16 1576 75.126.130.163 80 tcp - - - - RSTRH - 0 r 0 0 1 40 (empty) -1210953074.570439 c4Zw9TmAE05 192.168.2.16 1580 67.228.110.120 80 tcp http 0.466677 469 3916 SF - 0 ShADadFf 7 757 6 4164 (empty) -1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 udp teredo 8.928880 129 48 SF - 0 Dd 2 185 1 76 (empty) -1210953060.829233 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 udp teredo 13.293994 2359 11243 SF - 0 Dd 12 2695 13 11607 (empty) -1210953058.933954 iE6yhOq3SF 0.0.0.0 68 255.255.255.255 67 udp - - - - S0 - 0 D 1 328 0 0 (empty) -1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 udp teredo - - - SHR - 0 d 0 0 1 137 (empty) -1210953046.591933 UWkUyAuUGXf 192.168.2.16 138 192.168.2.255 138 udp - 28.448321 416 0 S0 - 0 D 2 472 0 0 (empty) -1210953052.324629 FrJExwHcSal fe80::8000:f227:bec8:61af 134 fe80::8000:ffff:ffff:fffd 133 icmp - - - - OTH - 0 - 1 88 0 0 TEfuqmmG4bh -1210953060.829303 qCaWGmzFtM5 2001:0:4137:9e50:8000:f12a:b9c8:2815 128 2001:4860:0:2001::68 129 icmp - 0.463615 4 4 OTH - 0 - 1 52 1 52 GSxOnSLghOa,nQcgTWjvg4c -1210953052.202579 j4u32Pc5bif fe80::8000:ffff:ffff:fffd 133 ff02::2 134 icmp - - - - OTH - 0 - 1 64 0 0 nQcgTWjvg4c diff --git a/testing/btest/Baseline/core.leaks.teredo/http.log b/testing/btest/Baseline/core.leaks.teredo/http.log deleted file mode 100644 index b3cf832083..0000000000 --- a/testing/btest/Baseline/core.leaks.teredo/http.log +++ /dev/null @@ -1,11 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path http -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file -#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1210953057.917183 3PKsZ2Uye21 192.168.2.16 1578 75.126.203.78 80 1 POST download913.avast.com /cgi-bin/iavs4stats.cgi - Syncer/4.80 (av_pro-1169;f) 589 0 204 - - - (empty) - - - text/plain - - -1210953061.585996 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 
2001:4860:0:2001::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 6640 200 OK - - - (empty) - - - text/html - - -1210953073.381474 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 2 GET ipv6.google.com /search?hl=en&q=Wireshark+!&btnG=Google+Search http://ipv6.google.com/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 25119 200 OK - - - (empty) - - - text/html - - -1210953074.674817 c4Zw9TmAE05 192.168.2.16 1580 67.228.110.120 80 1 GET www.wireshark.org / http://ipv6.google.com/search?hl=en&q=Wireshark+%21&btnG=Google+Search Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 11845 200 OK - - - (empty) - - - text/xml - - diff --git a/testing/btest/Baseline/core.leaks.teredo/output b/testing/btest/Baseline/core.leaks.teredo/output deleted file mode 100644 index 02d5a41e74..0000000000 --- a/testing/btest/Baseline/core.leaks.teredo/output +++ /dev/null @@ -1,83 +0,0 @@ -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] - ip6: [class=0, flow=0, len=24, nxt=58, hlim=255, src=fe80::8000:ffff:ffff:fffd, dst=ff02::2, exts=[]] - auth: [id=, value=, nonce=14796129349558001544, confirm=0] -auth: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] - ip6: [class=0, flow=0, len=24, nxt=58, hlim=255, src=fe80::8000:ffff:ffff:fffd, dst=ff02::2, exts=[]] - auth: [id=, value=, nonce=14796129349558001544, confirm=0] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] - ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] - auth: [id=, value=, nonce=14796129349558001544, confirm=0] - origin: [p=3797/udp, a=70.55.215.234] -auth: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] - ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] - auth: [id=, value=, nonce=14796129349558001544, confirm=0] - origin: [p=3797/udp, a=70.55.215.234] -origin: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.81, resp_p=3544/udp] - ip6: [class=0, flow=0, len=48, nxt=58, hlim=255, src=fe80::8000:f227:bec8:61af, dst=fe80::8000:ffff:ffff:fffd, exts=[]] - auth: [id=, value=, nonce=14796129349558001544, confirm=0] - origin: [p=3797/udp, a=70.55.215.234] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=0, nxt=59, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=0, nxt=59, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] - ip6: [class=0, flow=0, len=12, nxt=58, hlim=21, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] - ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] - origin: [p=32900/udp, a=83.170.1.38] -origin: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] - ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, 
dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] - origin: [p=32900/udp, a=83.170.1.38] -bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=65.55.158.80, resp_p=3544/udp] - ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=fe80::708d:fe83:4114:a512, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] - origin: [p=32900/udp, a=83.170.1.38] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=fe80::708d:fe83:4114:a512, exts=[]] -bubble: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=0, nxt=59, hlim=0, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=fe80::708d:fe83:4114:a512, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=12, nxt=58, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=24, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=24, nxt=6, hlim=245, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=817, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=20, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=514, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=898, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] 
-packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=812, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=1232, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=717, nxt=6, hlim=58, src=2001:4860:0:2001::68, dst=2001:0:4137:9e50:8000:f12a:b9c8:2815, exts=[]] -packet: [orig_h=192.168.2.16, orig_p=3797/udp, resp_h=83.170.1.38, resp_p=32900/udp] - ip6: [class=0, flow=0, len=20, nxt=6, hlim=128, src=2001:0:4137:9e50:8000:f12a:b9c8:2815, dst=2001:4860:0:2001::68, exts=[]] diff --git a/testing/btest/Baseline/core.leaks.teredo/tunnel.log b/testing/btest/Baseline/core.leaks.teredo/tunnel.log deleted file mode 100644 index 5a2114dd1c..0000000000 --- a/testing/btest/Baseline/core.leaks.teredo/tunnel.log +++ /dev/null @@ -1,13 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path tunnel -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p action tunnel_type -#types time string addr port addr port enum enum -1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::DISCOVER Tunnel::TEREDO -1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::DISCOVER Tunnel::TEREDO -1210953061.292918 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::DISCOVER Tunnel::TEREDO -1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::CLOSE Tunnel::TEREDO -1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::CLOSE Tunnel::TEREDO -1210953076.058333 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::CLOSE Tunnel::TEREDO diff --git a/testing/btest/core/leaks/ayiya.test b/testing/btest/core/leaks/ayiya.test index adad42a822..2093924c7a 100644 --- a/testing/btest/core/leaks/ayiya.test +++ b/testing/btest/core/leaks/ayiya.test @@ -5,6 +5,3 @@ # @TEST-GROUP: leaks # # @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local bro -m -r $TRACES/tunnels/ayiya3.trace -# @TEST-EXEC: btest-diff tunnel.log -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff http.log diff --git a/testing/btest/core/leaks/teredo.bro b/testing/btest/core/leaks/teredo.bro index 9902f1258b..be298f4d68 100644 --- a/testing/btest/core/leaks/teredo.bro +++ b/testing/btest/core/leaks/teredo.bro @@ -5,10 +5,6 @@ # @TEST-GROUP: leaks # # @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -r $TRACES/tunnels/Teredo.pcap %INPUT >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff tunnel.log -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff http.log function print_teredo(name: string, outer: connection, inner: teredo_hdr) { From 353393f9bd9c43bab74b1ba4244d82e414b0698c Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 13 Jul 2012 14:32:50 -0500 Subject: [PATCH 474/651] Fix segfault when incrementing whole vector values. Also removed RefExpr::Eval(Val*) method since it was never called (Clang emitted warning about this hiding overloaded virtual function UnaryExpr::Eval(Frame*)) and doesn't appear to be necessary even if it was called to avoid the default vector handling of UnaryExpr::Eval (as the comment suggests as the intention). --- src/Expr.cc | 7 ---- src/Expr.h | 4 --- .../btest/Baseline/language.incr-vec-expr/out | 5 +++ testing/btest/core/leaks/incr-vec-expr.test | 35 +++++++++++++++++++ testing/btest/language/incr-vec-expr.test | 27 ++++++++++++++ 5 files changed, 67 insertions(+), 11 deletions(-) create mode 100644 testing/btest/Baseline/language.incr-vec-expr/out create mode 100644 testing/btest/core/leaks/incr-vec-expr.test create mode 100644 testing/btest/language/incr-vec-expr.test diff --git a/src/Expr.cc b/src/Expr.cc index 58f5db3fd1..b62f119bae 100644 --- a/src/Expr.cc +++ b/src/Expr.cc @@ -1035,12 +1035,10 @@ Val* IncrExpr::Eval(Frame* f) const { Val* new_elt = DoSingleEval(f, elt); v_vec->Assign(i, new_elt, this, OP_INCR); - Unref(new_elt); // was Ref()'d by Assign() } else v_vec->Assign(i, 0, this, OP_INCR); } - // FIXME: Is the next line needed? op->Assign(f, v_vec, OP_INCR); } @@ -2402,11 +2400,6 @@ Expr* RefExpr::MakeLvalue() return this; } -Val* RefExpr::Eval(Val* v) const - { - return Fold(v); - } - void RefExpr::Assign(Frame* f, Val* v, Opcode opcode) { op->Assign(f, v, opcode); diff --git a/src/Expr.h b/src/Expr.h index f0798359c2..c16cf86612 100644 --- a/src/Expr.h +++ b/src/Expr.h @@ -608,10 +608,6 @@ public: void Assign(Frame* f, Val* v, Opcode op = OP_ASSIGN); Expr* MakeLvalue(); - // Only overridden to avoid special vector handling which doesn't apply - // for this class. - Val* Eval(Val* v) const; - protected: friend class Expr; RefExpr() { } diff --git a/testing/btest/Baseline/language.incr-vec-expr/out b/testing/btest/Baseline/language.incr-vec-expr/out new file mode 100644 index 0000000000..b6c108a2d8 --- /dev/null +++ b/testing/btest/Baseline/language.incr-vec-expr/out @@ -0,0 +1,5 @@ +[0, 0, 0] +[a=0, b=test, c=[1, 2, 3]] +[1, 1, 1] +[a=1, b=test, c=[1, 2, 3]] +[a=1, b=test, c=[2, 3, 4]] diff --git a/testing/btest/core/leaks/incr-vec-expr.test b/testing/btest/core/leaks/incr-vec-expr.test new file mode 100644 index 0000000000..d2b94a5e63 --- /dev/null +++ b/testing/btest/core/leaks/incr-vec-expr.test @@ -0,0 +1,35 @@ +# Needs perftools support. +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local bro -b -m -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT + +type rec: record { + a: count; + b: string; + c: vector of count; +}; + +global vec: vector of count = vector(0,0,0); + +global v: rec = [$a=0, $b="test", $c=vector(1,2,3)]; + +event new_connection(c: connection) + { + print vec; + print v; + + ++vec; + + print vec; + + ++v$a; + + print v; + + ++v$c; + + print v; + } diff --git a/testing/btest/language/incr-vec-expr.test b/testing/btest/language/incr-vec-expr.test new file mode 100644 index 0000000000..c9945061a2 --- /dev/null +++ b/testing/btest/language/incr-vec-expr.test @@ -0,0 +1,27 @@ +# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type rec: record { + a: count; + b: string; + c: vector of count; +}; + +global vec: vector of count = vector(0,0,0); + +global v: rec = [$a=0, $b="test", $c=vector(1,2,3)]; + +print vec; +print v; + +++vec; + +print vec; + +++v$a; + +print v; + +++v$c; + +print v; From 0ef91538dbd5890dc7eaf265e74c0c3a85880000 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 13 Jul 2012 16:25:58 -0500 Subject: [PATCH 475/651] Fix overrides of TCP_ApplicationAnalyzer::EndpointEOF. In many cases, classes derived from TCP_ApplicationAnalyzer were *overloading* instead of overriding EndpointEOF causing the parent class version to become hidden in the child and also for the child's version to never to called polymorphically from TCP_Analyzer::EndpointEOF. Clang gave a warning in each case. --- src/BitTorrent.cc | 6 +++--- src/BitTorrent.h | 2 +- src/BitTorrentTracker.cc | 4 ++-- src/BitTorrentTracker.h | 2 +- src/DNS-binpac.cc | 6 +++--- src/DNS-binpac.h | 2 +- src/HTTP-binpac.cc | 6 +++--- src/HTTP-binpac.h | 2 +- src/SOCKS.cc | 6 +++--- src/SOCKS.h | 2 +- src/SSL.cc | 6 +++--- src/SSL.h | 2 +- 12 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/BitTorrent.cc b/src/BitTorrent.cc index 824e4ec98d..fa8fb09e43 100644 --- a/src/BitTorrent.cc +++ b/src/BitTorrent.cc @@ -106,10 +106,10 @@ void BitTorrent_Analyzer::Undelivered(int seq, int len, bool orig) // } } -void BitTorrent_Analyzer::EndpointEOF(TCP_Reassembler* endp) +void BitTorrent_Analyzer::EndpointEOF(bool is_orig) { - TCP_ApplicationAnalyzer::EndpointEOF(endp); - interp->FlowEOF(endp->IsOrig()); + TCP_ApplicationAnalyzer::EndpointEOF(is_orig); + interp->FlowEOF(is_orig); } void BitTorrent_Analyzer::DeliverWeird(const char* msg, bool orig) diff --git a/src/BitTorrent.h b/src/BitTorrent.h index 191b4c50d7..f083cf4fc7 100644 --- a/src/BitTorrent.h +++ b/src/BitTorrent.h @@ -15,7 +15,7 @@ public: virtual void Done(); virtual void DeliverStream(int len, const u_char* data, bool orig); virtual void Undelivered(int seq, int len, bool orig); - virtual void EndpointEOF(TCP_Reassembler* endp); + virtual void EndpointEOF(bool is_orig); static Analyzer* InstantiateAnalyzer(Connection* conn) { return new BitTorrent_Analyzer(conn); } diff --git a/src/BitTorrentTracker.cc b/src/BitTorrentTracker.cc index 995a01dd63..12c5a199de 100644 --- a/src/BitTorrentTracker.cc +++ b/src/BitTorrentTracker.cc @@ -215,9 +215,9 @@ void BitTorrentTracker_Analyzer::Undelivered(int seq, int len, bool orig) stop_resp = true; } -void BitTorrentTracker_Analyzer::EndpointEOF(TCP_Reassembler* endp) +void BitTorrentTracker_Analyzer::EndpointEOF(bool is_orig) { - TCP_ApplicationAnalyzer::EndpointEOF(endp); + TCP_ApplicationAnalyzer::EndpointEOF(is_orig); } void BitTorrentTracker_Analyzer::InitBencParser(void) diff --git a/src/BitTorrentTracker.h b/src/BitTorrentTracker.h index 
d57665d104..3b9efe0430 100644 --- a/src/BitTorrentTracker.h +++ b/src/BitTorrentTracker.h @@ -48,7 +48,7 @@ public: virtual void Done(); virtual void DeliverStream(int len, const u_char* data, bool orig); virtual void Undelivered(int seq, int len, bool orig); - virtual void EndpointEOF(TCP_Reassembler* endp); + virtual void EndpointEOF(bool is_orig); static Analyzer* InstantiateAnalyzer(Connection* conn) { return new BitTorrentTracker_Analyzer(conn); } diff --git a/src/DNS-binpac.cc b/src/DNS-binpac.cc index eb95ac2e1c..999f6015c0 100644 --- a/src/DNS-binpac.cc +++ b/src/DNS-binpac.cc @@ -63,10 +63,10 @@ void DNS_TCP_Analyzer_binpac::Done() interp->FlowEOF(false); } -void DNS_TCP_Analyzer_binpac::EndpointEOF(TCP_Reassembler* endp) +void DNS_TCP_Analyzer_binpac::EndpointEOF(bool is_orig) { - TCP_ApplicationAnalyzer::EndpointEOF(endp); - interp->FlowEOF(endp->IsOrig()); + TCP_ApplicationAnalyzer::EndpointEOF(is_orig); + interp->FlowEOF(is_orig); } void DNS_TCP_Analyzer_binpac::DeliverStream(int len, const u_char* data, diff --git a/src/DNS-binpac.h b/src/DNS-binpac.h index 9e8cb16f69..0bbacf9192 100644 --- a/src/DNS-binpac.h +++ b/src/DNS-binpac.h @@ -45,7 +45,7 @@ public: virtual void Done(); virtual void DeliverStream(int len, const u_char* data, bool orig); virtual void Undelivered(int seq, int len, bool orig); - virtual void EndpointEOF(TCP_Reassembler* endp); + virtual void EndpointEOF(bool is_orig); static Analyzer* InstantiateAnalyzer(Connection* conn) { return new DNS_TCP_Analyzer_binpac(conn); } diff --git a/src/HTTP-binpac.cc b/src/HTTP-binpac.cc index 70cf37457b..47b2c479ec 100644 --- a/src/HTTP-binpac.cc +++ b/src/HTTP-binpac.cc @@ -20,10 +20,10 @@ void HTTP_Analyzer_binpac::Done() interp->FlowEOF(false); } -void HTTP_Analyzer_binpac::EndpointEOF(TCP_Reassembler* endp) +void HTTP_Analyzer_binpac::EndpointEOF(bool is_orig) { - TCP_ApplicationAnalyzer::EndpointEOF(endp); - interp->FlowEOF(endp->IsOrig()); + TCP_ApplicationAnalyzer::EndpointEOF(is_orig); + interp->FlowEOF(is_orig); } void HTTP_Analyzer_binpac::DeliverStream(int len, const u_char* data, bool orig) diff --git a/src/HTTP-binpac.h b/src/HTTP-binpac.h index 62b6fd0db3..ef7cc7dd7d 100644 --- a/src/HTTP-binpac.h +++ b/src/HTTP-binpac.h @@ -13,7 +13,7 @@ public: virtual void Done(); virtual void DeliverStream(int len, const u_char* data, bool orig); virtual void Undelivered(int seq, int len, bool orig); - virtual void EndpointEOF(TCP_Reassembler* endp); + virtual void EndpointEOF(bool is_orig); static Analyzer* InstantiateAnalyzer(Connection* conn) { return new HTTP_Analyzer_binpac(conn); } diff --git a/src/SOCKS.cc b/src/SOCKS.cc index 02429aa208..4a6eda7043 100644 --- a/src/SOCKS.cc +++ b/src/SOCKS.cc @@ -31,10 +31,10 @@ void SOCKS_Analyzer::Done() interp->FlowEOF(false); } -void SOCKS_Analyzer::EndpointEOF(TCP_Reassembler* endp) +void SOCKS_Analyzer::EndpointEOF(bool is_orig) { - TCP_ApplicationAnalyzer::EndpointEOF(endp); - interp->FlowEOF(endp->IsOrig()); + TCP_ApplicationAnalyzer::EndpointEOF(is_orig); + interp->FlowEOF(is_orig); } void SOCKS_Analyzer::DeliverStream(int len, const u_char* data, bool orig) diff --git a/src/SOCKS.h b/src/SOCKS.h index c9a7338496..9753abb660 100644 --- a/src/SOCKS.h +++ b/src/SOCKS.h @@ -23,7 +23,7 @@ public: virtual void Done(); virtual void DeliverStream(int len, const u_char* data, bool orig); virtual void Undelivered(int seq, int len, bool orig); - virtual void EndpointEOF(TCP_Reassembler* endp); + virtual void EndpointEOF(bool is_orig); static Analyzer* 
InstantiateAnalyzer(Connection* conn) { return new SOCKS_Analyzer(conn); } diff --git a/src/SSL.cc b/src/SSL.cc index 218b17080b..4658bbbc16 100644 --- a/src/SSL.cc +++ b/src/SSL.cc @@ -23,10 +23,10 @@ void SSL_Analyzer::Done() interp->FlowEOF(false); } -void SSL_Analyzer::EndpointEOF(TCP_Reassembler* endp) +void SSL_Analyzer::EndpointEOF(bool is_orig) { - TCP_ApplicationAnalyzer::EndpointEOF(endp); - interp->FlowEOF(endp->IsOrig()); + TCP_ApplicationAnalyzer::EndpointEOF(is_orig); + interp->FlowEOF(is_orig); } void SSL_Analyzer::DeliverStream(int len, const u_char* data, bool orig) diff --git a/src/SSL.h b/src/SSL.h index c9f8d9be91..d0ef164877 100644 --- a/src/SSL.h +++ b/src/SSL.h @@ -15,7 +15,7 @@ public: virtual void Undelivered(int seq, int len, bool orig); // Overriden from TCP_ApplicationAnalyzer. - virtual void EndpointEOF(TCP_Reassembler* endp); + virtual void EndpointEOF(bool is_orig); static Analyzer* InstantiateAnalyzer(Connection* conn) { return new SSL_Analyzer(conn); } From ce05600a717e31f36170d6c47dabd91bd914cd2d Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 13 Jul 2012 22:24:34 -0400 Subject: [PATCH 476/651] Mozilla's current certificate bundle. --- scripts/base/protocols/ssl/mozilla-ca-list.bro | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/scripts/base/protocols/ssl/mozilla-ca-list.bro b/scripts/base/protocols/ssl/mozilla-ca-list.bro index 4c4dccb755..ad8e445912 100644 --- a/scripts/base/protocols/ssl/mozilla-ca-list.bro +++ b/scripts/base/protocols/ssl/mozilla-ca-list.bro @@ -1,5 +1,5 @@ # Don't edit! This file is automatically generated. -# Generated at: 2011-10-25 11:03:20 -0500 +# Generated at: Fri Jul 13 22:22:40 -0400 2012 @load base/protocols/ssl module SSL; redef root_certs += { @@ -11,7 +11,6 @@ redef root_certs += { ["OU=DSTCA E2,O=Digital Signature Trust Co.,C=US"] = 
"\x30\x82\x03\x29\x30\x82\x02\x92\xA0\x03\x02\x01\x02\x02\x04\x36\x6E\xD3\xCE\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x46\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x24\x30\x22\x06\x03\x55\x04\x0A\x13\x1B\x44\x69\x67\x69\x74\x61\x6C\x20\x53\x69\x67\x6E\x61\x74\x75\x72\x65\x20\x54\x72\x75\x73\x74\x20\x43\x6F\x2E\x31\x11\x30\x0F\x06\x03\x55\x04\x0B\x13\x08\x44\x53\x54\x43\x41\x20\x45\x32\x30\x1E\x17\x0D\x39\x38\x31\x32\x30\x39\x31\x39\x31\x37\x32\x36\x5A\x17\x0D\x31\x38\x31\x32\x30\x39\x31\x39\x34\x37\x32\x36\x5A\x30\x46\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x24\x30\x22\x06\x03\x55\x04\x0A\x13\x1B\x44\x69\x67\x69\x74\x61\x6C\x20\x53\x69\x67\x6E\x61\x74\x75\x72\x65\x20\x54\x72\x75\x73\x74\x20\x43\x6F\x2E\x31\x11\x30\x0F\x06\x03\x55\x04\x0B\x13\x08\x44\x53\x54\x43\x41\x20\x45\x32\x30\x81\x9D\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8B\x00\x30\x81\x87\x02\x81\x81\x00\xBF\x93\x8F\x17\x92\xEF\x33\x13\x18\xEB\x10\x7F\x4E\x16\xBF\xFF\x06\x8F\x2A\x85\xBC\x5E\xF9\x24\xA6\x24\x88\xB6\x03\xB7\xC1\xC3\x5F\x03\x5B\xD1\x6F\xAE\x7E\x42\xEA\x66\x23\xB8\x63\x83\x56\xFB\x28\x2D\xE1\x38\x8B\xB4\xEE\xA8\x01\xE1\xCE\x1C\xB6\x88\x2A\x22\x46\x85\xFB\x9F\xA7\x70\xA9\x47\x14\x3F\xCE\xDE\x65\xF0\xA8\x71\xF7\x4F\x26\x6C\x8C\xBC\xC6\xB5\xEF\xDE\x49\x27\xFF\x48\x2A\x7D\xE8\x4D\x03\xCC\xC7\xB2\x52\xC6\x17\x31\x13\x3B\xB5\x4D\xDB\xC8\xC4\xF6\xC3\x0F\x24\x2A\xDA\x0C\x9D\xE7\x91\x5B\x80\xCD\x94\x9D\x02\x01\x03\xA3\x82\x01\x24\x30\x82\x01\x20\x30\x11\x06\x09\x60\x86\x48\x01\x86\xF8\x42\x01\x01\x04\x04\x03\x02\x00\x07\x30\x68\x06\x03\x55\x1D\x1F\x04\x61\x30\x5F\x30\x5D\xA0\x5B\xA0\x59\xA4\x57\x30\x55\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x24\x30\x22\x06\x03\x55\x04\x0A\x13\x1B\x44\x69\x67\x69\x74\x61\x6C\x20\x53\x69\x67\x6E\x61\x74\x75\x72\x65\x20\x54\x72\x75\x73\x74\x20\x43\x6F\x2E\x31\x11\x30\x0F\x06\x03\x55\x04\x0B\x13\x08\x44\x53\x54\x43\x41\x20\x45\x32\x31\x0D\x30\x0B\x06\x03\x55\x04\x03\x13\x04\x43\x52\x4C\x31\x30\x2B\x06\x03\x55\x1D\x10\x04\x24\x30\x22\x80\x0F\x31\x39\x39\x38\x31\x32\x30\x39\x31\x39\x31\x37\x32\x36\x5A\x81\x0F\x32\x30\x31\x38\x31\x32\x30\x39\x31\x39\x31\x37\x32\x36\x5A\x30\x0B\x06\x03\x55\x1D\x0F\x04\x04\x03\x02\x01\x06\x30\x1F\x06\x03\x55\x1D\x23\x04\x18\x30\x16\x80\x14\x1E\x82\x4D\x28\x65\x80\x3C\xC9\x41\x6E\xAC\x35\x2E\x5A\xCB\xDE\xEE\xF8\x39\x5B\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x1E\x82\x4D\x28\x65\x80\x3C\xC9\x41\x6E\xAC\x35\x2E\x5A\xCB\xDE\xEE\xF8\x39\x5B\x30\x0C\x06\x03\x55\x1D\x13\x04\x05\x30\x03\x01\x01\xFF\x30\x19\x06\x09\x2A\x86\x48\x86\xF6\x7D\x07\x41\x00\x04\x0C\x30\x0A\x1B\x04\x56\x34\x2E\x30\x03\x02\x04\x90\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x81\x81\x00\x47\x8D\x83\xAD\x62\xF2\xDB\xB0\x9E\x45\x22\x05\xB9\xA2\xD6\x03\x0E\x38\x72\xE7\x9E\xFC\x7B\xE6\x93\xB6\x9A\xA5\xA2\x94\xC8\x34\x1D\x91\xD1\xC5\xD7\xF4\x0A\x25\x0F\x3D\x78\x81\x9E\x0F\xB1\x67\xC4\x90\x4C\x63\xDD\x5E\xA7\xE2\xBA\x9F\xF5\xF7\x4D\xA5\x31\x7B\x9C\x29\x2D\x4C\xFE\x64\x3E\xEC\xB6\x53\xFE\xEA\x9B\xED\x82\xDB\x74\x75\x4B\x07\x79\x6E\x1E\xD8\x19\x83\x73\xDE\xF5\x3E\xD0\xB5\xDE\xE7\x4B\x68\x7D\x43\x2E\x2A\x20\xE1\x7E\xA0\x78\x44\x9E\x08\xF5\x98\xF9\xC7\x7F\x1B\x1B\xD6\x06\x20\x02\x58\xA1\xC3\xA2\x03", ["OU=Class 3 Public Primary Certification Authority,O=VeriSign\, Inc.,C=US"] = 
"\x30\x82\x02\x3C\x30\x82\x01\xA5\x02\x10\x70\xBA\xE4\x1D\x10\xD9\x29\x34\xB6\x38\xCA\x7B\x03\xCC\xBA\xBF\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x02\x05\x00\x30\x5F\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x31\x37\x30\x35\x06\x03\x55\x04\x0B\x13\x2E\x43\x6C\x61\x73\x73\x20\x33\x20\x50\x75\x62\x6C\x69\x63\x20\x50\x72\x69\x6D\x61\x72\x79\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x30\x1E\x17\x0D\x39\x36\x30\x31\x32\x39\x30\x30\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x30\x38\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x30\x5F\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x31\x37\x30\x35\x06\x03\x55\x04\x0B\x13\x2E\x43\x6C\x61\x73\x73\x20\x33\x20\x50\x75\x62\x6C\x69\x63\x20\x50\x72\x69\x6D\x61\x72\x79\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x30\x81\x9F\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8D\x00\x30\x81\x89\x02\x81\x81\x00\xC9\x5C\x59\x9E\xF2\x1B\x8A\x01\x14\xB4\x10\xDF\x04\x40\xDB\xE3\x57\xAF\x6A\x45\x40\x8F\x84\x0C\x0B\xD1\x33\xD9\xD9\x11\xCF\xEE\x02\x58\x1F\x25\xF7\x2A\xA8\x44\x05\xAA\xEC\x03\x1F\x78\x7F\x9E\x93\xB9\x9A\x00\xAA\x23\x7D\xD6\xAC\x85\xA2\x63\x45\xC7\x72\x27\xCC\xF4\x4C\xC6\x75\x71\xD2\x39\xEF\x4F\x42\xF0\x75\xDF\x0A\x90\xC6\x8E\x20\x6F\x98\x0F\xF8\xAC\x23\x5F\x70\x29\x36\xA4\xC9\x86\xE7\xB1\x9A\x20\xCB\x53\xA5\x85\xE7\x3D\xBE\x7D\x9A\xFE\x24\x45\x33\xDC\x76\x15\xED\x0F\xA2\x71\x64\x4C\x65\x2E\x81\x68\x45\xA7\x02\x03\x01\x00\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x02\x05\x00\x03\x81\x81\x00\xBB\x4C\x12\x2B\xCF\x2C\x26\x00\x4F\x14\x13\xDD\xA6\xFB\xFC\x0A\x11\x84\x8C\xF3\x28\x1C\x67\x92\x2F\x7C\xB6\xC5\xFA\xDF\xF0\xE8\x95\xBC\x1D\x8F\x6C\x2C\xA8\x51\xCC\x73\xD8\xA4\xC0\x53\xF0\x4E\xD6\x26\xC0\x76\x01\x57\x81\x92\x5E\x21\xF1\xD1\xB1\xFF\xE7\xD0\x21\x58\xCD\x69\x17\xE3\x44\x1C\x9C\x19\x44\x39\x89\x5C\xDC\x9C\x00\x0F\x56\x8D\x02\x99\xED\xA2\x90\x45\x4C\xE4\xBB\x10\xA4\x3D\xF0\x32\x03\x0E\xF1\xCE\xF8\xE8\xC9\x51\x8C\xE6\x62\x9F\xE6\x9F\xC0\x7D\xB7\x72\x9C\xC9\x36\x3A\x6B\x9F\x4E\xA8\xFF\x64\x0D\x64", ["OU=VeriSign Trust Network,OU=(c) 1998 VeriSign\, Inc. 
- For authorized use only,OU=Class 3 Public Primary Certification Authority - G2,O=VeriSign\, Inc.,C=US"] = "\x30\x82\x03\x02\x30\x82\x02\x6B\x02\x10\x7D\xD9\xFE\x07\xCF\xA8\x1E\xB7\x10\x79\x67\xFB\xA7\x89\x34\xC6\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x81\xC1\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x31\x3C\x30\x3A\x06\x03\x55\x04\x0B\x13\x33\x43\x6C\x61\x73\x73\x20\x33\x20\x50\x75\x62\x6C\x69\x63\x20\x50\x72\x69\x6D\x61\x72\x79\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x20\x2D\x20\x47\x32\x31\x3A\x30\x38\x06\x03\x55\x04\x0B\x13\x31\x28\x63\x29\x20\x31\x39\x39\x38\x20\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x20\x2D\x20\x46\x6F\x72\x20\x61\x75\x74\x68\x6F\x72\x69\x7A\x65\x64\x20\x75\x73\x65\x20\x6F\x6E\x6C\x79\x31\x1F\x30\x1D\x06\x03\x55\x04\x0B\x13\x16\x56\x65\x72\x69\x53\x69\x67\x6E\x20\x54\x72\x75\x73\x74\x20\x4E\x65\x74\x77\x6F\x72\x6B\x30\x1E\x17\x0D\x39\x38\x30\x35\x31\x38\x30\x30\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x30\x38\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x30\x81\xC1\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x31\x3C\x30\x3A\x06\x03\x55\x04\x0B\x13\x33\x43\x6C\x61\x73\x73\x20\x33\x20\x50\x75\x62\x6C\x69\x63\x20\x50\x72\x69\x6D\x61\x72\x79\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x20\x2D\x20\x47\x32\x31\x3A\x30\x38\x06\x03\x55\x04\x0B\x13\x31\x28\x63\x29\x20\x31\x39\x39\x38\x20\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x20\x2D\x20\x46\x6F\x72\x20\x61\x75\x74\x68\x6F\x72\x69\x7A\x65\x64\x20\x75\x73\x65\x20\x6F\x6E\x6C\x79\x31\x1F\x30\x1D\x06\x03\x55\x04\x0B\x13\x16\x56\x65\x72\x69\x53\x69\x67\x6E\x20\x54\x72\x75\x73\x74\x20\x4E\x65\x74\x77\x6F\x72\x6B\x30\x81\x9F\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8D\x00\x30\x81\x89\x02\x81\x81\x00\xCC\x5E\xD1\x11\x5D\x5C\x69\xD0\xAB\xD3\xB9\x6A\x4C\x99\x1F\x59\x98\x30\x8E\x16\x85\x20\x46\x6D\x47\x3F\xD4\x85\x20\x84\xE1\x6D\xB3\xF8\xA4\xED\x0C\xF1\x17\x0F\x3B\xF9\xA7\xF9\x25\xD7\xC1\xCF\x84\x63\xF2\x7C\x63\xCF\xA2\x47\xF2\xC6\x5B\x33\x8E\x64\x40\x04\x68\xC1\x80\xB9\x64\x1C\x45\x77\xC7\xD8\x6E\xF5\x95\x29\x3C\x50\xE8\x34\xD7\x78\x1F\xA8\xBA\x6D\x43\x91\x95\x8F\x45\x57\x5E\x7E\xC5\xFB\xCA\xA4\x04\xEB\xEA\x97\x37\x54\x30\x6F\xBB\x01\x47\x32\x33\xCD\xDC\x57\x9B\x64\x69\x61\xF8\x9B\x1D\x1C\x89\x4F\x5C\x67\x02\x03\x01\x00\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x81\x81\x00\x51\x4D\xCD\xBE\x5C\xCB\x98\x19\x9C\x15\xB2\x01\x39\x78\x2E\x4D\x0F\x67\x70\x70\x99\xC6\x10\x5A\x94\xA4\x53\x4D\x54\x6D\x2B\xAF\x0D\x5D\x40\x8B\x64\xD3\xD7\xEE\xDE\x56\x61\x92\x5F\xA6\xC4\x1D\x10\x61\x36\xD3\x2C\x27\x3C\xE8\x29\x09\xB9\x11\x64\x74\xCC\xB5\x73\x9F\x1C\x48\xA9\xBC\x61\x01\xEE\xE2\x17\xA6\x0C\xE3\x40\x08\x3B\x0E\xE7\xEB\x44\x73\x2A\x9A\xF1\x69\x92\xEF\x71\x14\xC3\x39\xAC\x71\xA7\x91\x09\x6F\xE4\x71\x06\xB3\xBA\x59\x57\x26\x79\x00\xF6\xF8\x0D\xA2\x33\x30\x28\xD4\xAA\x58\xA0\x9D\x9D\x69\x91\xFD", - ["OU=VeriSign Trust Network,OU=(c) 1998 VeriSign\, Inc. 
- For authorized use only,OU=Class 4 Public Primary Certification Authority - G2,O=VeriSign\, Inc.,C=US"] = "\x30\x82\x03\x02\x30\x82\x02\x6B\x02\x10\x32\x88\x8E\x9A\xD2\xF5\xEB\x13\x47\xF8\x7F\xC4\x20\x37\x25\xF8\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x81\xC1\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x31\x3C\x30\x3A\x06\x03\x55\x04\x0B\x13\x33\x43\x6C\x61\x73\x73\x20\x34\x20\x50\x75\x62\x6C\x69\x63\x20\x50\x72\x69\x6D\x61\x72\x79\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x20\x2D\x20\x47\x32\x31\x3A\x30\x38\x06\x03\x55\x04\x0B\x13\x31\x28\x63\x29\x20\x31\x39\x39\x38\x20\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x20\x2D\x20\x46\x6F\x72\x20\x61\x75\x74\x68\x6F\x72\x69\x7A\x65\x64\x20\x75\x73\x65\x20\x6F\x6E\x6C\x79\x31\x1F\x30\x1D\x06\x03\x55\x04\x0B\x13\x16\x56\x65\x72\x69\x53\x69\x67\x6E\x20\x54\x72\x75\x73\x74\x20\x4E\x65\x74\x77\x6F\x72\x6B\x30\x1E\x17\x0D\x39\x38\x30\x35\x31\x38\x30\x30\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x30\x38\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x30\x81\xC1\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x31\x3C\x30\x3A\x06\x03\x55\x04\x0B\x13\x33\x43\x6C\x61\x73\x73\x20\x34\x20\x50\x75\x62\x6C\x69\x63\x20\x50\x72\x69\x6D\x61\x72\x79\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x20\x2D\x20\x47\x32\x31\x3A\x30\x38\x06\x03\x55\x04\x0B\x13\x31\x28\x63\x29\x20\x31\x39\x39\x38\x20\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x20\x2D\x20\x46\x6F\x72\x20\x61\x75\x74\x68\x6F\x72\x69\x7A\x65\x64\x20\x75\x73\x65\x20\x6F\x6E\x6C\x79\x31\x1F\x30\x1D\x06\x03\x55\x04\x0B\x13\x16\x56\x65\x72\x69\x53\x69\x67\x6E\x20\x54\x72\x75\x73\x74\x20\x4E\x65\x74\x77\x6F\x72\x6B\x30\x81\x9F\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8D\x00\x30\x81\x89\x02\x81\x81\x00\xBA\xF0\xE4\xCF\xF9\xC4\xAE\x85\x54\xB9\x07\x57\xF9\x8F\xC5\x7F\x68\x11\xF8\xC4\x17\xB0\x44\xDC\xE3\x30\x73\xD5\x2A\x62\x2A\xB8\xD0\xCC\x1C\xED\x28\x5B\x7E\xBD\x6A\xDC\xB3\x91\x24\xCA\x41\x62\x3C\xFC\x02\x01\xBF\x1C\x16\x31\x94\x05\x97\x76\x6E\xA2\xAD\xBD\x61\x17\x6C\x4E\x30\x86\xF0\x51\x37\x2A\x50\xC7\xA8\x62\x81\xDC\x5B\x4A\xAA\xC1\xA0\xB4\x6E\xEB\x2F\xE5\x57\xC5\xB1\x2B\x40\x70\xDB\x5A\x4D\xA1\x8E\x1F\xBD\x03\x1F\xD8\x03\xD4\x8F\x4C\x99\x71\xBC\xE2\x82\xCC\x58\xE8\x98\x3A\x86\xD3\x86\x38\xF3\x00\x29\x1F\x02\x03\x01\x00\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x81\x81\x00\x85\x8C\x12\xC1\xA7\xB9\x50\x15\x7A\xCB\x3E\xAC\xB8\x43\x8A\xDC\xAA\xDD\x14\xBA\x89\x81\x7E\x01\x3C\x23\x71\x21\x88\x2F\x82\xDC\x63\xFA\x02\x45\xAC\x45\x59\xD7\x2A\x58\x44\x5B\xB7\x9F\x81\x3B\x92\x68\x3D\xE2\x37\x24\xF5\x7B\x6C\x8F\x76\x35\x96\x09\xA8\x59\x9D\xB9\xCE\x23\xAB\x74\xD6\x83\xFD\x32\x73\x27\xD8\x69\x3E\x43\x74\xF6\xAE\xC5\x89\x9A\xE7\x53\x7C\xE9\x7B\xF6\x4B\xF3\xC1\x65\x83\xDE\x8D\x8A\x9C\x3C\x88\x8D\x39\x59\xFC\xAA\x3F\x22\x8D\xA1\xC1\x66\x50\x81\x72\x4C\xED\x22\x64\x4F\x4F\xCA\x80\x91\xB6\x29", ["CN=GlobalSign Root CA,OU=Root CA,O=GlobalSign nv-sa,C=BE"] = 
"\x30\x82\x03\x75\x30\x82\x02\x5D\xA0\x03\x02\x01\x02\x02\x0B\x04\x00\x00\x00\x00\x01\x15\x4B\x5A\xC3\x94\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x57\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x42\x45\x31\x19\x30\x17\x06\x03\x55\x04\x0A\x13\x10\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x6E\x76\x2D\x73\x61\x31\x10\x30\x0E\x06\x03\x55\x04\x0B\x13\x07\x52\x6F\x6F\x74\x20\x43\x41\x31\x1B\x30\x19\x06\x03\x55\x04\x03\x13\x12\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x1E\x17\x0D\x39\x38\x30\x39\x30\x31\x31\x32\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x30\x31\x32\x38\x31\x32\x30\x30\x30\x30\x5A\x30\x57\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x42\x45\x31\x19\x30\x17\x06\x03\x55\x04\x0A\x13\x10\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x6E\x76\x2D\x73\x61\x31\x10\x30\x0E\x06\x03\x55\x04\x0B\x13\x07\x52\x6F\x6F\x74\x20\x43\x41\x31\x1B\x30\x19\x06\x03\x55\x04\x03\x13\x12\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xDA\x0E\xE6\x99\x8D\xCE\xA3\xE3\x4F\x8A\x7E\xFB\xF1\x8B\x83\x25\x6B\xEA\x48\x1F\xF1\x2A\xB0\xB9\x95\x11\x04\xBD\xF0\x63\xD1\xE2\x67\x66\xCF\x1C\xDD\xCF\x1B\x48\x2B\xEE\x8D\x89\x8E\x9A\xAF\x29\x80\x65\xAB\xE9\xC7\x2D\x12\xCB\xAB\x1C\x4C\x70\x07\xA1\x3D\x0A\x30\xCD\x15\x8D\x4F\xF8\xDD\xD4\x8C\x50\x15\x1C\xEF\x50\xEE\xC4\x2E\xF7\xFC\xE9\x52\xF2\x91\x7D\xE0\x6D\xD5\x35\x30\x8E\x5E\x43\x73\xF2\x41\xE9\xD5\x6A\xE3\xB2\x89\x3A\x56\x39\x38\x6F\x06\x3C\x88\x69\x5B\x2A\x4D\xC5\xA7\x54\xB8\x6C\x89\xCC\x9B\xF9\x3C\xCA\xE5\xFD\x89\xF5\x12\x3C\x92\x78\x96\xD6\xDC\x74\x6E\x93\x44\x61\xD1\x8D\xC7\x46\xB2\x75\x0E\x86\xE8\x19\x8A\xD5\x6D\x6C\xD5\x78\x16\x95\xA2\xE9\xC8\x0A\x38\xEB\xF2\x24\x13\x4F\x73\x54\x93\x13\x85\x3A\x1B\xBC\x1E\x34\xB5\x8B\x05\x8C\xB9\x77\x8B\xB1\xDB\x1F\x20\x91\xAB\x09\x53\x6E\x90\xCE\x7B\x37\x74\xB9\x70\x47\x91\x22\x51\x63\x16\x79\xAE\xB1\xAE\x41\x26\x08\xC8\x19\x2B\xD1\x46\xAA\x48\xD6\x64\x2A\xD7\x83\x34\xFF\x2C\x2A\xC1\x6C\x19\x43\x4A\x07\x85\xE7\xD3\x7C\xF6\x21\x68\xEF\xEA\xF2\x52\x9F\x7F\x93\x90\xCF\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x60\x7B\x66\x1A\x45\x0D\x97\xCA\x89\x50\x2F\x7D\x04\xCD\x34\xA8\xFF\xFC\xFD\x4B\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\xD6\x73\xE7\x7C\x4F\x76\xD0\x8D\xBF\xEC\xBA\xA2\xBE\x34\xC5\x28\x32\xB5\x7C\xFC\x6C\x9C\x2C\x2B\xBD\x09\x9E\x53\xBF\x6B\x5E\xAA\x11\x48\xB6\xE5\x08\xA3\xB3\xCA\x3D\x61\x4D\xD3\x46\x09\xB3\x3E\xC3\xA0\xE3\x63\x55\x1B\xF2\xBA\xEF\xAD\x39\xE1\x43\xB9\x38\xA3\xE6\x2F\x8A\x26\x3B\xEF\xA0\x50\x56\xF9\xC6\x0A\xFD\x38\xCD\xC4\x0B\x70\x51\x94\x97\x98\x04\xDF\xC3\x5F\x94\xD5\x15\xC9\x14\x41\x9C\xC4\x5D\x75\x64\x15\x0D\xFF\x55\x30\xEC\x86\x8F\xFF\x0D\xEF\x2C\xB9\x63\x46\xF6\xAA\xFC\xDF\xBC\x69\xFD\x2E\x12\x48\x64\x9A\xE0\x95\xF0\xA6\xEF\x29\x8F\x01\xB1\x15\xB5\x0C\x1D\xA5\xFE\x69\x2C\x69\x24\x78\x1E\xB3\xA7\x1C\x71\x62\xEE\xCA\xC8\x97\xAC\x17\x5D\x8A\xC2\xF8\x47\x86\x6E\x2A\xC4\x56\x31\x95\xD0\x67\x89\x85\x2B\xF9\x6C\xA6\x5D\x46\x9D\x0C\xAA\x82\xE4\x99\x51\xDD\x70\xB7\xDB\x56\x3D\x61\xE4\x6A\xE1\x5C\xD6\xF6\xFE\x3D\xDE\x41\xCC\x07\xAE\x63\x52\xBF\x53\x53\xF4\x2B\xE9\xC7\xFD\xB6\xF7\x82\x5F\x85\xD2\x41\x18\xDB\x81\xB3\x04\x1C\xC5\x1F\xA4\x80\x6F\x15\x20\xC9\xDE\x0C\x88\x0A\x1D\xD6\x66\x55\xE2\xFC\x48\xC9\x29\x26\x69\x
E0", ["CN=GlobalSign,O=GlobalSign,OU=GlobalSign Root CA - R2"] = "\x30\x82\x03\xBA\x30\x82\x02\xA2\xA0\x03\x02\x01\x02\x02\x0B\x04\x00\x00\x00\x00\x01\x0F\x86\x26\xE6\x0D\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x4C\x31\x20\x30\x1E\x06\x03\x55\x04\x0B\x13\x17\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x20\x2D\x20\x52\x32\x31\x13\x30\x11\x06\x03\x55\x04\x0A\x13\x0A\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x31\x13\x30\x11\x06\x03\x55\x04\x03\x13\x0A\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x30\x1E\x17\x0D\x30\x36\x31\x32\x31\x35\x30\x38\x30\x30\x30\x30\x5A\x17\x0D\x32\x31\x31\x32\x31\x35\x30\x38\x30\x30\x30\x30\x5A\x30\x4C\x31\x20\x30\x1E\x06\x03\x55\x04\x0B\x13\x17\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x20\x2D\x20\x52\x32\x31\x13\x30\x11\x06\x03\x55\x04\x0A\x13\x0A\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x31\x13\x30\x11\x06\x03\x55\x04\x03\x13\x0A\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xA6\xCF\x24\x0E\xBE\x2E\x6F\x28\x99\x45\x42\xC4\xAB\x3E\x21\x54\x9B\x0B\xD3\x7F\x84\x70\xFA\x12\xB3\xCB\xBF\x87\x5F\xC6\x7F\x86\xD3\xB2\x30\x5C\xD6\xFD\xAD\xF1\x7B\xDC\xE5\xF8\x60\x96\x09\x92\x10\xF5\xD0\x53\xDE\xFB\x7B\x7E\x73\x88\xAC\x52\x88\x7B\x4A\xA6\xCA\x49\xA6\x5E\xA8\xA7\x8C\x5A\x11\xBC\x7A\x82\xEB\xBE\x8C\xE9\xB3\xAC\x96\x25\x07\x97\x4A\x99\x2A\x07\x2F\xB4\x1E\x77\xBF\x8A\x0F\xB5\x02\x7C\x1B\x96\xB8\xC5\xB9\x3A\x2C\xBC\xD6\x12\xB9\xEB\x59\x7D\xE2\xD0\x06\x86\x5F\x5E\x49\x6A\xB5\x39\x5E\x88\x34\xEC\xBC\x78\x0C\x08\x98\x84\x6C\xA8\xCD\x4B\xB4\xA0\x7D\x0C\x79\x4D\xF0\xB8\x2D\xCB\x21\xCA\xD5\x6C\x5B\x7D\xE1\xA0\x29\x84\xA1\xF9\xD3\x94\x49\xCB\x24\x62\x91\x20\xBC\xDD\x0B\xD5\xD9\xCC\xF9\xEA\x27\x0A\x2B\x73\x91\xC6\x9D\x1B\xAC\xC8\xCB\xE8\xE0\xA0\xF4\x2F\x90\x8B\x4D\xFB\xB0\x36\x1B\xF6\x19\x7A\x85\xE0\x6D\xF2\x61\x13\x88\x5C\x9F\xE0\x93\x0A\x51\x97\x8A\x5A\xCE\xAF\xAB\xD5\xF7\xAA\x09\xAA\x60\xBD\xDC\xD9\x5F\xDF\x72\xA9\x60\x13\x5E\x00\x01\xC9\x4A\xFA\x3F\xA4\xEA\x07\x03\x21\x02\x8E\x82\xCA\x03\xC2\x9B\x8F\x02\x03\x01\x00\x01\xA3\x81\x9C\x30\x81\x99\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x9B\xE2\x07\x57\x67\x1C\x1E\xC0\x6A\x06\xDE\x59\xB4\x9A\x2D\xDF\xDC\x19\x86\x2E\x30\x36\x06\x03\x55\x1D\x1F\x04\x2F\x30\x2D\x30\x2B\xA0\x29\xA0\x27\x86\x25\x68\x74\x74\x70\x3A\x2F\x2F\x63\x72\x6C\x2E\x67\x6C\x6F\x62\x61\x6C\x73\x69\x67\x6E\x2E\x6E\x65\x74\x2F\x72\x6F\x6F\x74\x2D\x72\x32\x2E\x63\x72\x6C\x30\x1F\x06\x03\x55\x1D\x23\x04\x18\x30\x16\x80\x14\x9B\xE2\x07\x57\x67\x1C\x1E\xC0\x6A\x06\xDE\x59\xB4\x9A\x2D\xDF\xDC\x19\x86\x2E\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x99\x81\x53\x87\x1C\x68\x97\x86\x91\xEC\xE0\x4A\xB8\x44\x0B\xAB\x81\xAC\x27\x4F\xD6\xC1\xB8\x1C\x43\x78\xB3\x0C\x9A\xFC\xEA\x2C\x3C\x6E\x61\x1B\x4D\x4B\x29\xF5\x9F\x05\x1D\x26\xC1\xB8\xE9\x83\x00\x62\x45\xB6\xA9\x08\x93\xB9\xA9\x33\x4B\x18\x9A\xC2\xF8\x87\x88\x4E\xDB\xDD\x71\x34\x1A\xC1\x54\xDA\x46\x3F\xE0\xD3\x2A\xAB\x6D\x54\x22\xF5\x3A\x62\xCD\x20\x6F\xBA\x29\x89\xD7\xDD\x91\xEE\xD3\x5C\xA2\x3E\xA1\x5B\x41\xF5\xDF\xE5\x64\x43\x2D\xE9\xD5\x39\xAB\xD2\xA2\xDF\xB7\x8B\xD0\xC0\x80\x19\x1C\x45\xC0\x2D\x8C\xE8\xF8\x2D\xA4\x74\x56\x49\xC5\x05\xB5\x4F\x15\xDE\x6E\x44\x78\x39\x87\xA8\x7E\xBB\xF3\x79\x18\x91\xBB\xF4\x6F\x9D\xC1\xF0\x8C\x35\x8C\x5D\x01\xFB\xC3\x6D\xB9\xEF\x44\x6D\
x79\x46\x31\x7E\x0A\xFE\xA9\x82\xC1\xFF\xEF\xAB\x6E\x20\xC4\x50\xC9\x5F\x9D\x4D\x9B\x17\x8C\x0C\xE5\x01\xC9\xA0\x41\x6A\x73\x53\xFA\xA5\x50\xB4\x6E\x25\x0F\xFB\x4C\x18\xF4\xFD\x52\xD9\x8E\x69\xB1\xE8\x11\x0F\xDE\x88\xD8\xFB\x1D\x49\xF7\xAA\xDE\x95\xCF\x20\x78\xC2\x60\x12\xDB\x25\x40\x8C\x6A\xFC\x7E\x42\x38\x40\x64\x12\xF7\x9E\x81\xE1\x93\x2E", ["emailAddress=info@valicert.com,CN=http://www.valicert.com/,OU=ValiCert Class 1 Policy Validation Authority,O=ValiCert\, Inc.,L=ValiCert Validation Network"] = "\x30\x82\x02\xE7\x30\x82\x02\x50\x02\x01\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x81\xBB\x31\x24\x30\x22\x06\x03\x55\x04\x07\x13\x1B\x56\x61\x6C\x69\x43\x65\x72\x74\x20\x56\x61\x6C\x69\x64\x61\x74\x69\x6F\x6E\x20\x4E\x65\x74\x77\x6F\x72\x6B\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x61\x6C\x69\x43\x65\x72\x74\x2C\x20\x49\x6E\x63\x2E\x31\x35\x30\x33\x06\x03\x55\x04\x0B\x13\x2C\x56\x61\x6C\x69\x43\x65\x72\x74\x20\x43\x6C\x61\x73\x73\x20\x31\x20\x50\x6F\x6C\x69\x63\x79\x20\x56\x61\x6C\x69\x64\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x31\x21\x30\x1F\x06\x03\x55\x04\x03\x13\x18\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x76\x61\x6C\x69\x63\x65\x72\x74\x2E\x63\x6F\x6D\x2F\x31\x20\x30\x1E\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x09\x01\x16\x11\x69\x6E\x66\x6F\x40\x76\x61\x6C\x69\x63\x65\x72\x74\x2E\x63\x6F\x6D\x30\x1E\x17\x0D\x39\x39\x30\x36\x32\x35\x32\x32\x32\x33\x34\x38\x5A\x17\x0D\x31\x39\x30\x36\x32\x35\x32\x32\x32\x33\x34\x38\x5A\x30\x81\xBB\x31\x24\x30\x22\x06\x03\x55\x04\x07\x13\x1B\x56\x61\x6C\x69\x43\x65\x72\x74\x20\x56\x61\x6C\x69\x64\x61\x74\x69\x6F\x6E\x20\x4E\x65\x74\x77\x6F\x72\x6B\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x61\x6C\x69\x43\x65\x72\x74\x2C\x20\x49\x6E\x63\x2E\x31\x35\x30\x33\x06\x03\x55\x04\x0B\x13\x2C\x56\x61\x6C\x69\x43\x65\x72\x74\x20\x43\x6C\x61\x73\x73\x20\x31\x20\x50\x6F\x6C\x69\x63\x79\x20\x56\x61\x6C\x69\x64\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x31\x21\x30\x1F\x06\x03\x55\x04\x03\x13\x18\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x76\x61\x6C\x69\x63\x65\x72\x74\x2E\x63\x6F\x6D\x2F\x31\x20\x30\x1E\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x09\x01\x16\x11\x69\x6E\x66\x6F\x40\x76\x61\x6C\x69\x63\x65\x72\x74\x2E\x63\x6F\x6D\x30\x81\x9F\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8D\x00\x30\x81\x89\x02\x81\x81\x00\xD8\x59\x82\x7A\x89\xB8\x96\xBA\xA6\x2F\x68\x6F\x58\x2E\xA7\x54\x1C\x06\x6E\xF4\xEA\x8D\x48\xBC\x31\x94\x17\xF0\xF3\x4E\xBC\xB2\xB8\x35\x92\x76\xB0\xD0\xA5\xA5\x01\xD7\x00\x03\x12\x22\x19\x08\xF8\xFF\x11\x23\x9B\xCE\x07\xF5\xBF\x69\x1A\x26\xFE\x4E\xE9\xD1\x7F\x9D\x2C\x40\x1D\x59\x68\x6E\xA6\xF8\x58\xB0\x9D\x1A\x8F\xD3\x3F\xF1\xDC\x19\x06\x81\xA8\x0E\xE0\x3A\xDD\xC8\x53\x45\x09\x06\xE6\x0F\x70\xC3\xFA\x40\xA6\x0E\xE2\x56\x05\x0F\x18\x4D\xFC\x20\x82\xD1\x73\x55\x74\x8D\x76\x72\xA0\x1D\x9D\x1D\xC0\xDD\x3F\x71\x02\x03\x01\x00\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x81\x81\x00\x50\x68\x3D\x49\xF4\x2C\x1C\x06\x94\xDF\x95\x60\x7F\x96\x7B\x17\xFE\x4F\x71\xAD\x64\xC8\xDD\x77\xD2\xEF\x59\x55\xE8\x3F\xE8\x8E\x05\x2A\x21\xF2\x07\xD2\xB5\xA7\x52\xFE\x9C\xB1\xB6\xE2\x5B\x77\x17\x40\xEA\x72\xD6\x23\xCB\x28\x81\x32\xC3\x00\x79\x18\xEC\x59\x17\x89\xC9\xC6\x6A\x1E\x71\xC9\xFD\xB7\x74\xA5\x25\x45\x69\xC5\x48\xAB\x19\xE1\x45\x8A\x25\x6B\x19\xEE\xE5\xBB\x12\xF5\x7F\xF7\xA6\x8D\x51\xC3\xF0\x9D\x74\xB7\xA9\x3E\xA0\xA5\xFF\xB6\x49\x03\x13\xDA\x22\xCC\xED\x71\x82\x2B\x99\xCF\x3A\xB7\xF5\x2D\x72\xC8", @@ -38,8 +37,6 @@ redef root_certs += { ["CN=America 
Online Root Certification Authority 1,O=America Online Inc.,C=US"] = "\x30\x82\x03\xA4\x30\x82\x02\x8C\xA0\x03\x02\x01\x02\x02\x01\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x63\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x1C\x30\x1A\x06\x03\x55\x04\x0A\x13\x13\x41\x6D\x65\x72\x69\x63\x61\x20\x4F\x6E\x6C\x69\x6E\x65\x20\x49\x6E\x63\x2E\x31\x36\x30\x34\x06\x03\x55\x04\x03\x13\x2D\x41\x6D\x65\x72\x69\x63\x61\x20\x4F\x6E\x6C\x69\x6E\x65\x20\x52\x6F\x6F\x74\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x20\x31\x30\x1E\x17\x0D\x30\x32\x30\x35\x32\x38\x30\x36\x30\x30\x30\x30\x5A\x17\x0D\x33\x37\x31\x31\x31\x39\x32\x30\x34\x33\x30\x30\x5A\x30\x63\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x1C\x30\x1A\x06\x03\x55\x04\x0A\x13\x13\x41\x6D\x65\x72\x69\x63\x61\x20\x4F\x6E\x6C\x69\x6E\x65\x20\x49\x6E\x63\x2E\x31\x36\x30\x34\x06\x03\x55\x04\x03\x13\x2D\x41\x6D\x65\x72\x69\x63\x61\x20\x4F\x6E\x6C\x69\x6E\x65\x20\x52\x6F\x6F\x74\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x20\x31\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xA8\x2F\xE8\xA4\x69\x06\x03\x47\xC3\xE9\x2A\x98\xFF\x19\xA2\x70\x9A\xC6\x50\xB2\x7E\xA5\xDF\x68\x4D\x1B\x7C\x0F\xB6\x97\x68\x7D\x2D\xA6\x8B\x97\xE9\x64\x86\xC9\xA3\xEF\xA0\x86\xBF\x60\x65\x9C\x4B\x54\x88\xC2\x48\xC5\x4A\x39\xBF\x14\xE3\x59\x55\xE5\x19\xB4\x74\xC8\xB4\x05\x39\x5C\x16\xA5\xE2\x95\x05\xE0\x12\xAE\x59\x8B\xA2\x33\x68\x58\x1C\xA6\xD4\x15\xB7\xD8\x9F\xD7\xDC\x71\xAB\x7E\x9A\xBF\x9B\x8E\x33\x0F\x22\xFD\x1F\x2E\xE7\x07\x36\xEF\x62\x39\xC5\xDD\xCB\xBA\x25\x14\x23\xDE\x0C\xC6\x3D\x3C\xCE\x82\x08\xE6\x66\x3E\xDA\x51\x3B\x16\x3A\xA3\x05\x7F\xA0\xDC\x87\xD5\x9C\xFC\x72\xA9\xA0\x7D\x78\xE4\xB7\x31\x55\x1E\x65\xBB\xD4\x61\xB0\x21\x60\xED\x10\x32\x72\xC5\x92\x25\x1E\xF8\x90\x4A\x18\x78\x47\xDF\x7E\x30\x37\x3E\x50\x1B\xDB\x1C\xD3\x6B\x9A\x86\x53\x07\xB0\xEF\xAC\x06\x78\xF8\x84\x99\xFE\x21\x8D\x4C\x80\xB6\x0C\x82\xF6\x66\x70\x79\x1A\xD3\x4F\xA3\xCF\xF1\xCF\x46\xB0\x4B\x0F\x3E\xDD\x88\x62\xB8\x8C\xA9\x09\x28\x3B\x7A\xC7\x97\xE1\x1E\xE5\xF4\x9F\xC0\xC0\xAE\x24\xA0\xC8\xA1\xD9\x0F\xD6\x7B\x26\x82\x69\x32\x3D\xA7\x02\x03\x01\x00\x01\xA3\x63\x30\x61\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x00\xAD\xD9\xA3\xF6\x79\xF6\x6E\x74\xA9\x7F\x33\x3D\x81\x17\xD7\x4C\xCF\x33\xDE\x30\x1F\x06\x03\x55\x1D\x23\x04\x18\x30\x16\x80\x14\x00\xAD\xD9\xA3\xF6\x79\xF6\x6E\x74\xA9\x7F\x33\x3D\x81\x17\xD7\x4C\xCF\x33\xDE\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x86\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x7C\x8A\xD1\x1F\x18\x37\x82\xE0\xB8\xB0\xA3\xED\x56\x95\xC8\x62\x61\x9C\x05\xA2\xCD\xC2\x62\x26\x61\xCD\x10\x16\xD7\xCC\xB4\x65\x34\xD0\x11\x8A\xAD\xA8\xA9\x05\x66\xEF\x74\xF3\x6D\x5F\x9D\x99\xAF\xF6\x8B\xFB\xEB\x52\xB2\x05\x98\xA2\x6F\x2A\xC5\x54\xBD\x25\xBD\x5F\xAE\xC8\x86\xEA\x46\x2C\xC1\xB3\xBD\xC1\xE9\x49\x70\x18\x16\x97\x08\x13\x8C\x20\xE0\x1B\x2E\x3A\x47\xCB\x1E\xE4\x00\x30\x95\x5B\xF4\x45\xA3\xC0\x1A\xB0\x01\x4E\xAB\xBD\xC0\x23\x6E\x63\x3F\x80\x4A\xC5\x07\xED\xDC\xE2\x6F\xC7\xC1\x62\xF1\xE3\x72\xD6\x04\xC8\x74\x67\x0B\xFA\x88\xAB\xA1\x01\xC8\x6F\xF0\x14\xAF\xD2\x99\xCD\x51\x93\x7E\xED\x2E\x38\xC7\xBD\xCE\x46\x50\x3D\x72\xE3\x79\x25\x9D\x9B\x88\x2B\x10\x20\xDD\xA5\xB8\x32\x9F\x8D\xE0\x29\xDF\x21\x74\x86\x82\xDB\x2F\x82\x30\xC6\xC7\x35\x86\xB3\xF9\
x96\x5F\x46\xDB\x0C\x45\xFD\xF3\x50\xC3\x6F\xC6\xC3\x48\xAD\x46\xA6\xE1\x27\x47\x0A\x1D\x0E\x9B\xB6\xC2\x77\x7F\x63\xF2\xE0\x7D\x1A\xBE\xFC\xE0\xDF\xD7\xC7\xA7\x6C\xB0\xF9\xAE\xBA\x3C\xFD\x74\xB4\x11\xE8\x58\x0D\x80\xBC\xD3\xA8\x80\x3A\x99\xED\x75\xCC\x46\x7B", ["CN=America Online Root Certification Authority 2,O=America Online Inc.,C=US"] = "\x30\x82\x05\xA4\x30\x82\x03\x8C\xA0\x03\x02\x01\x02\x02\x01\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x63\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x1C\x30\x1A\x06\x03\x55\x04\x0A\x13\x13\x41\x6D\x65\x72\x69\x63\x61\x20\x4F\x6E\x6C\x69\x6E\x65\x20\x49\x6E\x63\x2E\x31\x36\x30\x34\x06\x03\x55\x04\x03\x13\x2D\x41\x6D\x65\x72\x69\x63\x61\x20\x4F\x6E\x6C\x69\x6E\x65\x20\x52\x6F\x6F\x74\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x20\x32\x30\x1E\x17\x0D\x30\x32\x30\x35\x32\x38\x30\x36\x30\x30\x30\x30\x5A\x17\x0D\x33\x37\x30\x39\x32\x39\x31\x34\x30\x38\x30\x30\x5A\x30\x63\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x1C\x30\x1A\x06\x03\x55\x04\x0A\x13\x13\x41\x6D\x65\x72\x69\x63\x61\x20\x4F\x6E\x6C\x69\x6E\x65\x20\x49\x6E\x63\x2E\x31\x36\x30\x34\x06\x03\x55\x04\x03\x13\x2D\x41\x6D\x65\x72\x69\x63\x61\x20\x4F\x6E\x6C\x69\x6E\x65\x20\x52\x6F\x6F\x74\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x20\x32\x30\x82\x02\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x02\x0F\x00\x30\x82\x02\x0A\x02\x82\x02\x01\x00\xCC\x41\x45\x1D\xE9\x3D\x4D\x10\xF6\x8C\xB1\x41\xC9\xE0\x5E\xCB\x0D\xB7\xBF\x47\x73\xD3\xF0\x55\x4D\xDD\xC6\x0C\xFA\xB1\x66\x05\x6A\xCD\x78\xB4\xDC\x02\xDB\x4E\x81\xF3\xD7\xA7\x7C\x71\xBC\x75\x63\xA0\x5D\xE3\x07\x0C\x48\xEC\x25\xC4\x03\x20\xF4\xFF\x0E\x3B\x12\xFF\x9B\x8D\xE1\xC6\xD5\x1B\xB4\x6D\x22\xE3\xB1\xDB\x7F\x21\x64\xAF\x86\xBC\x57\x22\x2A\xD6\x47\x81\x57\x44\x82\x56\x53\xBD\x86\x14\x01\x0B\xFC\x7F\x74\xA4\x5A\xAE\xF1\xBA\x11\xB5\x9B\x58\x5A\x80\xB4\x37\x78\x09\x33\x7C\x32\x47\x03\x5C\xC4\xA5\x83\x48\xF4\x57\x56\x6E\x81\x36\x27\x18\x4F\xEC\x9B\x28\xC2\xD4\xB4\xD7\x7C\x0C\x3E\x0C\x2B\xDF\xCA\x04\xD7\xC6\x8E\xEA\x58\x4E\xA8\xA4\xA5\x18\x1C\x6C\x45\x98\xA3\x41\xD1\x2D\xD2\xC7\x6D\x8D\x19\xF1\xAD\x79\xB7\x81\x3F\xBD\x06\x82\x27\x2D\x10\x58\x05\xB5\x78\x05\xB9\x2F\xDB\x0C\x6B\x90\x90\x7E\x14\x59\x38\xBB\x94\x24\x13\xE5\xD1\x9D\x14\xDF\xD3\x82\x4D\x46\xF0\x80\x39\x52\x32\x0F\xE3\x84\xB2\x7A\x43\xF2\x5E\xDE\x5F\x3F\x1D\xDD\xE3\xB2\x1B\xA0\xA1\x2A\x23\x03\x6E\x2E\x01\x15\x87\x5C\xA6\x75\x75\xC7\x97\x61\xBE\xDE\x86\xDC\xD4\x48\xDB\xBD\x2A\xBF\x4A\x55\xDA\xE8\x7D\x50\xFB\xB4\x80\x17\xB8\x94\xBF\x01\x3D\xEA\xDA\xBA\x7C\xE0\x58\x67\x17\xB9\x58\xE0\x88\x86\x46\x67\x6C\x9D\x10\x47\x58\x32\xD0\x35\x7C\x79\x2A\x90\xA2\x5A\x10\x11\x23\x35\xAD\x2F\xCC\xE4\x4A\x5B\xA7\xC8\x27\xF2\x83\xDE\x5E\xBB\x5E\x77\xE7\xE8\xA5\x6E\x63\xC2\x0D\x5D\x61\xD0\x8C\xD2\x6C\x5A\x21\x0E\xCA\x28\xA3\xCE\x2A\xE9\x95\xC7\x48\xCF\x96\x6F\x1D\x92\x25\xC8\xC6\xC6\xC1\xC1\x0C\x05\xAC\x26\xC4\xD2\x75\xD2\xE1\x2A\x67\xC0\x3D\x5B\xA5\x9A\xEB\xCF\x7B\x1A\xA8\x9D\x14\x45\xE5\x0F\xA0\x9A\x65\xDE\x2F\x28\xBD\xCE\x6F\x94\x66\x83\x48\x29\xD8\xEA\x65\x8C\xAF\x93\xD9\x64\x9F\x55\x57\x26\xBF\x6F\xCB\x37\x31\x99\xA3\x60\xBB\x1C\xAD\x89\x34\x32\x62\xB8\x43\x21\x06\x72\x0C\xA1\x5C\x6D\x46\xC5\xFA\x29\xCF\x30\xDE\x89\xDC\x71\x5B\xDD\xB6\x37\x3E\xDF\x50\xF5\xB8\x07\x25\x26\xE5\xBC\xB5\xFE\x3C\x02\xB3\xB7\xF8\xBE\x43\xC1\x87\x11\x94\x9E\x23\x6C\x17\x8A\xB8\x8A\x27\x0C\x54\x47\xF0\xA9\xB3\xC0\x80\x8C\xA0\x27\xEB\x1D\x19\xE3\x07\x8E\x77\x70\x
CA\x2B\xF4\x7D\x76\xE0\x78\x67\x02\x03\x01\x00\x01\xA3\x63\x30\x61\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x4D\x45\xC1\x68\x38\xBB\x73\xA9\x69\xA1\x20\xE7\xED\xF5\x22\xA1\x23\x14\xD7\x9E\x30\x1F\x06\x03\x55\x1D\x23\x04\x18\x30\x16\x80\x14\x4D\x45\xC1\x68\x38\xBB\x73\xA9\x69\xA1\x20\xE7\xED\xF5\x22\xA1\x23\x14\xD7\x9E\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x86\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x02\x01\x00\x67\x6B\x06\xB9\x5F\x45\x3B\x2A\x4B\x33\xB3\xE6\x1B\x6B\x59\x4E\x22\xCC\xB9\xB7\xA4\x25\xC9\xA7\xC4\xF0\x54\x96\x0B\x64\xF3\xB1\x58\x4F\x5E\x51\xFC\xB2\x97\x7B\x27\x65\xC2\xE5\xCA\xE7\x0D\x0C\x25\x7B\x62\xE3\xFA\x9F\xB4\x87\xB7\x45\x46\xAF\x83\xA5\x97\x48\x8C\xA5\xBD\xF1\x16\x2B\x9B\x76\x2C\x7A\x35\x60\x6C\x11\x80\x97\xCC\xA9\x92\x52\xE6\x2B\xE6\x69\xED\xA9\xF8\x36\x2D\x2C\x77\xBF\x61\x48\xD1\x63\x0B\xB9\x5B\x52\xED\x18\xB0\x43\x42\x22\xA6\xB1\x77\xAE\xDE\x69\xC5\xCD\xC7\x1C\xA1\xB1\xA5\x1C\x10\xFB\x18\xBE\x1A\x70\xDD\xC1\x92\x4B\xBE\x29\x5A\x9D\x3F\x35\xBE\xE5\x7D\x51\xF8\x55\xE0\x25\x75\x23\x87\x1E\x5C\xDC\xBA\x9D\xB0\xAC\xB3\x69\xDB\x17\x83\xC9\xF7\xDE\x0C\xBC\x08\xDC\x91\x9E\xA8\xD0\xD7\x15\x37\x73\xA5\x35\xB8\xFC\x7E\xC5\x44\x40\x06\xC3\xEB\xF8\x22\x80\x5C\x47\xCE\x02\xE3\x11\x9F\x44\xFF\xFD\x9A\x32\xCC\x7D\x64\x51\x0E\xEB\x57\x26\x76\x3A\xE3\x1E\x22\x3C\xC2\xA6\x36\xDD\x19\xEF\xA7\xFC\x12\xF3\x26\xC0\x59\x31\x85\x4C\x9C\xD8\xCF\xDF\xA4\xCC\xCC\x29\x93\xFF\x94\x6D\x76\x5C\x13\x08\x97\xF2\xED\xA5\x0B\x4D\xDD\xE8\xC9\x68\x0E\x66\xD3\x00\x0E\x33\x12\x5B\xBC\x95\xE5\x32\x90\xA8\xB3\xC6\x6C\x83\xAD\x77\xEE\x8B\x7E\x7E\xB1\xA9\xAB\xD3\xE1\xF1\xB6\xC0\xB1\xEA\x88\xC0\xE7\xD3\x90\xE9\x28\x92\x94\x7B\x68\x7B\x97\x2A\x0A\x67\x2D\x85\x02\x38\x10\xE4\x03\x61\xD4\xDA\x25\x36\xC7\x08\x58\x2D\xA1\xA7\x51\xAF\x30\x0A\x49\xF5\xA6\x69\x87\x07\x2D\x44\x46\x76\x8E\x2A\xE5\x9A\x3B\xD7\x18\xA2\xFC\x9C\x38\x10\xCC\xC6\x3B\xD2\xB5\x17\x3A\x6F\xFD\xAE\x25\xBD\xF5\x72\x59\x64\xB1\x74\x2A\x38\x5F\x18\x4C\xDF\xCF\x71\x04\x5A\x36\xD4\xBF\x2F\x99\x9C\xE8\xD9\xBA\xB1\x95\xE6\x02\x4B\x21\xA1\x5B\xD5\xC1\x4F\x8F\xAE\x69\x6D\x53\xDB\x01\x93\xB5\x5C\x1E\x18\xDD\x64\x5A\xCA\x18\x28\x3E\x63\x04\x11\xFD\x1C\x8D\x00\x0F\xB8\x37\xDF\x67\x8A\x9D\x66\xA9\x02\x6A\x91\xFF\x13\xCA\x2F\x5D\x83\xBC\x87\x93\x6C\xDC\x24\x51\x16\x04\x25\x66\xFA\xB3\xD9\xC2\xBA\x29\xBE\x9A\x48\x38\x82\x99\xF4\xBF\x3B\x4A\x31\x19\xF9\xBF\x8E\x21\x33\x14\xCA\x4F\x54\x5F\xFB\xCE\xFB\x8F\x71\x7F\xFD\x5E\x19\xA0\x0F\x4B\x91\xB8\xC4\x54\xBC\x06\xB0\x45\x8F\x26\x91\xA2\x8E\xFE\xA9", ["CN=Visa eCommerce Root,OU=Visa International Service Association,O=VISA,C=US"] = 
"\x30\x82\x03\xA2\x30\x82\x02\x8A\xA0\x03\x02\x01\x02\x02\x10\x13\x86\x35\x4D\x1D\x3F\x06\xF2\xC1\xF9\x65\x05\xD5\x90\x1C\x62\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x6B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0D\x30\x0B\x06\x03\x55\x04\x0A\x13\x04\x56\x49\x53\x41\x31\x2F\x30\x2D\x06\x03\x55\x04\x0B\x13\x26\x56\x69\x73\x61\x20\x49\x6E\x74\x65\x72\x6E\x61\x74\x69\x6F\x6E\x61\x6C\x20\x53\x65\x72\x76\x69\x63\x65\x20\x41\x73\x73\x6F\x63\x69\x61\x74\x69\x6F\x6E\x31\x1C\x30\x1A\x06\x03\x55\x04\x03\x13\x13\x56\x69\x73\x61\x20\x65\x43\x6F\x6D\x6D\x65\x72\x63\x65\x20\x52\x6F\x6F\x74\x30\x1E\x17\x0D\x30\x32\x30\x36\x32\x36\x30\x32\x31\x38\x33\x36\x5A\x17\x0D\x32\x32\x30\x36\x32\x34\x30\x30\x31\x36\x31\x32\x5A\x30\x6B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0D\x30\x0B\x06\x03\x55\x04\x0A\x13\x04\x56\x49\x53\x41\x31\x2F\x30\x2D\x06\x03\x55\x04\x0B\x13\x26\x56\x69\x73\x61\x20\x49\x6E\x74\x65\x72\x6E\x61\x74\x69\x6F\x6E\x61\x6C\x20\x53\x65\x72\x76\x69\x63\x65\x20\x41\x73\x73\x6F\x63\x69\x61\x74\x69\x6F\x6E\x31\x1C\x30\x1A\x06\x03\x55\x04\x03\x13\x13\x56\x69\x73\x61\x20\x65\x43\x6F\x6D\x6D\x65\x72\x63\x65\x20\x52\x6F\x6F\x74\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xAF\x57\xDE\x56\x1E\x6E\xA1\xDA\x60\xB1\x94\x27\xCB\x17\xDB\x07\x3F\x80\x85\x4F\xC8\x9C\xB6\xD0\xF4\x6F\x4F\xCF\x99\xD8\xE1\xDB\xC2\x48\x5C\x3A\xAC\x39\x33\xC7\x1F\x6A\x8B\x26\x3D\x2B\x35\xF5\x48\xB1\x91\xC1\x02\x4E\x04\x96\x91\x7B\xB0\x33\xF0\xB1\x14\x4E\x11\x6F\xB5\x40\xAF\x1B\x45\xA5\x4A\xEF\x7E\xB6\xAC\xF2\xA0\x1F\x58\x3F\x12\x46\x60\x3C\x8D\xA1\xE0\x7D\xCF\x57\x3E\x33\x1E\xFB\x47\xF1\xAA\x15\x97\x07\x55\x66\xA5\xB5\x2D\x2E\xD8\x80\x59\xB2\xA7\x0D\xB7\x46\xEC\x21\x63\xFF\x35\xAB\xA5\x02\xCF\x2A\xF4\x4C\xFE\x7B\xF5\x94\x5D\x84\x4D\xA8\xF2\x60\x8F\xDB\x0E\x25\x3C\x9F\x73\x71\xCF\x94\xDF\x4A\xEA\xDB\xDF\x72\x38\x8C\xF3\x96\xBD\xF1\x17\xBC\xD2\xBA\x3B\x45\x5A\xC6\xA7\xF6\xC6\x17\x8B\x01\x9D\xFC\x19\xA8\x2A\x83\x16\xB8\x3A\x48\xFE\x4E\x3E\xA0\xAB\x06\x19\xE9\x53\xF3\x80\x13\x07\xED\x2D\xBF\x3F\x0A\x3C\x55\x20\x39\x2C\x2C\x00\x69\x74\x95\x4A\xBC\x20\xB2\xA9\x79\xE5\x18\x89\x91\xA8\xDC\x1C\x4D\xEF\xBB\x7E\x37\x0B\x5D\xFE\x39\xA5\x88\x52\x8C\x00\x6C\xEC\x18\x7C\x41\xBD\xF6\x8B\x75\x77\xBA\x60\x9D\x84\xE7\xFE\x2D\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x15\x38\x83\x0F\x3F\x2C\x3F\x70\x33\x1E\xCD\x46\xFE\x07\x8C\x20\xE0\xD7\xC3\xB7\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x5F\xF1\x41\x7D\x7C\x5C\x08\xB9\x2B\xE0\xD5\x92\x47\xFA\x67\x5C\xA5\x13\xC3\x03\x21\x9B\x2B\x4C\x89\x46\xCF\x59\x4D\xC9\xFE\xA5\x40\xB6\x63\xCD\xDD\x71\x28\x95\x67\x11\xCC\x24\xAC\xD3\x44\x6C\x71\xAE\x01\x20\x6B\x03\xA2\x8F\x18\xB7\x29\x3A\x7D\xE5\x16\x60\x53\x78\x3C\xC0\xAF\x15\x83\xF7\x8F\x52\x33\x24\xBD\x64\x93\x97\xEE\x8B\xF7\xDB\x18\xA8\x6D\x71\xB3\xF7\x2C\x17\xD0\x74\x25\x69\xF7\xFE\x6B\x3C\x94\xBE\x4D\x4B\x41\x8C\x4E\xE2\x73\xD0\xE3\x90\x22\x73\x43\xCD\xF3\xEF\xEA\x73\xCE\x45\x8A\xB0\xA6\x49\xFF\x4C\x7D\x9D\x71\x88\xC4\x76\x1D\x90\x5B\x1D\xEE\xFD\xCC\xF7\xEE\xFD\x60\xA5\xB1\x7A\x16\x71\xD1\x16\xD0\x7C\x12\x3C\x6C\x69\x97\xDB\xAE\x5F\x39\x9A\x70\x2F\x05\x3C\x19\x46\x04\x99\x20\x36\xD0\x60\x6E\x61\x06\xBB\x16\x42\x8C\x70\xF7\x30\xFB\xE0\xDB\x66\xA3\x00\x01\xBD\xE6\x2C\xDA\x91\x5F\xA0\x46\x8B\x4D\x6A\x9C\x3D\x3D\xDD\x05\x46\x
FE\x76\xBF\xA0\x0A\x3C\xE4\x00\xE6\x27\xB7\xFF\x84\x2D\xDE\xBA\x22\x27\x96\x10\x71\xEB\x22\xED\xDF\xDF\x33\x9C\xCF\xE3\xAD\xAE\x8E\xD4\x8E\xE6\x4F\x51\xAF\x16\x92\xE0\x5C\xF6\x07\x0F", - ["emailAddress=certificate@trustcenter.de,OU=TC TrustCenter Class 2 CA,O=TC TrustCenter for Security in Data Networks GmbH,L=Hamburg,ST=Hamburg,C=DE"] = "\x30\x82\x03\x5C\x30\x82\x02\xC5\xA0\x03\x02\x01\x02\x02\x02\x03\xEA\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x04\x05\x00\x30\x81\xBC\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x45\x31\x10\x30\x0E\x06\x03\x55\x04\x08\x13\x07\x48\x61\x6D\x62\x75\x72\x67\x31\x10\x30\x0E\x06\x03\x55\x04\x07\x13\x07\x48\x61\x6D\x62\x75\x72\x67\x31\x3A\x30\x38\x06\x03\x55\x04\x0A\x13\x31\x54\x43\x20\x54\x72\x75\x73\x74\x43\x65\x6E\x74\x65\x72\x20\x66\x6F\x72\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x69\x6E\x20\x44\x61\x74\x61\x20\x4E\x65\x74\x77\x6F\x72\x6B\x73\x20\x47\x6D\x62\x48\x31\x22\x30\x20\x06\x03\x55\x04\x0B\x13\x19\x54\x43\x20\x54\x72\x75\x73\x74\x43\x65\x6E\x74\x65\x72\x20\x43\x6C\x61\x73\x73\x20\x32\x20\x43\x41\x31\x29\x30\x27\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x09\x01\x16\x1A\x63\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x40\x74\x72\x75\x73\x74\x63\x65\x6E\x74\x65\x72\x2E\x64\x65\x30\x1E\x17\x0D\x39\x38\x30\x33\x30\x39\x31\x31\x35\x39\x35\x39\x5A\x17\x0D\x31\x31\x30\x31\x30\x31\x31\x31\x35\x39\x35\x39\x5A\x30\x81\xBC\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x45\x31\x10\x30\x0E\x06\x03\x55\x04\x08\x13\x07\x48\x61\x6D\x62\x75\x72\x67\x31\x10\x30\x0E\x06\x03\x55\x04\x07\x13\x07\x48\x61\x6D\x62\x75\x72\x67\x31\x3A\x30\x38\x06\x03\x55\x04\x0A\x13\x31\x54\x43\x20\x54\x72\x75\x73\x74\x43\x65\x6E\x74\x65\x72\x20\x66\x6F\x72\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x69\x6E\x20\x44\x61\x74\x61\x20\x4E\x65\x74\x77\x6F\x72\x6B\x73\x20\x47\x6D\x62\x48\x31\x22\x30\x20\x06\x03\x55\x04\x0B\x13\x19\x54\x43\x20\x54\x72\x75\x73\x74\x43\x65\x6E\x74\x65\x72\x20\x43\x6C\x61\x73\x73\x20\x32\x20\x43\x41\x31\x29\x30\x27\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x09\x01\x16\x1A\x63\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x40\x74\x72\x75\x73\x74\x63\x65\x6E\x74\x65\x72\x2E\x64\x65\x30\x81\x9F\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8D\x00\x30\x81\x89\x02\x81\x81\x00\xDA\x38\xE8\xED\x32\x00\x29\x71\x83\x01\x0D\xBF\x8C\x01\xDC\xDA\xC6\xAD\x39\xA4\xA9\x8A\x2F\xD5\x8B\x5C\x68\x5F\x50\xC6\x62\xF5\x66\xBD\xCA\x91\x22\xEC\xAA\x1D\x51\xD7\x3D\xB3\x51\xB2\x83\x4E\x5D\xCB\x49\xB0\xF0\x4C\x55\xE5\x6B\x2D\xC7\x85\x0B\x30\x1C\x92\x4E\x82\xD4\xCA\x02\xED\xF7\x6F\xBE\xDC\xE0\xE3\x14\xB8\x05\x53\xF2\x9A\xF4\x56\x8B\x5A\x9E\x85\x93\xD1\xB4\x82\x56\xAE\x4D\xBB\xA8\x4B\x57\x16\xBC\xFE\xF8\x58\x9E\xF8\x29\x8D\xB0\x7B\xCD\x78\xC9\x4F\xAC\x8B\x67\x0C\xF1\x9C\xFB\xFC\x57\x9B\x57\x5C\x4F\x0D\x02\x03\x01\x00\x01\xA3\x6B\x30\x69\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x86\x30\x33\x06\x09\x60\x86\x48\x01\x86\xF8\x42\x01\x08\x04\x26\x16\x24\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x74\x72\x75\x73\x74\x63\x65\x6E\x74\x65\x72\x2E\x64\x65\x2F\x67\x75\x69\x64\x65\x6C\x69\x6E\x65\x73\x30\x11\x06\x09\x60\x86\x48\x01\x86\xF8\x42\x01\x01\x04\x04\x03\x02\x00\x07\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x04\x05\x00\x03\x81\x81\x00\x84\x52\xFB\x28\xDF\xFF\x1F\x75\x01\xBC\x01\xBE\x04\x56\x97\x6A\x74\x42\x24\x31\x83\xF9\x46\xB1\x06\x8A\x89\xCF\x96\x2C\x33\xBF\x8C\xB5\x5F\x7A\x72\xA1\x85\x06\xCE\x86\xF8\x05\x8E\xE8\xF9\x25\xCA\xDA\x83\x8C\x06\xAC\xEB\x36\x6D\x85\x91\x34\x04\x36\xF4\x42\xF0\xF8\x79\x
2E\x0A\x48\x5C\xAB\xCC\x51\x4F\x78\x76\xA0\xD9\xAC\x19\xBD\x2A\xD1\x69\x04\x28\x91\xCA\x36\x10\x27\x80\x57\x5B\xD2\x5C\xF5\xC2\x5B\xAB\x64\x81\x63\x74\x51\xF4\x97\xBF\xCD\x12\x28\xF7\x4D\x66\x7F\xA7\xF0\x1C\x01\x26\x78\xB2\x66\x47\x70\x51\x64", - ["emailAddress=certificate@trustcenter.de,OU=TC TrustCenter Class 3 CA,O=TC TrustCenter for Security in Data Networks GmbH,L=Hamburg,ST=Hamburg,C=DE"] = "\x30\x82\x03\x5C\x30\x82\x02\xC5\xA0\x03\x02\x01\x02\x02\x02\x03\xEB\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x04\x05\x00\x30\x81\xBC\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x45\x31\x10\x30\x0E\x06\x03\x55\x04\x08\x13\x07\x48\x61\x6D\x62\x75\x72\x67\x31\x10\x30\x0E\x06\x03\x55\x04\x07\x13\x07\x48\x61\x6D\x62\x75\x72\x67\x31\x3A\x30\x38\x06\x03\x55\x04\x0A\x13\x31\x54\x43\x20\x54\x72\x75\x73\x74\x43\x65\x6E\x74\x65\x72\x20\x66\x6F\x72\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x69\x6E\x20\x44\x61\x74\x61\x20\x4E\x65\x74\x77\x6F\x72\x6B\x73\x20\x47\x6D\x62\x48\x31\x22\x30\x20\x06\x03\x55\x04\x0B\x13\x19\x54\x43\x20\x54\x72\x75\x73\x74\x43\x65\x6E\x74\x65\x72\x20\x43\x6C\x61\x73\x73\x20\x33\x20\x43\x41\x31\x29\x30\x27\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x09\x01\x16\x1A\x63\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x40\x74\x72\x75\x73\x74\x63\x65\x6E\x74\x65\x72\x2E\x64\x65\x30\x1E\x17\x0D\x39\x38\x30\x33\x30\x39\x31\x31\x35\x39\x35\x39\x5A\x17\x0D\x31\x31\x30\x31\x30\x31\x31\x31\x35\x39\x35\x39\x5A\x30\x81\xBC\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x45\x31\x10\x30\x0E\x06\x03\x55\x04\x08\x13\x07\x48\x61\x6D\x62\x75\x72\x67\x31\x10\x30\x0E\x06\x03\x55\x04\x07\x13\x07\x48\x61\x6D\x62\x75\x72\x67\x31\x3A\x30\x38\x06\x03\x55\x04\x0A\x13\x31\x54\x43\x20\x54\x72\x75\x73\x74\x43\x65\x6E\x74\x65\x72\x20\x66\x6F\x72\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x69\x6E\x20\x44\x61\x74\x61\x20\x4E\x65\x74\x77\x6F\x72\x6B\x73\x20\x47\x6D\x62\x48\x31\x22\x30\x20\x06\x03\x55\x04\x0B\x13\x19\x54\x43\x20\x54\x72\x75\x73\x74\x43\x65\x6E\x74\x65\x72\x20\x43\x6C\x61\x73\x73\x20\x33\x20\x43\x41\x31\x29\x30\x27\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x09\x01\x16\x1A\x63\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x40\x74\x72\x75\x73\x74\x63\x65\x6E\x74\x65\x72\x2E\x64\x65\x30\x81\x9F\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8D\x00\x30\x81\x89\x02\x81\x81\x00\xB6\xB4\xC1\x35\x05\x2E\x0D\x8D\xEC\xA0\x40\x6A\x1C\x0E\x27\xA6\x50\x92\x6B\x50\x1B\x07\xDE\x2E\xE7\x76\xCC\xE0\xDA\xFC\x84\xA8\x5E\x8C\x63\x6A\x2B\x4D\xD9\x4E\x02\x76\x11\xC1\x0B\xF2\x8D\x79\xCA\x00\xB6\xF1\xB0\x0E\xD7\xFB\xA4\x17\x3D\xAF\xAB\x69\x7A\x96\x27\xBF\xAF\x33\xA1\x9A\x2A\x59\xAA\xC4\xB5\x37\x08\xF2\x12\xA5\x31\xB6\x43\xF5\x32\x96\x71\x28\x28\xAB\x8D\x28\x86\xDF\xBB\xEE\xE3\x0C\x7D\x30\xD6\xC3\x52\xAB\x8F\x5D\x27\x9C\x6B\xC0\xA3\xE7\x05\x6B\x57\x49\x44\xB3\x6E\xEA\x64\xCF\xD2\x8E\x7A\x50\x77\x77\x02\x03\x01\x00\x01\xA3\x6B\x30\x69\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x86\x30\x33\x06\x09\x60\x86\x48\x01\x86\xF8\x42\x01\x08\x04\x26\x16\x24\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x74\x72\x75\x73\x74\x63\x65\x6E\x74\x65\x72\x2E\x64\x65\x2F\x67\x75\x69\x64\x65\x6C\x69\x6E\x65\x73\x30\x11\x06\x09\x60\x86\x48\x01\x86\xF8\x42\x01\x01\x04\x04\x03\x02\x00\x07\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x04\x05\x00\x03\x81\x81\x00\x16\x3D\xC6\xCD\xC1\xBB\x85\x71\x85\x46\x9F\x3E\x20\x8F\x51\x28\x99\xEC\x2D\x45\x21\x63\x23\x5B\x04\xBB\x4C\x90\xB8\x88\x92\x04\x4D\xBD\x7D\x01\xA3\x3F\xF6\xEC\xCE\xF1\xDE\xFE\x7D\xE5\xE1\x3E\xBB\xC6\xAB\x5E\x
0B\xDD\x3D\x96\xC4\xCB\xA9\xD4\xF9\x26\xE6\x06\x4E\x9E\x0C\xA5\x7A\xBA\x6E\xC3\x7C\x82\x19\xD1\xC7\xB1\xB1\xC3\xDB\x0D\x8E\x9B\x40\x7C\x37\x0B\xF1\x5D\xE8\xFD\x1F\x90\x88\xA5\x0E\x4E\x37\x64\x21\xA8\x4E\x8D\xB4\x9F\xF1\xDE\x48\xAD\xD5\x56\x18\x52\x29\x8B\x47\x34\x12\x09\xD4\xBB\x92\x35\xEF\x0F\xDB\x34", ["CN=Certum CA,O=Unizeto Sp. z o.o.,C=PL"] = "\x30\x82\x03\x0C\x30\x82\x01\xF4\xA0\x03\x02\x01\x02\x02\x03\x01\x00\x20\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x3E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x50\x4C\x31\x1B\x30\x19\x06\x03\x55\x04\x0A\x13\x12\x55\x6E\x69\x7A\x65\x74\x6F\x20\x53\x70\x2E\x20\x7A\x20\x6F\x2E\x6F\x2E\x31\x12\x30\x10\x06\x03\x55\x04\x03\x13\x09\x43\x65\x72\x74\x75\x6D\x20\x43\x41\x30\x1E\x17\x0D\x30\x32\x30\x36\x31\x31\x31\x30\x34\x36\x33\x39\x5A\x17\x0D\x32\x37\x30\x36\x31\x31\x31\x30\x34\x36\x33\x39\x5A\x30\x3E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x50\x4C\x31\x1B\x30\x19\x06\x03\x55\x04\x0A\x13\x12\x55\x6E\x69\x7A\x65\x74\x6F\x20\x53\x70\x2E\x20\x7A\x20\x6F\x2E\x6F\x2E\x31\x12\x30\x10\x06\x03\x55\x04\x03\x13\x09\x43\x65\x72\x74\x75\x6D\x20\x43\x41\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xCE\xB1\xC1\x2E\xD3\x4F\x7C\xCD\x25\xCE\x18\x3E\x4F\xC4\x8C\x6F\x80\x6A\x73\xC8\x5B\x51\xF8\x9B\xD2\xDC\xBB\x00\x5C\xB1\xA0\xFC\x75\x03\xEE\x81\xF0\x88\xEE\x23\x52\xE9\xE6\x15\x33\x8D\xAC\x2D\x09\xC5\x76\xF9\x2B\x39\x80\x89\xE4\x97\x4B\x90\xA5\xA8\x78\xF8\x73\x43\x7B\xA4\x61\xB0\xD8\x58\xCC\xE1\x6C\x66\x7E\x9C\xF3\x09\x5E\x55\x63\x84\xD5\xA8\xEF\xF3\xB1\x2E\x30\x68\xB3\xC4\x3C\xD8\xAC\x6E\x8D\x99\x5A\x90\x4E\x34\xDC\x36\x9A\x8F\x81\x88\x50\xB7\x6D\x96\x42\x09\xF3\xD7\x95\x83\x0D\x41\x4B\xB0\x6A\x6B\xF8\xFC\x0F\x7E\x62\x9F\x67\xC4\xED\x26\x5F\x10\x26\x0F\x08\x4F\xF0\xA4\x57\x28\xCE\x8F\xB8\xED\x45\xF6\x6E\xEE\x25\x5D\xAA\x6E\x39\xBE\xE4\x93\x2F\xD9\x47\xA0\x72\xEB\xFA\xA6\x5B\xAF\xCA\x53\x3F\xE2\x0E\xC6\x96\x56\x11\x6E\xF7\xE9\x66\xA9\x26\xD8\x7F\x95\x53\xED\x0A\x85\x88\xBA\x4F\x29\xA5\x42\x8C\x5E\xB6\xFC\x85\x20\x00\xAA\x68\x0B\xA1\x1A\x85\x01\x9C\xC4\x46\x63\x82\x88\xB6\x22\xB1\xEE\xFE\xAA\x46\x59\x7E\xCF\x35\x2C\xD5\xB6\xDA\x5D\xF7\x48\x33\x14\x54\xB6\xEB\xD9\x6F\xCE\xCD\x88\xD6\xAB\x1B\xDA\x96\x3B\x1D\x59\x02\x03\x01\x00\x01\xA3\x13\x30\x11\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\xB8\x8D\xCE\xEF\xE7\x14\xBA\xCF\xEE\xB0\x44\x92\x6C\xB4\x39\x3E\xA2\x84\x6E\xAD\xB8\x21\x77\xD2\xD4\x77\x82\x87\xE6\x20\x41\x81\xEE\xE2\xF8\x11\xB7\x63\xD1\x17\x37\xBE\x19\x76\x24\x1C\x04\x1A\x4C\xEB\x3D\xAA\x67\x6F\x2D\xD4\xCD\xFE\x65\x31\x70\xC5\x1B\xA6\x02\x0A\xBA\x60\x7B\x6D\x58\xC2\x9A\x49\xFE\x63\x32\x0B\x6B\xE3\x3A\xC0\xAC\xAB\x3B\xB0\xE8\xD3\x09\x51\x8C\x10\x83\xC6\x34\xE0\xC5\x2B\xE0\x1A\xB6\x60\x14\x27\x6C\x32\x77\x8C\xBC\xB2\x72\x98\xCF\xCD\xCC\x3F\xB9\xC8\x24\x42\x14\xD6\x57\xFC\xE6\x26\x43\xA9\x1D\xE5\x80\x90\xCE\x03\x54\x28\x3E\xF7\x3F\xD3\xF8\x4D\xED\x6A\x0A\x3A\x93\x13\x9B\x3B\x14\x23\x13\x63\x9C\x3F\xD1\x87\x27\x79\xE5\x4C\x51\xE3\x01\xAD\x85\x5D\x1A\x3B\xB1\xD5\x73\x10\xA4\xD3\xF2\xBC\x6E\x64\xF5\x5A\x56\x90\xA8\xC7\x0E\x4C\x74\x0F\x2E\x71\x3B\xF7\xC8\x47\xF4\x69\x6F\x15\xF2\x11\x5E\x83\x1E\x9C\x7C\x52\xAE\xFD\x02\xDA\x12\xA8\x59\x67\x18\xDB\xBC\x70\xDD\x9B\xB1\x69\xED\x80\xCE\x89\x40\x48\x6A\x0E\x35\xCA\x29\x66\x15\x21\x94\x2C\xE8\x60\x2A\x9B\x85\x4A\x40\xF3\x6B\x8A\x24\xEC\x06\x16\x2C\x73", ["CN=AAA Certificate Services,O=Comodo CA 
Limited,L=Salford,ST=Greater Manchester,C=GB"] = "\x30\x82\x04\x32\x30\x82\x03\x1A\xA0\x03\x02\x01\x02\x02\x01\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x7B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x47\x42\x31\x1B\x30\x19\x06\x03\x55\x04\x08\x0C\x12\x47\x72\x65\x61\x74\x65\x72\x20\x4D\x61\x6E\x63\x68\x65\x73\x74\x65\x72\x31\x10\x30\x0E\x06\x03\x55\x04\x07\x0C\x07\x53\x61\x6C\x66\x6F\x72\x64\x31\x1A\x30\x18\x06\x03\x55\x04\x0A\x0C\x11\x43\x6F\x6D\x6F\x64\x6F\x20\x43\x41\x20\x4C\x69\x6D\x69\x74\x65\x64\x31\x21\x30\x1F\x06\x03\x55\x04\x03\x0C\x18\x41\x41\x41\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x53\x65\x72\x76\x69\x63\x65\x73\x30\x1E\x17\x0D\x30\x34\x30\x31\x30\x31\x30\x30\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x31\x32\x33\x31\x32\x33\x35\x39\x35\x39\x5A\x30\x7B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x47\x42\x31\x1B\x30\x19\x06\x03\x55\x04\x08\x0C\x12\x47\x72\x65\x61\x74\x65\x72\x20\x4D\x61\x6E\x63\x68\x65\x73\x74\x65\x72\x31\x10\x30\x0E\x06\x03\x55\x04\x07\x0C\x07\x53\x61\x6C\x66\x6F\x72\x64\x31\x1A\x30\x18\x06\x03\x55\x04\x0A\x0C\x11\x43\x6F\x6D\x6F\x64\x6F\x20\x43\x41\x20\x4C\x69\x6D\x69\x74\x65\x64\x31\x21\x30\x1F\x06\x03\x55\x04\x03\x0C\x18\x41\x41\x41\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x53\x65\x72\x76\x69\x63\x65\x73\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xBE\x40\x9D\xF4\x6E\xE1\xEA\x76\x87\x1C\x4D\x45\x44\x8E\xBE\x46\xC8\x83\x06\x9D\xC1\x2A\xFE\x18\x1F\x8E\xE4\x02\xFA\xF3\xAB\x5D\x50\x8A\x16\x31\x0B\x9A\x06\xD0\xC5\x70\x22\xCD\x49\x2D\x54\x63\xCC\xB6\x6E\x68\x46\x0B\x53\xEA\xCB\x4C\x24\xC0\xBC\x72\x4E\xEA\xF1\x15\xAE\xF4\x54\x9A\x12\x0A\xC3\x7A\xB2\x33\x60\xE2\xDA\x89\x55\xF3\x22\x58\xF3\xDE\xDC\xCF\xEF\x83\x86\xA2\x8C\x94\x4F\x9F\x68\xF2\x98\x90\x46\x84\x27\xC7\x76\xBF\xE3\xCC\x35\x2C\x8B\x5E\x07\x64\x65\x82\xC0\x48\xB0\xA8\x91\xF9\x61\x9F\x76\x20\x50\xA8\x91\xC7\x66\xB5\xEB\x78\x62\x03\x56\xF0\x8A\x1A\x13\xEA\x31\xA3\x1E\xA0\x99\xFD\x38\xF6\xF6\x27\x32\x58\x6F\x07\xF5\x6B\xB8\xFB\x14\x2B\xAF\xB7\xAA\xCC\xD6\x63\x5F\x73\x8C\xDA\x05\x99\xA8\x38\xA8\xCB\x17\x78\x36\x51\xAC\xE9\x9E\xF4\x78\x3A\x8D\xCF\x0F\xD9\x42\xE2\x98\x0C\xAB\x2F\x9F\x0E\x01\xDE\xEF\x9F\x99\x49\xF1\x2D\xDF\xAC\x74\x4D\x1B\x98\xB5\x47\xC5\xE5\x29\xD1\xF9\x90\x18\xC7\x62\x9C\xBE\x83\xC7\x26\x7B\x3E\x8A\x25\xC7\xC0\xDD\x9D\xE6\x35\x68\x10\x20\x9D\x8F\xD8\xDE\xD2\xC3\x84\x9C\x0D\x5E\xE8\x2F\xC9\x02\x03\x01\x00\x01\xA3\x81\xC0\x30\x81\xBD\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\xA0\x11\x0A\x23\x3E\x96\xF1\x07\xEC\xE2\xAF\x29\xEF\x82\xA5\x7F\xD0\x30\xA4\xB4\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x7B\x06\x03\x55\x1D\x1F\x04\x74\x30\x72\x30\x38\xA0\x36\xA0\x34\x86\x32\x68\x74\x74\x70\x3A\x2F\x2F\x63\x72\x6C\x2E\x63\x6F\x6D\x6F\x64\x6F\x63\x61\x2E\x63\x6F\x6D\x2F\x41\x41\x41\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x53\x65\x72\x76\x69\x63\x65\x73\x2E\x63\x72\x6C\x30\x36\xA0\x34\xA0\x32\x86\x30\x68\x74\x74\x70\x3A\x2F\x2F\x63\x72\x6C\x2E\x63\x6F\x6D\x6F\x64\x6F\x2E\x6E\x65\x74\x2F\x41\x41\x41\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x53\x65\x72\x76\x69\x63\x65\x73\x2E\x63\x72\x6C\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x08\x56\xFC\x02\xF0\x9B\xE8\xFF\xA4\xFA\xD6\x7B\xC6\x44\x80\xCE\x4F\xC4\xC5\xF6\x00\x58\xCC\xA6\xB6\xBC\x14\x49\x68\x04\x76\xE8\xE6\xEE\x5D\xEC\x02\x0F\x60\xD6\x8D\x50\x18\x4F\x26\x4E\x01\xE3\xE6\xB0\xA5\xEE\xBF\xBC\
x74\x54\x41\xBF\xFD\xFC\x12\xB8\xC7\x4F\x5A\xF4\x89\x60\x05\x7F\x60\xB7\x05\x4A\xF3\xF6\xF1\xC2\xBF\xC4\xB9\x74\x86\xB6\x2D\x7D\x6B\xCC\xD2\xF3\x46\xDD\x2F\xC6\xE0\x6A\xC3\xC3\x34\x03\x2C\x7D\x96\xDD\x5A\xC2\x0E\xA7\x0A\x99\xC1\x05\x8B\xAB\x0C\x2F\xF3\x5C\x3A\xCF\x6C\x37\x55\x09\x87\xDE\x53\x40\x6C\x58\xEF\xFC\xB6\xAB\x65\x6E\x04\xF6\x1B\xDC\x3C\xE0\x5A\x15\xC6\x9E\xD9\xF1\x59\x48\x30\x21\x65\x03\x6C\xEC\xE9\x21\x73\xEC\x9B\x03\xA1\xE0\x37\xAD\xA0\x15\x18\x8F\xFA\xBA\x02\xCE\xA7\x2C\xA9\x10\x13\x2C\xD4\xE5\x08\x26\xAB\x22\x97\x60\xF8\x90\x5E\x74\xD4\xA2\x9A\x53\xBD\xF2\xA9\x68\xE0\xA2\x6E\xC2\xD7\x6C\xB1\xA3\x0F\x9E\xBF\xEB\x68\xE7\x56\xF2\xAE\xF2\xE3\x2B\x38\x3A\x09\x81\xB5\x6B\x85\xD7\xBE\x2D\xED\x3F\x1A\xB7\xB2\x63\xE2\xF5\x62\x2C\x82\xD4\x6A\x00\x41\x50\xF1\x39\x83\x9F\x95\xE9\x36\x96\x98\x6E", ["CN=Secure Certificate Services,O=Comodo CA Limited,L=Salford,ST=Greater Manchester,C=GB"] = "\x30\x82\x04\x3F\x30\x82\x03\x27\xA0\x03\x02\x01\x02\x02\x01\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x7E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x47\x42\x31\x1B\x30\x19\x06\x03\x55\x04\x08\x0C\x12\x47\x72\x65\x61\x74\x65\x72\x20\x4D\x61\x6E\x63\x68\x65\x73\x74\x65\x72\x31\x10\x30\x0E\x06\x03\x55\x04\x07\x0C\x07\x53\x61\x6C\x66\x6F\x72\x64\x31\x1A\x30\x18\x06\x03\x55\x04\x0A\x0C\x11\x43\x6F\x6D\x6F\x64\x6F\x20\x43\x41\x20\x4C\x69\x6D\x69\x74\x65\x64\x31\x24\x30\x22\x06\x03\x55\x04\x03\x0C\x1B\x53\x65\x63\x75\x72\x65\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x53\x65\x72\x76\x69\x63\x65\x73\x30\x1E\x17\x0D\x30\x34\x30\x31\x30\x31\x30\x30\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x31\x32\x33\x31\x32\x33\x35\x39\x35\x39\x5A\x30\x7E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x47\x42\x31\x1B\x30\x19\x06\x03\x55\x04\x08\x0C\x12\x47\x72\x65\x61\x74\x65\x72\x20\x4D\x61\x6E\x63\x68\x65\x73\x74\x65\x72\x31\x10\x30\x0E\x06\x03\x55\x04\x07\x0C\x07\x53\x61\x6C\x66\x6F\x72\x64\x31\x1A\x30\x18\x06\x03\x55\x04\x0A\x0C\x11\x43\x6F\x6D\x6F\x64\x6F\x20\x43\x41\x20\x4C\x69\x6D\x69\x74\x65\x64\x31\x24\x30\x22\x06\x03\x55\x04\x03\x0C\x1B\x53\x65\x63\x75\x72\x65\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x53\x65\x72\x76\x69\x63\x65\x73\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xC0\x71\x33\x82\x8A\xD0\x70\xEB\x73\x87\x82\x40\xD5\x1D\xE4\xCB\xC9\x0E\x42\x90\xF9\xDE\x34\xB9\xA1\xBA\x11\xF4\x25\x85\xF3\xCC\x72\x6D\xF2\x7B\x97\x6B\xB3\x07\xF1\x77\x24\x91\x5F\x25\x8F\xF6\x74\x3D\xE4\x80\xC2\xF8\x3C\x0D\xF3\xBF\x40\xEA\xF7\xC8\x52\xD1\x72\x6F\xEF\xC8\xAB\x41\xB8\x6E\x2E\x17\x2A\x95\x69\x0C\xCD\xD2\x1E\x94\x7B\x2D\x94\x1D\xAA\x75\xD7\xB3\x98\xCB\xAC\xBC\x64\x53\x40\xBC\x8F\xAC\xAC\x36\xCB\x5C\xAD\xBB\xDD\xE0\x94\x17\xEC\xD1\x5C\xD0\xBF\xEF\xA5\x95\xC9\x90\xC5\xB0\xAC\xFB\x1B\x43\xDF\x7A\x08\x5D\xB7\xB8\xF2\x40\x1B\x2B\x27\x9E\x50\xCE\x5E\x65\x82\x88\x8C\x5E\xD3\x4E\x0C\x7A\xEA\x08\x91\xB6\x36\xAA\x2B\x42\xFB\xEA\xC2\xA3\x39\xE5\xDB\x26\x38\xAD\x8B\x0A\xEE\x19\x63\xC7\x1C\x24\xDF\x03\x78\xDA\xE6\xEA\xC1\x47\x1A\x0B\x0B\x46\x09\xDD\x02\xFC\xDE\xCB\x87\x5F\xD7\x30\x63\x68\xA1\xAE\xDC\x32\xA1\xBA\xBE\xFE\x44\xAB\x68\xB6\xA5\x17\x15\xFD\xBD\xD5\xA7\xA7\x9A\xE4\x44\x33\xE9\x88\x8E\xFC\xED\x51\xEB\x93\x71\x4E\xAD\x01\xE7\x44\x8E\xAB\x2D\xCB\xA8\xFE\x01\x49\x48\xF0\xC0\xDD\xC7\x68\xD8\x92\xFE\x3D\x02\x03\x01\x00\x01\xA3\x81\xC7\x30\x81\xC4\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x3C\xD8\x93\x88\xC2\xC0\x82\x09\xCC\x01\x99\x06\x93\x20\xE9\x9E\x70\x09\x63\x4F\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x
03\x02\x01\x06\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x81\x81\x06\x03\x55\x1D\x1F\x04\x7A\x30\x78\x30\x3B\xA0\x39\xA0\x37\x86\x35\x68\x74\x74\x70\x3A\x2F\x2F\x63\x72\x6C\x2E\x63\x6F\x6D\x6F\x64\x6F\x63\x61\x2E\x63\x6F\x6D\x2F\x53\x65\x63\x75\x72\x65\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x53\x65\x72\x76\x69\x63\x65\x73\x2E\x63\x72\x6C\x30\x39\xA0\x37\xA0\x35\x86\x33\x68\x74\x74\x70\x3A\x2F\x2F\x63\x72\x6C\x2E\x63\x6F\x6D\x6F\x64\x6F\x2E\x6E\x65\x74\x2F\x53\x65\x63\x75\x72\x65\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x53\x65\x72\x76\x69\x63\x65\x73\x2E\x63\x72\x6C\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x87\x01\x6D\x23\x1D\x7E\x5B\x17\x7D\xC1\x61\x32\xCF\x8F\xE7\xF3\x8A\x94\x59\x66\xE0\x9E\x28\xA8\x5E\xD3\xB7\xF4\x34\xE6\xAA\x39\xB2\x97\x16\xC5\x82\x6F\x32\xA4\xE9\x8C\xE7\xAF\xFD\xEF\xC2\xE8\xB9\x4B\xAA\xA3\xF4\xE6\xDA\x8D\x65\x21\xFB\xBA\x80\xEB\x26\x28\x85\x1A\xFE\x39\x8C\xDE\x5B\x04\x04\xB4\x54\xF9\xA3\x67\x9E\x41\xFA\x09\x52\xCC\x05\x48\xA8\xC9\x3F\x21\x04\x1E\xCE\x48\x6B\xFC\x85\xE8\xC2\x7B\xAF\x7F\xB7\xCC\xF8\x5F\x3A\xFD\x35\xC6\x0D\xEF\x97\xDC\x4C\xAB\x11\xE1\x6B\xCB\x31\xD1\x6C\xFB\x48\x80\xAB\xDC\x9C\x37\xB8\x21\x14\x4B\x0D\x71\x3D\xEC\x83\x33\x6E\xD1\x6E\x32\x16\xEC\x98\xC7\x16\x8B\x59\xA6\x34\xAB\x05\x57\x2D\x93\xF7\xAA\x13\xCB\xD2\x13\xE2\xB7\x2E\x3B\xCD\x6B\x50\x17\x09\x68\x3E\xB5\x26\x57\xEE\xB6\xE0\xB6\xDD\xB9\x29\x80\x79\x7D\x8F\xA3\xF0\xA4\x28\xA4\x15\xC4\x85\xF4\x27\xD4\x6B\xBF\xE5\x5C\xE4\x65\x02\x76\x54\xB4\xE3\x37\x66\x24\xD3\x19\x61\xC8\x52\x10\xE5\x8B\x37\x9A\xB9\xA9\xF9\x1D\xBF\xEA\x99\x92\x61\x96\xFF\x01\xCD\xA1\x5F\x0D\xBC\x71\xBC\x0E\xAC\x0B\x1D\x47\x45\x1D\xC1\xEC\x7C\xEC\xFD\x29", @@ -51,7 +48,6 @@ redef root_certs += { ["CN=Sonera Class2 CA,O=Sonera,C=FI"] = 
"\x30\x82\x03\x20\x30\x82\x02\x08\xA0\x03\x02\x01\x02\x02\x01\x1D\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x39\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x46\x49\x31\x0F\x30\x0D\x06\x03\x55\x04\x0A\x13\x06\x53\x6F\x6E\x65\x72\x61\x31\x19\x30\x17\x06\x03\x55\x04\x03\x13\x10\x53\x6F\x6E\x65\x72\x61\x20\x43\x6C\x61\x73\x73\x32\x20\x43\x41\x30\x1E\x17\x0D\x30\x31\x30\x34\x30\x36\x30\x37\x32\x39\x34\x30\x5A\x17\x0D\x32\x31\x30\x34\x30\x36\x30\x37\x32\x39\x34\x30\x5A\x30\x39\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x46\x49\x31\x0F\x30\x0D\x06\x03\x55\x04\x0A\x13\x06\x53\x6F\x6E\x65\x72\x61\x31\x19\x30\x17\x06\x03\x55\x04\x03\x13\x10\x53\x6F\x6E\x65\x72\x61\x20\x43\x6C\x61\x73\x73\x32\x20\x43\x41\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\x90\x17\x4A\x35\x9D\xCA\xF0\x0D\x96\xC7\x44\xFA\x16\x37\xFC\x48\xBD\xBD\x7F\x80\x2D\x35\x3B\xE1\x6F\xA8\x67\xA9\xBF\x03\x1C\x4D\x8C\x6F\x32\x47\xD5\x41\x68\xA4\x13\x04\xC1\x35\x0C\x9A\x84\x43\xFC\x5C\x1D\xFF\x89\xB3\xE8\x17\x18\xCD\x91\x5F\xFB\x89\xE3\xEA\xBF\x4E\x5D\x7C\x1B\x26\xD3\x75\x79\xED\xE6\x84\xE3\x57\xE5\xAD\x29\xC4\xF4\x3A\x28\xE7\xA5\x7B\x84\x36\x69\xB3\xFD\x5E\x76\xBD\xA3\x2D\x99\xD3\x90\x4E\x23\x28\x7D\x18\x63\xF1\x54\x3B\x26\x9D\x76\x5B\x97\x42\xB2\xFF\xAE\xF0\x4E\xEC\xDD\x39\x95\x4E\x83\x06\x7F\xE7\x49\x40\xC8\xC5\x01\xB2\x54\x5A\x66\x1D\x3D\xFC\xF9\xE9\x3C\x0A\x9E\x81\xB8\x70\xF0\x01\x8B\xE4\x23\x54\x7C\xC8\xAE\xF8\x90\x1E\x00\x96\x72\xD4\x54\xCF\x61\x23\xBC\xEA\xFB\x9D\x02\x95\xD1\xB6\xB9\x71\x3A\x69\x08\x3F\x0F\xB4\xE1\x42\xC7\x88\xF5\x3F\x98\xA8\xA7\xBA\x1C\xE0\x71\x71\xEF\x58\x57\x81\x50\x7A\x5C\x6B\x74\x46\x0E\x83\x03\x98\xC3\x8E\xA8\x6E\xF2\x76\x32\x6E\x27\x83\xC2\x73\xF3\xDC\x18\xE8\xB4\x93\xEA\x75\x44\x6B\x04\x60\x20\x71\x57\x87\x9D\xF3\xBE\xA0\x90\x23\x3D\x8A\x24\xE1\xDA\x21\xDB\xC3\x02\x03\x01\x00\x01\xA3\x33\x30\x31\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x11\x06\x03\x55\x1D\x0E\x04\x0A\x04\x08\x4A\xA0\xAA\x58\x84\xD3\x5E\x3C\x30\x0B\x06\x03\x55\x1D\x0F\x04\x04\x03\x02\x01\x06\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x5A\xCE\x87\xF9\x16\x72\x15\x57\x4B\x1D\xD9\x9B\xE7\xA2\x26\x30\xEC\x93\x67\xDF\xD6\x2D\xD2\x34\xAF\xF7\x38\xA5\xCE\xAB\x16\xB9\xAB\x2F\x7C\x35\xCB\xAC\xD0\x0F\xB4\x4C\x2B\xFC\x80\xEF\x6B\x8C\x91\x5F\x36\x76\xF7\xDB\xB3\x1B\x19\xEA\xF4\xB2\x11\xFD\x61\x71\x44\xBF\x28\xB3\x3A\x1D\xBF\xB3\x43\xE8\x9F\xBF\xDC\x31\x08\x71\xB0\x9D\x8D\xD6\x34\x47\x32\x90\xC6\x65\x24\xF7\xA0\x4A\x7C\x04\x73\x8F\x39\x6F\x17\x8C\x72\xB5\xBD\x4B\xC8\x7A\xF8\x7B\x83\xC3\x28\x4E\x9C\x09\xEA\x67\x3F\xB2\x67\x04\x1B\xC3\x14\xDA\xF8\xE7\x49\x24\x91\xD0\x1D\x6A\xFA\x61\x39\xEF\x6B\xE7\x21\x75\x06\x07\xD8\x12\xB4\x21\x20\x70\x42\x71\x81\xDA\x3C\x9A\x36\xBE\xA6\x5B\x0D\x6A\x6C\x9A\x1F\x91\x7B\xF9\xF9\xEF\x42\xBA\x4E\x4E\x9E\xCC\x0C\x8D\x94\xDC\xD9\x45\x9C\x5E\xEC\x42\x50\x63\xAE\xF4\x5D\xC4\xB1\x12\xDC\xCA\x3B\xA8\x2E\x9D\x14\x5A\x05\x75\xB7\xEC\xD7\x63\xE2\xBA\x35\xB6\x04\x08\x91\xE8\xDA\x9D\x9C\xF6\x66\xB5\x18\xAC\x0A\xA6\x54\x26\x34\x33\xD2\x1B\xC1\xD4\x7F\x1A\x3A\x8E\x0B\xAA\x32\x6E\xDB\xFC\x4F\x25\x9F\xD9\x32\xC7\x96\x5A\x70\xAC\xDF\x4C", ["CN=Staat der Nederlanden Root CA,O=Staat der Nederlanden,C=NL"] = 
"\x30\x82\x03\xBA\x30\x82\x02\xA2\xA0\x03\x02\x01\x02\x02\x04\x00\x98\x96\x8A\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x55\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x4E\x4C\x31\x1E\x30\x1C\x06\x03\x55\x04\x0A\x13\x15\x53\x74\x61\x61\x74\x20\x64\x65\x72\x20\x4E\x65\x64\x65\x72\x6C\x61\x6E\x64\x65\x6E\x31\x26\x30\x24\x06\x03\x55\x04\x03\x13\x1D\x53\x74\x61\x61\x74\x20\x64\x65\x72\x20\x4E\x65\x64\x65\x72\x6C\x61\x6E\x64\x65\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x1E\x17\x0D\x30\x32\x31\x32\x31\x37\x30\x39\x32\x33\x34\x39\x5A\x17\x0D\x31\x35\x31\x32\x31\x36\x30\x39\x31\x35\x33\x38\x5A\x30\x55\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x4E\x4C\x31\x1E\x30\x1C\x06\x03\x55\x04\x0A\x13\x15\x53\x74\x61\x61\x74\x20\x64\x65\x72\x20\x4E\x65\x64\x65\x72\x6C\x61\x6E\x64\x65\x6E\x31\x26\x30\x24\x06\x03\x55\x04\x03\x13\x1D\x53\x74\x61\x61\x74\x20\x64\x65\x72\x20\x4E\x65\x64\x65\x72\x6C\x61\x6E\x64\x65\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\x98\xD2\xB5\x51\x11\x7A\x81\xA6\x14\x98\x71\x6D\xBE\xCC\xE7\x13\x1B\xD6\x27\x0E\x7A\xB3\x6A\x18\x1C\xB6\x61\x5A\xD5\x61\x09\xBF\xDE\x90\x13\xC7\x67\xEE\xDD\xF3\xDA\xC5\x0C\x12\x9E\x35\x55\x3E\x2C\x27\x88\x40\x6B\xF7\xDC\xDD\x22\x61\xF5\xC2\xC7\x0E\xF5\xF6\xD5\x76\x53\x4D\x8F\x8C\xBC\x18\x76\x37\x85\x9D\xE8\xCA\x49\xC7\xD2\x4F\x98\x13\x09\xA2\x3E\x22\x88\x9C\x7F\xD6\xF2\x10\x65\xB4\xEE\x5F\x18\xD5\x17\xE3\xF8\xC5\xFD\xE2\x9D\xA2\xEF\x53\x0E\x85\x77\xA2\x0F\xE1\x30\x47\xEE\x00\xE7\x33\x7D\x44\x67\x1A\x0B\x51\xE8\x8B\xA0\x9E\x50\x98\x68\x34\x52\x1F\x2E\x6D\x01\xF2\x60\x45\xF2\x31\xEB\xA9\x31\x68\x29\xBB\x7A\x41\x9E\xC6\x19\x7F\x94\xB4\x51\x39\x03\x7F\xB2\xDE\xA7\x32\x9B\xB4\x47\x8E\x6F\xB4\x4A\xAE\xE5\xAF\xB1\xDC\xB0\x1B\x61\xBC\x99\x72\xDE\xE4\x89\xB7\x7A\x26\x5D\xDA\x33\x49\x5B\x52\x9C\x0E\xF5\x8A\xAD\xC3\xB8\x3D\xE8\x06\x6A\xC2\xD5\x2A\x0B\x6C\x7B\x84\xBD\x56\x05\xCB\x86\x65\x92\xEC\x44\x2B\xB0\x8E\xB9\xDC\x70\x0B\x46\xDA\xAD\xBC\x63\x88\x39\xFA\xDB\x6A\xFE\x23\xFA\xBC\xE4\x48\xF4\x67\x2B\x6A\x11\x10\x21\x49\x02\x03\x01\x00\x01\xA3\x81\x91\x30\x81\x8E\x30\x0C\x06\x03\x55\x1D\x13\x04\x05\x30\x03\x01\x01\xFF\x30\x4F\x06\x03\x55\x1D\x20\x04\x48\x30\x46\x30\x44\x06\x04\x55\x1D\x20\x00\x30\x3C\x30\x3A\x06\x08\x2B\x06\x01\x05\x05\x07\x02\x01\x16\x2E\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x70\x6B\x69\x6F\x76\x65\x72\x68\x65\x69\x64\x2E\x6E\x6C\x2F\x70\x6F\x6C\x69\x63\x69\x65\x73\x2F\x72\x6F\x6F\x74\x2D\x70\x6F\x6C\x69\x63\x79\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\xA8\x7D\xEB\xBC\x63\xA4\x74\x13\x74\x00\xEC\x96\xE0\xD3\x34\xC1\x2C\xBF\x6C\xF8\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x05\x84\x87\x55\x74\x36\x61\xC1\xBB\xD1\xD4\xC6\x15\xA8\x13\xB4\x9F\xA4\xFE\xBB\xEE\x15\xB4\x2F\x06\x0C\x29\xF2\xA8\x92\xA4\x61\x0D\xFC\xAB\x5C\x08\x5B\x51\x13\x2B\x4D\xC2\x2A\x61\xC8\xF8\x09\x58\xFC\x2D\x02\xB2\x39\x7D\x99\x66\x81\xBF\x6E\x5C\x95\x45\x20\x6C\xE6\x79\xA7\xD1\xD8\x1C\x29\xFC\xC2\x20\x27\x51\xC8\xF1\x7C\x5D\x34\x67\x69\x85\x11\x30\xC6\x00\xD2\xD7\xF3\xD3\x7C\xB6\xF0\x31\x57\x28\x12\x82\x73\xE9\x33\x2F\xA6\x55\xB4\x0B\x91\x94\x47\x9C\xFA\xBB\x7A\x42\x32\xE8\xAE\x7E\x2D\xC8\xBC\xAC\x14\xBF\xD9\x0F\xD9\x5B\xFC\xC1\xF9\x7A\x95\xE1\x7D\x7E\x96\xFC\x71\xB0\xC2\x4C\xC8\xDF\x45\x34\xC9\xCE\x0D\xF2\x9C\x64\x08\xD0\x3B\xC3\x29\xC5\xB2\xED\x90\x04\xC1\xB1\x29\x91\xC5\x30\x6F\xC1\xA9\x72\x33\xCC\xFE\x5D\x16\x17\x2C\x11\x69\xE7\x7E\x
FE\xC5\x83\x08\xDF\xBC\xDC\x22\x3A\x2E\x20\x69\x23\x39\x56\x60\x67\x90\x8B\x2E\x76\x39\xFB\x11\x88\x97\xF6\x7C\xBD\x4B\xB8\x20\x16\x67\x05\x8D\xE2\x3B\xC1\x72\x3F\x94\x95\x37\xC7\x5D\xB9\x9E\xD8\x93\xA1\x17\x8F\xFF\x0C\x66\x15\xC1\x24\x7C\x32\x7C\x03\x1D\x3B\xA1\x58\x45\x32\x93", ["OU=TDC Internet Root CA,O=TDC Internet,C=DK"] = "\x30\x82\x04\x2B\x30\x82\x03\x13\xA0\x03\x02\x01\x02\x02\x04\x3A\xCC\xA5\x4C\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x43\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x4B\x31\x15\x30\x13\x06\x03\x55\x04\x0A\x13\x0C\x54\x44\x43\x20\x49\x6E\x74\x65\x72\x6E\x65\x74\x31\x1D\x30\x1B\x06\x03\x55\x04\x0B\x13\x14\x54\x44\x43\x20\x49\x6E\x74\x65\x72\x6E\x65\x74\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x1E\x17\x0D\x30\x31\x30\x34\x30\x35\x31\x36\x33\x33\x31\x37\x5A\x17\x0D\x32\x31\x30\x34\x30\x35\x31\x37\x30\x33\x31\x37\x5A\x30\x43\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x4B\x31\x15\x30\x13\x06\x03\x55\x04\x0A\x13\x0C\x54\x44\x43\x20\x49\x6E\x74\x65\x72\x6E\x65\x74\x31\x1D\x30\x1B\x06\x03\x55\x04\x0B\x13\x14\x54\x44\x43\x20\x49\x6E\x74\x65\x72\x6E\x65\x74\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xC4\xB8\x40\xBC\x91\xD5\x63\x1F\xD7\x99\xA0\x8B\x0C\x40\x1E\x74\xB7\x48\x9D\x46\x8C\x02\xB2\xE0\x24\x5F\xF0\x19\x13\xA7\x37\x83\x6B\x5D\xC7\x8E\xF9\x84\x30\xCE\x1A\x3B\xFA\xFB\xCE\x8B\x6D\x23\xC6\xC3\x6E\x66\x9F\x89\xA5\xDF\xE0\x42\x50\x67\xFA\x1F\x6C\x1E\xF4\xD0\x05\xD6\xBF\xCA\xD6\x4E\xE4\x68\x60\x6C\x46\xAA\x1C\x5D\x63\xE1\x07\x86\x0E\x65\x00\xA7\x2E\xA6\x71\xC6\xBC\xB9\x81\xA8\x3A\x7D\x1A\xD2\xF9\xD1\xAC\x4B\xCB\xCE\x75\xAF\xDC\x7B\xFA\x81\x73\xD4\xFC\xBA\xBD\x41\x88\xD4\x74\xB3\xF9\x5E\x38\x3A\x3C\x43\xA8\xD2\x95\x4E\x77\x6D\x13\x0C\x9D\x8F\x78\x01\xB7\x5A\x20\x1F\x03\x37\x35\xE2\x2C\xDB\x4B\x2B\x2C\x78\xB9\x49\xDB\xC4\xD0\xC7\x9C\x9C\xE4\x8A\x20\x09\x21\x16\x56\x66\xFF\x05\xEC\x5B\xE3\xF0\xCF\xAB\x24\x24\x5E\xC3\x7F\x70\x7A\x12\xC4\xD2\xB5\x10\xA0\xB6\x21\xE1\x8D\x78\x69\x55\x44\x69\xF5\xCA\x96\x1C\x34\x85\x17\x25\x77\xE2\xF6\x2F\x27\x98\x78\xFD\x79\x06\x3A\xA2\xD6\x5A\x43\xC1\xFF\xEC\x04\x3B\xEE\x13\xEF\xD3\x58\x5A\xFF\x92\xEB\xEC\xAE\xDA\xF2\x37\x03\x47\x41\xB6\x97\xC9\x2D\x0A\x41\x22\xBB\xBB\xE6\xA7\x02\x03\x01\x00\x01\xA3\x82\x01\x25\x30\x82\x01\x21\x30\x11\x06\x09\x60\x86\x48\x01\x86\xF8\x42\x01\x01\x04\x04\x03\x02\x00\x07\x30\x65\x06\x03\x55\x1D\x1F\x04\x5E\x30\x5C\x30\x5A\xA0\x58\xA0\x56\xA4\x54\x30\x52\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x4B\x31\x15\x30\x13\x06\x03\x55\x04\x0A\x13\x0C\x54\x44\x43\x20\x49\x6E\x74\x65\x72\x6E\x65\x74\x31\x1D\x30\x1B\x06\x03\x55\x04\x0B\x13\x14\x54\x44\x43\x20\x49\x6E\x74\x65\x72\x6E\x65\x74\x20\x52\x6F\x6F\x74\x20\x43\x41\x31\x0D\x30\x0B\x06\x03\x55\x04\x03\x13\x04\x43\x52\x4C\x31\x30\x2B\x06\x03\x55\x1D\x10\x04\x24\x30\x22\x80\x0F\x32\x30\x30\x31\x30\x34\x30\x35\x31\x36\x33\x33\x31\x37\x5A\x81\x0F\x32\x30\x32\x31\x30\x34\x30\x35\x31\x37\x30\x33\x31\x37\x5A\x30\x0B\x06\x03\x55\x1D\x0F\x04\x04\x03\x02\x01\x06\x30\x1F\x06\x03\x55\x1D\x23\x04\x18\x30\x16\x80\x14\x6C\x64\x01\xC7\xFD\x85\x6D\xAC\xC8\xDA\x9E\x50\x08\x85\x08\xB5\x3C\x56\xA8\x50\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x6C\x64\x01\xC7\xFD\x85\x6D\xAC\xC8\xDA\x9E\x50\x08\x85\x08\xB5\x3C\x56\xA8\x50\x30\x0C\x06\x03\x55\x1D\x13\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x09\x2A\x86\x48\x86\xF6\x7D\x07\x41\x00\x04\x10\x30\x0E\x1B\x08\x56\x35\x2E\x30\x3A\x34\x2E\x30\x03\x02\x04\x90\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x0
1\x01\x05\x05\x00\x03\x82\x01\x01\x00\x4E\x43\xCC\xD1\xDD\x1D\x10\x1B\x06\x7F\xB7\xA4\xFA\xD3\xD9\x4D\xFB\x23\x9F\x23\x54\x5B\xE6\x8B\x2F\x04\x28\x8B\xB5\x27\x6D\x89\xA1\xEC\x98\x69\xDC\xE7\x8D\x26\x83\x05\x79\x74\xEC\xB4\xB9\xA3\x97\xC1\x35\x00\xFD\x15\xDA\x39\x81\x3A\x95\x31\x90\xDE\x97\xE9\x86\xA8\x99\x77\x0C\xE5\x5A\xA0\x84\xFF\x12\x16\xAC\x6E\xB8\x8D\xC3\x7B\x92\xC2\xAC\x2E\xD0\x7D\x28\xEC\xB6\xF3\x60\x38\x69\x6F\x3E\xD8\x04\x55\x3E\x9E\xCC\x55\xD2\xBA\xFE\xBB\x47\x04\xD7\x0A\xD9\x16\x0A\x34\x29\xF5\x58\x13\xD5\x4F\xCF\x8F\x56\x4B\xB3\x1E\xEE\xD3\x98\x79\xDA\x08\x1E\x0C\x6F\xB8\xF8\x16\x27\xEF\xC2\x6F\x3D\xF6\xA3\x4B\x3E\x0E\xE4\x6D\x6C\xDB\x3B\x41\x12\x9B\xBD\x0D\x47\x23\x7F\x3C\x4A\xD0\xAF\xC0\xAF\xF6\xEF\x1B\xB5\x15\xC4\xEB\x83\xC4\x09\x5F\x74\x8B\xD9\x11\xFB\xC2\x56\xB1\x3C\xF8\x70\xCA\x34\x8D\x43\x40\x13\x8C\xFD\x99\x03\x54\x79\xC6\x2E\xEA\x86\xA1\xF6\x3A\xD4\x09\xBC\xF4\xBC\x66\xCC\x3D\x58\xD0\x57\x49\x0A\xEE\x25\xE2\x41\xEE\x13\xF9\x9B\x38\x34\xD1\x00\xF5\x7E\xE7\x94\x1D\xFC\x69\x03\x62\xB8\x99\x05\x05\x3D\x6B\x78\x12\xBD\xB0\x6F\x65", - ["CN=TDC OCES CA,O=TDC,C=DK"] = "\x30\x82\x05\x19\x30\x82\x04\x01\xA0\x03\x02\x01\x02\x02\x04\x3E\x48\xBD\xC4\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x31\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x4B\x31\x0C\x30\x0A\x06\x03\x55\x04\x0A\x13\x03\x54\x44\x43\x31\x14\x30\x12\x06\x03\x55\x04\x03\x13\x0B\x54\x44\x43\x20\x4F\x43\x45\x53\x20\x43\x41\x30\x1E\x17\x0D\x30\x33\x30\x32\x31\x31\x30\x38\x33\x39\x33\x30\x5A\x17\x0D\x33\x37\x30\x32\x31\x31\x30\x39\x30\x39\x33\x30\x5A\x30\x31\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x4B\x31\x0C\x30\x0A\x06\x03\x55\x04\x0A\x13\x03\x54\x44\x43\x31\x14\x30\x12\x06\x03\x55\x04\x03\x13\x0B\x54\x44\x43\x20\x4F\x43\x45\x53\x20\x43\x41\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xAC\x62\xF6\x61\x20\xB2\xCF\xC0\xC6\x85\xD7\xE3\x79\xE6\xCC\xED\xF2\x39\x92\xA4\x97\x2E\x64\xA3\x84\x5B\x87\x9C\x4C\xFD\xA4\xF3\xC4\x5F\x21\xBD\x56\x10\xEB\xDB\x2E\x61\xEC\x93\x69\xE3\xA3\xCC\xBD\x99\xC3\x05\xFC\x06\xB8\xCA\x36\x1C\xFE\x90\x8E\x49\x4C\xC4\x56\x9A\x2F\x56\xBC\xCF\x7B\x0C\xF1\x6F\x47\xA6\x0D\x43\x4D\xE2\xE9\x1D\x39\x34\xCD\x8D\x2C\xD9\x12\x98\xF9\xE3\xE1\xC1\x4A\x7C\x86\x38\xC4\xA9\xC4\x61\x88\xD2\x5E\xAF\x1A\x26\x4D\xD5\xE4\xA0\x22\x47\x84\xD9\x64\xB7\x19\x96\xFC\xEC\x19\xE4\xB2\x97\x26\x4E\x4A\x4C\xCB\x8F\x24\x8B\x54\x18\x1C\x48\x61\x7B\xD5\x88\x68\xDA\x5D\xB5\xEA\xCD\x1A\x30\xC1\x80\x83\x76\x50\xAA\x4F\xD1\xD4\xDD\x38\xF0\xEF\x16\xF4\xE1\x0C\x50\x06\xBF\xEA\xFB\x7A\x49\xA1\x28\x2B\x1C\xF6\xFC\x15\x32\xA3\x74\x6A\x8F\xA9\xC3\x62\x29\x71\x31\xE5\x3B\xA4\x60\x17\x5E\x74\xE6\xDA\x13\xED\xE9\x1F\x1F\x1B\xD1\xB2\x68\x73\xC6\x10\x34\x75\x46\x10\x10\xE3\x90\x00\x76\x40\xCB\x8B\xB7\x43\x09\x21\xFF\xAB\x4E\x93\xC6\x58\xE9\xA5\x82\xDB\x77\xC4\x3A\x99\xB1\x72\x95\x49\x04\xF0\xB7\x2B\xFA\x7B\x59\x8E\xDD\x02\x03\x01\x00\x01\xA3\x82\x02\x37\x30\x82\x02\x33\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x81\xEC\x06\x03\x55\x1D\x20\x04\x81\xE4\x30\x81\xE1\x30\x81\xDE\x06\x08\x2A\x81\x50\x81\x29\x01\x01\x01\x30\x81\xD1\x30\x2F\x06\x08\x2B\x06\x01\x05\x05\x07\x02\x01\x16\x23\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x63\x65\x72\x74\x69\x66\x69\x6B\x61\x74\x2E\x64\x6B\x2F\x72\x65\x70\x6F\x73\x69\x74\x6F\x72\x79\x30\x81\x9D\x06\x08\x2B\x06\x01\x05\x05\x07\x02\x02\x30\x81\x90\x30\x0A\x16\x03\x54\x44\x43\x30\x03\x02\x01\x01\x1A\x81\x81\x43
\x65\x72\x74\x69\x66\x69\x6B\x61\x74\x65\x72\x20\x66\x72\x61\x20\x64\x65\x6E\x6E\x65\x20\x43\x41\x20\x75\x64\x73\x74\x65\x64\x65\x73\x20\x75\x6E\x64\x65\x72\x20\x4F\x49\x44\x20\x31\x2E\x32\x2E\x32\x30\x38\x2E\x31\x36\x39\x2E\x31\x2E\x31\x2E\x31\x2E\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x73\x20\x66\x72\x6F\x6D\x20\x74\x68\x69\x73\x20\x43\x41\x20\x61\x72\x65\x20\x69\x73\x73\x75\x65\x64\x20\x75\x6E\x64\x65\x72\x20\x4F\x49\x44\x20\x31\x2E\x32\x2E\x32\x30\x38\x2E\x31\x36\x39\x2E\x31\x2E\x31\x2E\x31\x2E\x30\x11\x06\x09\x60\x86\x48\x01\x86\xF8\x42\x01\x01\x04\x04\x03\x02\x00\x07\x30\x81\x81\x06\x03\x55\x1D\x1F\x04\x7A\x30\x78\x30\x48\xA0\x46\xA0\x44\xA4\x42\x30\x40\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x4B\x31\x0C\x30\x0A\x06\x03\x55\x04\x0A\x13\x03\x54\x44\x43\x31\x14\x30\x12\x06\x03\x55\x04\x03\x13\x0B\x54\x44\x43\x20\x4F\x43\x45\x53\x20\x43\x41\x31\x0D\x30\x0B\x06\x03\x55\x04\x03\x13\x04\x43\x52\x4C\x31\x30\x2C\xA0\x2A\xA0\x28\x86\x26\x68\x74\x74\x70\x3A\x2F\x2F\x63\x72\x6C\x2E\x6F\x63\x65\x73\x2E\x63\x65\x72\x74\x69\x66\x69\x6B\x61\x74\x2E\x64\x6B\x2F\x6F\x63\x65\x73\x2E\x63\x72\x6C\x30\x2B\x06\x03\x55\x1D\x10\x04\x24\x30\x22\x80\x0F\x32\x30\x30\x33\x30\x32\x31\x31\x30\x38\x33\x39\x33\x30\x5A\x81\x0F\x32\x30\x33\x37\x30\x32\x31\x31\x30\x39\x30\x39\x33\x30\x5A\x30\x1F\x06\x03\x55\x1D\x23\x04\x18\x30\x16\x80\x14\x60\xB5\x85\xEC\x56\x64\x7E\x12\x19\x27\x67\x1D\x50\x15\x4B\x73\xAE\x3B\xF9\x12\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x60\xB5\x85\xEC\x56\x64\x7E\x12\x19\x27\x67\x1D\x50\x15\x4B\x73\xAE\x3B\xF9\x12\x30\x1D\x06\x09\x2A\x86\x48\x86\xF6\x7D\x07\x41\x00\x04\x10\x30\x0E\x1B\x08\x56\x36\x2E\x30\x3A\x34\x2E\x30\x03\x02\x04\x90\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x0A\xBA\x26\x26\x46\xD3\x73\xA8\x09\xF3\x6B\x0B\x30\x99\xFD\x8A\xE1\x57\x7A\x11\xD3\xB8\x94\xD7\x09\x10\x6E\xA3\xB1\x38\x03\xD1\xB6\xF2\x43\x41\x29\x62\xA7\x72\xD8\xFB\x7C\x05\xE6\x31\x70\x27\x54\x18\x4E\x8A\x7C\x4E\xE5\xD1\xCA\x8C\x78\x88\xCF\x1B\xD3\x90\x8B\xE6\x23\xF8\x0B\x0E\x33\x43\x7D\x9C\xE2\x0A\x19\x8F\xC9\x01\x3E\x74\x5D\x74\xC9\x8B\x1C\x03\xE5\x18\xC8\x01\x4C\x3F\xCB\x97\x05\x5D\x98\x71\xA6\x98\x6F\xB6\x7C\xBD\x37\x7F\xBE\xE1\x93\x25\x6D\x6F\xF0\x0A\xAD\x17\x18\xE1\x03\xBC\x07\x29\xC8\xAD\x26\xE8\xF8\x61\xF0\xFD\x21\x09\x7E\x9A\x8E\xA9\x68\x7D\x48\x62\x72\xBD\x00\xEA\x01\x99\xB8\x06\x82\x51\x81\x4E\xF1\xF5\xB4\x91\x54\xB9\x23\x7A\x00\x9A\x9F\x5D\x8D\xE0\x3C\x64\xB9\x1A\x12\x92\x2A\xC7\x82\x44\x72\x39\xDC\xE2\x3C\xC6\xD8\x55\xF5\x15\x4E\xC8\x05\x0E\xDB\xC6\xD0\x62\xA6\xEC\x15\xB4\xB5\x02\x82\xDB\xAC\x8C\xA2\x81\xF0\x9B\x99\x31\xF5\x20\x20\xA8\x88\x61\x0A\x07\x9F\x94\xFC\xD0\xD7\x1B\xCC\x2E\x17\xF3\x04\x27\x76\x67\xEB\x54\x83\xFD\xA4\x90\x7E\x06\x3D\x04\xA3\x43\x2D\xDA\xFC\x0B\x62\xEA\x2F\x5F\x62\x53", ["CN=UTN - DATACorp SGC,OU=http://www.usertrust.com,O=The USERTRUST Network,L=Salt Lake City,ST=UT,C=US"] = 
"\x30\x82\x04\x5E\x30\x82\x03\x46\xA0\x03\x02\x01\x02\x02\x10\x44\xBE\x0C\x8B\x50\x00\x21\xB4\x11\xD3\x2A\x68\x06\xA9\xAD\x69\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x81\x93\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0B\x30\x09\x06\x03\x55\x04\x08\x13\x02\x55\x54\x31\x17\x30\x15\x06\x03\x55\x04\x07\x13\x0E\x53\x61\x6C\x74\x20\x4C\x61\x6B\x65\x20\x43\x69\x74\x79\x31\x1E\x30\x1C\x06\x03\x55\x04\x0A\x13\x15\x54\x68\x65\x20\x55\x53\x45\x52\x54\x52\x55\x53\x54\x20\x4E\x65\x74\x77\x6F\x72\x6B\x31\x21\x30\x1F\x06\x03\x55\x04\x0B\x13\x18\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x75\x73\x65\x72\x74\x72\x75\x73\x74\x2E\x63\x6F\x6D\x31\x1B\x30\x19\x06\x03\x55\x04\x03\x13\x12\x55\x54\x4E\x20\x2D\x20\x44\x41\x54\x41\x43\x6F\x72\x70\x20\x53\x47\x43\x30\x1E\x17\x0D\x39\x39\x30\x36\x32\x34\x31\x38\x35\x37\x32\x31\x5A\x17\x0D\x31\x39\x30\x36\x32\x34\x31\x39\x30\x36\x33\x30\x5A\x30\x81\x93\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0B\x30\x09\x06\x03\x55\x04\x08\x13\x02\x55\x54\x31\x17\x30\x15\x06\x03\x55\x04\x07\x13\x0E\x53\x61\x6C\x74\x20\x4C\x61\x6B\x65\x20\x43\x69\x74\x79\x31\x1E\x30\x1C\x06\x03\x55\x04\x0A\x13\x15\x54\x68\x65\x20\x55\x53\x45\x52\x54\x52\x55\x53\x54\x20\x4E\x65\x74\x77\x6F\x72\x6B\x31\x21\x30\x1F\x06\x03\x55\x04\x0B\x13\x18\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x75\x73\x65\x72\x74\x72\x75\x73\x74\x2E\x63\x6F\x6D\x31\x1B\x30\x19\x06\x03\x55\x04\x03\x13\x12\x55\x54\x4E\x20\x2D\x20\x44\x41\x54\x41\x43\x6F\x72\x70\x20\x53\x47\x43\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xDF\xEE\x58\x10\xA2\x2B\x6E\x55\xC4\x8E\xBF\x2E\x46\x09\xE7\xE0\x08\x0F\x2E\x2B\x7A\x13\x94\x1B\xBD\xF6\xB6\x80\x8E\x65\x05\x93\x00\x1E\xBC\xAF\xE2\x0F\x8E\x19\x0D\x12\x47\xEC\xAC\xAD\xA3\xFA\x2E\x70\xF8\xDE\x6E\xFB\x56\x42\x15\x9E\x2E\x5C\xEF\x23\xDE\x21\xB9\x05\x76\x27\x19\x0F\x4F\xD6\xC3\x9C\xB4\xBE\x94\x19\x63\xF2\xA6\x11\x0A\xEB\x53\x48\x9C\xBE\xF2\x29\x3B\x16\xE8\x1A\xA0\x4C\xA6\xC9\xF4\x18\x59\x68\xC0\x70\xF2\x53\x00\xC0\x5E\x50\x82\xA5\x56\x6F\x36\xF9\x4A\xE0\x44\x86\xA0\x4D\x4E\xD6\x47\x6E\x49\x4A\xCB\x67\xD7\xA6\xC4\x05\xB9\x8E\x1E\xF4\xFC\xFF\xCD\xE7\x36\xE0\x9C\x05\x6C\xB2\x33\x22\x15\xD0\xB4\xE0\xCC\x17\xC0\xB2\xC0\xF4\xFE\x32\x3F\x29\x2A\x95\x7B\xD8\xF2\xA7\x4E\x0F\x54\x7C\xA1\x0D\x80\xB3\x09\x03\xC1\xFF\x5C\xDD\x5E\x9A\x3E\xBC\xAE\xBC\x47\x8A\x6A\xAE\x71\xCA\x1F\xB1\x2A\xB8\x5F\x42\x05\x0B\xEC\x46\x30\xD1\x72\x0B\xCA\xE9\x56\x6D\xF5\xEF\xDF\x78\xBE\x61\xBA\xB2\xA5\xAE\x04\x4C\xBC\xA8\xAC\x69\x15\x97\xBD\xEF\xEB\xB4\x8C\xBF\x35\xF8\xD4\xC3\xD1\x28\x0E\x5C\x3A\x9F\x70\x18\x33\x20\x77\xC4\xA2\xAF\x02\x03\x01\x00\x01\xA3\x81\xAB\x30\x81\xA8\x30\x0B\x06\x03\x55\x1D\x0F\x04\x04\x03\x02\x01\xC6\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x53\x32\xD1\xB3\xCF\x7F\xFA\xE0\xF1\xA0\x5D\x85\x4E\x92\xD2\x9E\x45\x1D\xB4\x4F\x30\x3D\x06\x03\x55\x1D\x1F\x04\x36\x30\x34\x30\x32\xA0\x30\xA0\x2E\x86\x2C\x68\x74\x74\x70\x3A\x2F\x2F\x63\x72\x6C\x2E\x75\x73\x65\x72\x74\x72\x75\x73\x74\x2E\x63\x6F\x6D\x2F\x55\x54\x4E\x2D\x44\x41\x54\x41\x43\x6F\x72\x70\x53\x47\x43\x2E\x63\x72\x6C\x30\x2A\x06\x03\x55\x1D\x25\x04\x23\x30\x21\x06\x08\x2B\x06\x01\x05\x05\x07\x03\x01\x06\x0A\x2B\x06\x01\x04\x01\x82\x37\x0A\x03\x03\x06\x09\x60\x86\x48\x01\x86\xF8\x42\x04\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x27\x35\x97\x00\x8A\x8B\x28\xBD\xC6\x33\x30\x1E\x29\xFC\xE2\xF7\xD5\x98\xD4\x40\xBB\x60\x
CA\xBF\xAB\x17\x2C\x09\x36\x7F\x50\xFA\x41\xDC\xAE\x96\x3A\x0A\x23\x3E\x89\x59\xC9\xA3\x07\xED\x1B\x37\xAD\xFC\x7C\xBE\x51\x49\x5A\xDE\x3A\x0A\x54\x08\x16\x45\xC2\x99\xB1\x87\xCD\x8C\x68\xE0\x69\x03\xE9\xC4\x4E\x98\xB2\x3B\x8C\x16\xB3\x0E\xA0\x0C\x98\x50\x9B\x93\xA9\x70\x09\xC8\x2C\xA3\x8F\xDF\x02\xE4\xE0\x71\x3A\xF1\xB4\x23\x72\xA0\xAA\x01\xDF\xDF\x98\x3E\x14\x50\xA0\x31\x26\xBD\x28\xE9\x5A\x30\x26\x75\xF9\x7B\x60\x1C\x8D\xF3\xCD\x50\x26\x6D\x04\x27\x9A\xDF\xD5\x0D\x45\x47\x29\x6B\x2C\xE6\x76\xD9\xA9\x29\x7D\x32\xDD\xC9\x36\x3C\xBD\xAE\x35\xF1\x11\x9E\x1D\xBB\x90\x3F\x12\x47\x4E\x8E\xD7\x7E\x0F\x62\x73\x1D\x52\x26\x38\x1C\x18\x49\xFD\x30\x74\x9A\xC4\xE5\x22\x2F\xD8\xC0\x8D\xED\x91\x7A\x4C\x00\x8F\x72\x7F\x5D\xDA\xDD\x1B\x8B\x45\x6B\xE7\xDD\x69\x97\xA8\xC5\x56\x4C\x0F\x0C\xF6\x9F\x7A\x91\x37\xF6\x97\x82\xE0\xDD\x71\x69\xFF\x76\x3F\x60\x4D\x3C\xCF\xF7\x99\xF9\xC6\x57\xF4\xC9\x55\x39\x78\xBA\x2C\x79\xC9\xA6\x88\x2B\xF4\x08", ["CN=UTN-USERFirst-Hardware,OU=http://www.usertrust.com,O=The USERTRUST Network,L=Salt Lake City,ST=UT,C=US"] = "\x30\x82\x04\x74\x30\x82\x03\x5C\xA0\x03\x02\x01\x02\x02\x10\x44\xBE\x0C\x8B\x50\x00\x24\xB4\x11\xD3\x36\x2A\xFE\x65\x0A\xFD\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x81\x97\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0B\x30\x09\x06\x03\x55\x04\x08\x13\x02\x55\x54\x31\x17\x30\x15\x06\x03\x55\x04\x07\x13\x0E\x53\x61\x6C\x74\x20\x4C\x61\x6B\x65\x20\x43\x69\x74\x79\x31\x1E\x30\x1C\x06\x03\x55\x04\x0A\x13\x15\x54\x68\x65\x20\x55\x53\x45\x52\x54\x52\x55\x53\x54\x20\x4E\x65\x74\x77\x6F\x72\x6B\x31\x21\x30\x1F\x06\x03\x55\x04\x0B\x13\x18\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x75\x73\x65\x72\x74\x72\x75\x73\x74\x2E\x63\x6F\x6D\x31\x1F\x30\x1D\x06\x03\x55\x04\x03\x13\x16\x55\x54\x4E\x2D\x55\x53\x45\x52\x46\x69\x72\x73\x74\x2D\x48\x61\x72\x64\x77\x61\x72\x65\x30\x1E\x17\x0D\x39\x39\x30\x37\x30\x39\x31\x38\x31\x30\x34\x32\x5A\x17\x0D\x31\x39\x30\x37\x30\x39\x31\x38\x31\x39\x32\x32\x5A\x30\x81\x97\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x0B\x30\x09\x06\x03\x55\x04\x08\x13\x02\x55\x54\x31\x17\x30\x15\x06\x03\x55\x04\x07\x13\x0E\x53\x61\x6C\x74\x20\x4C\x61\x6B\x65\x20\x43\x69\x74\x79\x31\x1E\x30\x1C\x06\x03\x55\x04\x0A\x13\x15\x54\x68\x65\x20\x55\x53\x45\x52\x54\x52\x55\x53\x54\x20\x4E\x65\x74\x77\x6F\x72\x6B\x31\x21\x30\x1F\x06\x03\x55\x04\x0B\x13\x18\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x75\x73\x65\x72\x74\x72\x75\x73\x74\x2E\x63\x6F\x6D\x31\x1F\x30\x1D\x06\x03\x55\x04\x03\x13\x16\x55\x54\x4E\x2D\x55\x53\x45\x52\x46\x69\x72\x73\x74\x2D\x48\x61\x72\x64\x77\x61\x72\x65\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xB1\xF7\xC3\x38\x3F\xB4\xA8\x7F\xCF\x39\x82\x51\x67\xD0\x6D\x9F\xD2\xFF\x58\xF3\xE7\x9F\x2B\xEC\x0D\x89\x54\x99\xB9\x38\x99\x16\xF7\xE0\x21\x79\x48\xC2\xBB\x61\x74\x12\x96\x1D\x3C\x6A\x72\xD5\x3C\x10\x67\x3A\x39\xED\x2B\x13\xCD\x66\xEB\x95\x09\x33\xA4\x6C\x97\xB1\xE8\xC6\xEC\xC1\x75\x79\x9C\x46\x5E\x8D\xAB\xD0\x6A\xFD\xB9\x2A\x55\x17\x10\x54\xB3\x19\xF0\x9A\xF6\xF1\xB1\x5D\xB6\xA7\x6D\xFB\xE0\x71\x17\x6B\xA2\x88\xFB\x00\xDF\xFE\x1A\x31\x77\x0C\x9A\x01\x7A\xB1\x32\xE3\x2B\x01\x07\x38\x6E\xC3\xA5\x5E\x23\xBC\x45\x9B\x7B\x50\xC1\xC9\x30\x8F\xDB\xE5\x2B\x7A\xD3\x5B\xFB\x33\x40\x1E\xA0\xD5\x98\x17\xBC\x8B\x87\xC3\x89\xD3\x5D\xA0\x8E\xB2\xAA\xAA\xF6\x8E\x69\x88\x06\xC5\xFA\x89\x21\xF3\x08\x9D\x69\x2E\x09\x33\x9B\x29\x0D\x46\x0F\x8C\xCC\x49\x34\xB0\x69\x51\xBD\xF9\x06\xCD\x68\xAD\x66\x4C\xBC\x3E\xAC\x61\xBD\x0A\x88\x0E\xC8\
xDF\x3D\xEE\x7C\x04\x4C\x9D\x0A\x5E\x6B\x91\xD6\xEE\xC7\xED\x28\x8D\xAB\x4D\x87\x89\x73\xD0\x6E\xA4\xD0\x1E\x16\x8B\x14\xE1\x76\x44\x03\x7F\x63\xAC\xE4\xCD\x49\x9C\xC5\x92\xF4\xAB\x32\xA1\x48\x5B\x02\x03\x01\x00\x01\xA3\x81\xB9\x30\x81\xB6\x30\x0B\x06\x03\x55\x1D\x0F\x04\x04\x03\x02\x01\xC6\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\xA1\x72\x5F\x26\x1B\x28\x98\x43\x95\x5D\x07\x37\xD5\x85\x96\x9D\x4B\xD2\xC3\x45\x30\x44\x06\x03\x55\x1D\x1F\x04\x3D\x30\x3B\x30\x39\xA0\x37\xA0\x35\x86\x33\x68\x74\x74\x70\x3A\x2F\x2F\x63\x72\x6C\x2E\x75\x73\x65\x72\x74\x72\x75\x73\x74\x2E\x63\x6F\x6D\x2F\x55\x54\x4E\x2D\x55\x53\x45\x52\x46\x69\x72\x73\x74\x2D\x48\x61\x72\x64\x77\x61\x72\x65\x2E\x63\x72\x6C\x30\x31\x06\x03\x55\x1D\x25\x04\x2A\x30\x28\x06\x08\x2B\x06\x01\x05\x05\x07\x03\x01\x06\x08\x2B\x06\x01\x05\x05\x07\x03\x05\x06\x08\x2B\x06\x01\x05\x05\x07\x03\x06\x06\x08\x2B\x06\x01\x05\x05\x07\x03\x07\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x47\x19\x0F\xDE\x74\xC6\x99\x97\xAF\xFC\xAD\x28\x5E\x75\x8E\xEB\x2D\x67\xEE\x4E\x7B\x2B\xD7\x0C\xFF\xF6\xDE\xCB\x55\xA2\x0A\xE1\x4C\x54\x65\x93\x60\x6B\x9F\x12\x9C\xAD\x5E\x83\x2C\xEB\x5A\xAE\xC0\xE4\x2D\xF4\x00\x63\x1D\xB8\xC0\x6C\xF2\xCF\x49\xBB\x4D\x93\x6F\x06\xA6\x0A\x22\xB2\x49\x62\x08\x4E\xFF\xC8\xC8\x14\xB2\x88\x16\x5D\xE7\x01\xE4\x12\x95\xE5\x45\x34\xB3\x8B\x69\xBD\xCF\xB4\x85\x8F\x75\x51\x9E\x7D\x3A\x38\x3A\x14\x48\x12\xC6\xFB\xA7\x3B\x1A\x8D\x0D\x82\x40\x07\xE8\x04\x08\x90\xA1\x89\xCB\x19\x50\xDF\xCA\x1C\x01\xBC\x1D\x04\x19\x7B\x10\x76\x97\x3B\xEE\x90\x90\xCA\xC4\x0E\x1F\x16\x6E\x75\xEF\x33\xF8\xD3\x6F\x5B\x1E\x96\xE3\xE0\x74\x77\x74\x7B\x8A\xA2\x6E\x2D\xDD\x76\xD6\x39\x30\x82\xF0\xAB\x9C\x52\xF2\x2A\xC7\xAF\x49\x5E\x7E\xC7\x68\xE5\x82\x81\xC8\x6A\x27\xF9\x27\x88\x2A\xD5\x58\x50\x95\x1F\xF0\x3B\x1C\x57\xBB\x7D\x14\x39\x62\x2B\x9A\xC9\x94\x92\x2A\xA3\x22\x0C\xFF\x89\x26\x7D\x5F\x23\x2B\x47\xD7\x15\x1D\xA9\x6A\x9E\x51\x0D\x2A\x51\x9E\x81\xF9\xD4\x3B\x5E\x70\x12\x7F\x10\x32\x9C\x1E\xBB\x9D\xF8\x66\xA8", ["CN=Chambers of Commerce Root,OU=http://www.chambersign.org,O=AC Camerfirma SA CIF A82743287,C=EU"] = 
"\x30\x82\x04\xBD\x30\x82\x03\xA5\xA0\x03\x02\x01\x02\x02\x01\x00\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x7F\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x45\x55\x31\x27\x30\x25\x06\x03\x55\x04\x0A\x13\x1E\x41\x43\x20\x43\x61\x6D\x65\x72\x66\x69\x72\x6D\x61\x20\x53\x41\x20\x43\x49\x46\x20\x41\x38\x32\x37\x34\x33\x32\x38\x37\x31\x23\x30\x21\x06\x03\x55\x04\x0B\x13\x1A\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x63\x68\x61\x6D\x62\x65\x72\x73\x69\x67\x6E\x2E\x6F\x72\x67\x31\x22\x30\x20\x06\x03\x55\x04\x03\x13\x19\x43\x68\x61\x6D\x62\x65\x72\x73\x20\x6F\x66\x20\x43\x6F\x6D\x6D\x65\x72\x63\x65\x20\x52\x6F\x6F\x74\x30\x1E\x17\x0D\x30\x33\x30\x39\x33\x30\x31\x36\x31\x33\x34\x33\x5A\x17\x0D\x33\x37\x30\x39\x33\x30\x31\x36\x31\x33\x34\x34\x5A\x30\x7F\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x45\x55\x31\x27\x30\x25\x06\x03\x55\x04\x0A\x13\x1E\x41\x43\x20\x43\x61\x6D\x65\x72\x66\x69\x72\x6D\x61\x20\x53\x41\x20\x43\x49\x46\x20\x41\x38\x32\x37\x34\x33\x32\x38\x37\x31\x23\x30\x21\x06\x03\x55\x04\x0B\x13\x1A\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x63\x68\x61\x6D\x62\x65\x72\x73\x69\x67\x6E\x2E\x6F\x72\x67\x31\x22\x30\x20\x06\x03\x55\x04\x03\x13\x19\x43\x68\x61\x6D\x62\x65\x72\x73\x20\x6F\x66\x20\x43\x6F\x6D\x6D\x65\x72\x63\x65\x20\x52\x6F\x6F\x74\x30\x82\x01\x20\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0D\x00\x30\x82\x01\x08\x02\x82\x01\x01\x00\xB7\x36\x55\xE5\xA5\x5D\x18\x30\xE0\xDA\x89\x54\x91\xFC\xC8\xC7\x52\xF8\x2F\x50\xD9\xEF\xB1\x75\x73\x65\x47\x7D\x1B\x5B\xBA\x75\xC5\xFC\xA1\x88\x24\xFA\x2F\xED\xCA\x08\x4A\x39\x54\xC4\x51\x7A\xB5\xDA\x60\xEA\x38\x3C\x81\xB2\xCB\xF1\xBB\xD9\x91\x23\x3F\x48\x01\x70\x75\xA9\x05\x2A\xAD\x1F\x71\xF3\xC9\x54\x3D\x1D\x06\x6A\x40\x3E\xB3\x0C\x85\xEE\x5C\x1B\x79\xC2\x62\xC4\xB8\x36\x8E\x35\x5D\x01\x0C\x23\x04\x47\x35\xAA\x9B\x60\x4E\xA0\x66\x3D\xCB\x26\x0A\x9C\x40\xA1\xF4\x5D\x98\xBF\x71\xAB\xA5\x00\x68\x2A\xED\x83\x7A\x0F\xA2\x14\xB5\xD4\x22\xB3\x80\xB0\x3C\x0C\x5A\x51\x69\x2D\x58\x18\x8F\xED\x99\x9E\xF1\xAE\xE2\x95\xE6\xF6\x47\xA8\xD6\x0C\x0F\xB0\x58\x58\xDB\xC3\x66\x37\x9E\x9B\x91\x54\x33\x37\xD2\x94\x1C\x6A\x48\xC9\xC9\xF2\xA5\xDA\xA5\x0C\x23\xF7\x23\x0E\x9C\x32\x55\x5E\x71\x9C\x84\x05\x51\x9A\x2D\xFD\xE6\x4E\x2A\x34\x5A\xDE\xCA\x40\x37\x67\x0C\x54\x21\x55\x77\xDA\x0A\x0C\xCC\x97\xAE\x80\xDC\x94\x36\x4A\xF4\x3E\xCE\x36\x13\x1E\x53\xE4\xAC\x4E\x3A\x05\xEC\xDB\xAE\x72\x9C\x38\x8B\xD0\x39\x3B\x89\x0A\x3E\x77\xFE\x75\x02\x01\x03\xA3\x82\x01\x44\x30\x82\x01\x40\x30\x12\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x08\x30\x06\x01\x01\xFF\x02\x01\x0C\x30\x3C\x06\x03\x55\x1D\x1F\x04\x35\x30\x33\x30\x31\xA0\x2F\xA0\x2D\x86\x2B\x68\x74\x74\x70\x3A\x2F\x2F\x63\x72\x6C\x2E\x63\x68\x61\x6D\x62\x65\x72\x73\x69\x67\x6E\x2E\x6F\x72\x67\x2F\x63\x68\x61\x6D\x62\x65\x72\x73\x72\x6F\x6F\x74\x2E\x63\x72\x6C\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\xE3\x94\xF5\xB1\x4D\xE9\xDB\xA1\x29\x5B\x57\x8B\x4D\x76\x06\x76\xE1\xD1\xA2\x8A\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x11\x06\x09\x60\x86\x48\x01\x86\xF8\x42\x01\x01\x04\x04\x03\x02\x00\x07\x30\x27\x06\x03\x55\x1D\x11\x04\x20\x30\x1E\x81\x1C\x63\x68\x61\x6D\x62\x65\x72\x73\x72\x6F\x6F\x74\x40\x63\x68\x61\x6D\x62\x65\x72\x73\x69\x67\x6E\x2E\x6F\x72\x67\x30\x27\x06\x03\x55\x1D\x12\x04\x20\x30\x1E\x81\x1C\x63\x68\x61\x6D\x62\x65\x72\x73\x72\x6F\x6F\x74\x40\x63\x68\x61\x6D\x62\x65\x72\x73\x69\x67\x6E\x2E\x6F\x72\x67\x30\x58\x06\x03\x55\x1D\x20\x04\x51\x30\x4F\x30\x4D\x06\x0B\x2B\x06\x01\x04\x01\x81\x87\x2E\x0A\x03\x01\x30\x3E\x30\x3C\x06\x08\x2B\x06\x01\x05\x05\x
07\x02\x01\x16\x30\x68\x74\x74\x70\x3A\x2F\x2F\x63\x70\x73\x2E\x63\x68\x61\x6D\x62\x65\x72\x73\x69\x67\x6E\x2E\x6F\x72\x67\x2F\x63\x70\x73\x2F\x63\x68\x61\x6D\x62\x65\x72\x73\x72\x6F\x6F\x74\x2E\x68\x74\x6D\x6C\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x0C\x41\x97\xC2\x1A\x86\xC0\x22\x7C\x9F\xFB\x90\xF3\x1A\xD1\x03\xB1\xEF\x13\xF9\x21\x5F\x04\x9C\xDA\xC9\xA5\x8D\x27\x6C\x96\x87\x91\xBE\x41\x90\x01\x72\x93\xE7\x1E\x7D\x5F\xF6\x89\xC6\x5D\xA7\x40\x09\x3D\xAC\x49\x45\x45\xDC\x2E\x8D\x30\x68\xB2\x09\xBA\xFB\xC3\x2F\xCC\xBA\x0B\xDF\x3F\x77\x7B\x46\x7D\x3A\x12\x24\x8E\x96\x8F\x3C\x05\x0A\x6F\xD2\x94\x28\x1D\x6D\x0C\xC0\x2E\x88\x22\xD5\xD8\xCF\x1D\x13\xC7\xF0\x48\xD7\xD7\x05\xA7\xCF\xC7\x47\x9E\x3B\x3C\x34\xC8\x80\x4F\xD4\x14\xBB\xFC\x0D\x50\xF7\xFA\xB3\xEC\x42\x5F\xA9\xDD\x6D\xC8\xF4\x75\xCF\x7B\xC1\x72\x26\xB1\x01\x1C\x5C\x2C\xFD\x7A\x4E\xB4\x01\xC5\x05\x57\xB9\xE7\x3C\xAA\x05\xD9\x88\xE9\x07\x46\x41\xCE\xEF\x41\x81\xAE\x58\xDF\x83\xA2\xAE\xCA\xD7\x77\x1F\xE7\x00\x3C\x9D\x6F\x8E\xE4\x32\x09\x1D\x4D\x78\x34\x78\x34\x3C\x94\x9B\x26\xED\x4F\x71\xC6\x19\x7A\xBD\x20\x22\x48\x5A\xFE\x4B\x7D\x03\xB7\xE7\x58\xBE\xC6\x32\x4E\x74\x1E\x68\xDD\xA8\x68\x5B\xB3\x3E\xEE\x62\x7D\xD9\x80\xE8\x0A\x75\x7A\xB7\xEE\xB4\x65\x9A\x21\x90\xE0\xAA\xD0\x98\xBC\x38\xB5\x73\x3C\x8B\xF8\xDC", @@ -139,4 +135,12 @@ redef root_certs += { ["CN=Root CA Generalitat Valenciana,OU=PKIGVA,O=Generalitat Valenciana,C=ES"] = "\x30\x82\x06\x8B\x30\x82\x05\x73\xA0\x03\x02\x01\x02\x02\x04\x3B\x45\xE5\x68\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x68\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x45\x53\x31\x1F\x30\x1D\x06\x03\x55\x04\x0A\x13\x16\x47\x65\x6E\x65\x72\x61\x6C\x69\x74\x61\x74\x20\x56\x61\x6C\x65\x6E\x63\x69\x61\x6E\x61\x31\x0F\x30\x0D\x06\x03\x55\x04\x0B\x13\x06\x50\x4B\x49\x47\x56\x41\x31\x27\x30\x25\x06\x03\x55\x04\x03\x13\x1E\x52\x6F\x6F\x74\x20\x43\x41\x20\x47\x65\x6E\x65\x72\x61\x6C\x69\x74\x61\x74\x20\x56\x61\x6C\x65\x6E\x63\x69\x61\x6E\x61\x30\x1E\x17\x0D\x30\x31\x30\x37\x30\x36\x31\x36\x32\x32\x34\x37\x5A\x17\x0D\x32\x31\x30\x37\x30\x31\x31\x35\x32\x32\x34\x37\x5A\x30\x68\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x45\x53\x31\x1F\x30\x1D\x06\x03\x55\x04\x0A\x13\x16\x47\x65\x6E\x65\x72\x61\x6C\x69\x74\x61\x74\x20\x56\x61\x6C\x65\x6E\x63\x69\x61\x6E\x61\x31\x0F\x30\x0D\x06\x03\x55\x04\x0B\x13\x06\x50\x4B\x49\x47\x56\x41\x31\x27\x30\x25\x06\x03\x55\x04\x03\x13\x1E\x52\x6F\x6F\x74\x20\x43\x41\x20\x47\x65\x6E\x65\x72\x61\x6C\x69\x74\x61\x74\x20\x56\x61\x6C\x65\x6E\x63\x69\x61\x6E\x61\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xC6\x2A\xAB\x57\x11\x37\x2F\x22\x8A\xCA\x03\x74\x1D\xCA\xED\x2D\xA2\x0B\xBC\x33\x52\x40\x26\x47\xBE\x5A\x69\xA6\x3B\x72\x36\x17\x4C\xE8\xDF\xB8\xBB\x2F\x76\xE1\x40\x46\x74\x65\x02\x90\x52\x08\xB4\xFF\xA8\x8C\xC1\xE0\xC7\x89\x56\x10\x39\x33\xEF\x68\xB4\x5F\x5F\xDA\x6D\x23\xA1\x89\x5E\x22\xA3\x4A\x06\xF0\x27\xF0\x57\xB9\xF8\xE9\x4E\x32\x77\x0A\x3F\x41\x64\xF3\xEB\x65\xEE\x76\xFE\x54\xAA\x7D\x1D\x20\xAE\xF3\xD7\x74\xC2\x0A\x5F\xF5\x08\x28\x52\x08\xCC\x55\x5D\xD2\x0F\xDB\x9A\x81\xA5\xBB\xA1\xB3\xC1\x94\xCD\x54\xE0\x32\x75\x31\x91\x1A\x62\xB2\xDE\x75\xE2\xCF\x4F\x89\xD9\x91\x90\x0F\x41\x1B\xB4\x5A\x4A\x77\xBD\x67\x83\xE0\x93\xE7\x5E\xA7\x0C\xE7\x81\xD3\xF4\x52\xAC\x53\xB2\x03\xC7\x44\x26\xFB\x79\xE5\xCB\x34\x60\x50\x10\x7B\x1B\xDB\x6B\xD7\x47\xAB\x5F\x7C\x68\xCA\x6E\x9D\x41\x03\x10\xEE\x6B\x99\x7B\x5E\x25\xA8\xC2\xAB\xE4\xC0\xF3\x5C\x9C\xE3\xBE\xCE\x31\x4C\x64\x
1E\x5E\x80\xA2\xF5\x83\x7E\x0C\xD6\xCA\x8C\x55\x8E\xBE\xE0\xBE\x49\x07\x0F\xA3\x24\x41\x7A\x58\x1D\x84\xEA\x58\x12\xC8\xE1\xB7\xED\xEF\x93\xDE\x94\x08\x31\x02\x03\x01\x00\x01\xA3\x82\x03\x3B\x30\x82\x03\x37\x30\x32\x06\x08\x2B\x06\x01\x05\x05\x07\x01\x01\x04\x26\x30\x24\x30\x22\x06\x08\x2B\x06\x01\x05\x05\x07\x30\x01\x86\x16\x68\x74\x74\x70\x3A\x2F\x2F\x6F\x63\x73\x70\x2E\x70\x6B\x69\x2E\x67\x76\x61\x2E\x65\x73\x30\x12\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x08\x30\x06\x01\x01\xFF\x02\x01\x02\x30\x82\x02\x34\x06\x03\x55\x1D\x20\x04\x82\x02\x2B\x30\x82\x02\x27\x30\x82\x02\x23\x06\x0A\x2B\x06\x01\x04\x01\xBF\x55\x02\x01\x00\x30\x82\x02\x13\x30\x82\x01\xE8\x06\x08\x2B\x06\x01\x05\x05\x07\x02\x02\x30\x82\x01\xDA\x1E\x82\x01\xD6\x00\x41\x00\x75\x00\x74\x00\x6F\x00\x72\x00\x69\x00\x64\x00\x61\x00\x64\x00\x20\x00\x64\x00\x65\x00\x20\x00\x43\x00\x65\x00\x72\x00\x74\x00\x69\x00\x66\x00\x69\x00\x63\x00\x61\x00\x63\x00\x69\x00\xF3\x00\x6E\x00\x20\x00\x52\x00\x61\x00\xED\x00\x7A\x00\x20\x00\x64\x00\x65\x00\x20\x00\x6C\x00\x61\x00\x20\x00\x47\x00\x65\x00\x6E\x00\x65\x00\x72\x00\x61\x00\x6C\x00\x69\x00\x74\x00\x61\x00\x74\x00\x20\x00\x56\x00\x61\x00\x6C\x00\x65\x00\x6E\x00\x63\x00\x69\x00\x61\x00\x6E\x00\x61\x00\x2E\x00\x0D\x00\x0A\x00\x4C\x00\x61\x00\x20\x00\x44\x00\x65\x00\x63\x00\x6C\x00\x61\x00\x72\x00\x61\x00\x63\x00\x69\x00\xF3\x00\x6E\x00\x20\x00\x64\x00\x65\x00\x20\x00\x50\x00\x72\x00\xE1\x00\x63\x00\x74\x00\x69\x00\x63\x00\x61\x00\x73\x00\x20\x00\x64\x00\x65\x00\x20\x00\x43\x00\x65\x00\x72\x00\x74\x00\x69\x00\x66\x00\x69\x00\x63\x00\x61\x00\x63\x00\x69\x00\xF3\x00\x6E\x00\x20\x00\x71\x00\x75\x00\x65\x00\x20\x00\x72\x00\x69\x00\x67\x00\x65\x00\x20\x00\x65\x00\x6C\x00\x20\x00\x66\x00\x75\x00\x6E\x00\x63\x00\x69\x00\x6F\x00\x6E\x00\x61\x00\x6D\x00\x69\x00\x65\x00\x6E\x00\x74\x00\x6F\x00\x20\x00\x64\x00\x65\x00\x20\x00\x6C\x00\x61\x00\x20\x00\x70\x00\x72\x00\x65\x00\x73\x00\x65\x00\x6E\x00\x74\x00\x65\x00\x20\x00\x41\x00\x75\x00\x74\x00\x6F\x00\x72\x00\x69\x00\x64\x00\x61\x00\x64\x00\x20\x00\x64\x00\x65\x00\x20\x00\x43\x00\x65\x00\x72\x00\x74\x00\x69\x00\x66\x00\x69\x00\x63\x00\x61\x00\x63\x00\x69\x00\xF3\x00\x6E\x00\x20\x00\x73\x00\x65\x00\x20\x00\x65\x00\x6E\x00\x63\x00\x75\x00\x65\x00\x6E\x00\x74\x00\x72\x00\x61\x00\x20\x00\x65\x00\x6E\x00\x20\x00\x6C\x00\x61\x00\x20\x00\x64\x00\x69\x00\x72\x00\x65\x00\x63\x00\x63\x00\x69\x00\xF3\x00\x6E\x00\x20\x00\x77\x00\x65\x00\x62\x00\x20\x00\x68\x00\x74\x00\x74\x00\x70\x00\x3A\x00\x2F\x00\x2F\x00\x77\x00\x77\x00\x77\x00\x2E\x00\x70\x00\x6B\x00\x69\x00\x2E\x00\x67\x00\x76\x00\x61\x00\x2E\x00\x65\x00\x73\x00\x2F\x00\x63\x00\x70\x00\x73\x30\x25\x06\x08\x2B\x06\x01\x05\x05\x07\x02\x01\x16\x19\x68\x74\x74\x70\x3A\x2F\x2F\x77\x77\x77\x2E\x70\x6B\x69\x2E\x67\x76\x61\x2E\x65\x73\x2F\x63\x70\x73\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x7B\x35\xD3\x40\xD2\x1C\x78\x19\x66\xEF\x74\x10\x28\xDC\x3E\x4F\xB2\x78\x04\xFC\x30\x81\x95\x06\x03\x55\x1D\x23\x04\x81\x8D\x30\x81\x8A\x80\x14\x7B\x35\xD3\x40\xD2\x1C\x78\x19\x66\xEF\x74\x10\x28\xDC\x3E\x4F\xB2\x78\x04\xFC\xA1\x6C\xA4\x6A\x30\x68\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x45\x53\x31\x1F\x30\x1D\x06\x03\x55\x04\x0A\x13\x16\x47\x65\x6E\x65\x72\x61\x6C\x69\x74\x61\x74\x20\x56\x61\x6C\x65\x6E\x63\x69\x61\x6E\x61\x31\x0F\x30\x0D\x06\x03\x55\x04\x0B\x13\x06\x50\x4B\x49\x47\x56\x41\x31\x27\x30\x25\x06\x03\x55\x04\x03\x13\x1E\x52\x6F\x6F\x74\x20\x43\x41\x20\x47\x65\x6E\x65\x72\x61\x6C\x69\x74\x61\x74\x20\x56\x61\x6C\x65\x6E\x63\x69\x61\x6E\x61\x82\x04\x3B\x45\xE5\x68\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\
x00\x03\x82\x01\x01\x00\x24\x61\x4E\xF5\xB5\xC8\x42\x02\x2A\xB3\x5C\x75\xAD\xC5\x6D\xCA\xE7\x94\x3F\xA5\x68\x95\x88\xC1\x54\xC0\x10\x69\xA2\x12\x2F\x18\x3F\x25\x50\xA8\x7C\x4A\xEA\xC6\x09\xD9\xF4\x75\xC6\x40\xDA\xAF\x50\x9D\x3D\xA5\x16\xBB\x6D\x31\xC6\xC7\x73\x0A\x48\xFE\x20\x72\xED\x6F\xCC\xE8\x83\x61\x16\x46\x90\x01\x95\x4B\x7D\x8E\x9A\x52\x09\x2F\xF6\x6F\x1C\xE4\xA1\x71\xCF\x8C\x2A\x5A\x17\x73\x83\x47\x4D\x0F\x36\xFB\x04\x4D\x49\x51\xE2\x14\xC9\x64\x61\xFB\xD4\x14\xE0\xF4\x9E\xB7\x34\x8F\x0A\x26\xBD\x97\x5C\xF4\x79\x3A\x4A\x30\x19\xCC\xAD\x4F\xA0\x98\x8A\xB4\x31\x97\x2A\xE2\x73\x6D\x7E\x78\xB8\xF8\x88\x89\x4F\xB1\x22\x91\x64\x4B\xF5\x50\xDE\x03\xDB\xE5\xC5\x76\xE7\x13\x66\x75\x7E\x65\xFB\x01\x9F\x93\x87\x88\x9D\xF9\x46\x57\x7C\x4D\x60\xAF\x98\x73\x13\x23\xA4\x20\x91\x81\xFA\xD0\x61\x66\xB8\x7D\xD1\xAF\xD6\x6F\x1E\x6C\x3D\xE9\x11\xFD\xA9\xF9\x82\x22\x86\x99\x33\x71\x5A\xEA\x19\x57\x3D\x91\xCD\xA9\xC0\xA3\x6E\x07\x13\xA6\xC9\xED\xF8\x68\xA3\x9E\xC3\x5A\x72\x09\x87\x28\xD1\xC4\x73\xC4\x73\x18\x5F\x50\x75\x16\x31\x9F\xB7\xE8\x7C\xC3", ["CN=A-Trust-nQual-03,OU=A-Trust-nQual-03,O=A-Trust Ges. f. Sicherheitssysteme im elektr. Datenverkehr GmbH,C=AT"] = "\x30\x82\x03\xCF\x30\x82\x02\xB7\xA0\x03\x02\x01\x02\x02\x03\x01\x6C\x1E\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x81\x8D\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x41\x54\x31\x48\x30\x46\x06\x03\x55\x04\x0A\x0C\x3F\x41\x2D\x54\x72\x75\x73\x74\x20\x47\x65\x73\x2E\x20\x66\x2E\x20\x53\x69\x63\x68\x65\x72\x68\x65\x69\x74\x73\x73\x79\x73\x74\x65\x6D\x65\x20\x69\x6D\x20\x65\x6C\x65\x6B\x74\x72\x2E\x20\x44\x61\x74\x65\x6E\x76\x65\x72\x6B\x65\x68\x72\x20\x47\x6D\x62\x48\x31\x19\x30\x17\x06\x03\x55\x04\x0B\x0C\x10\x41\x2D\x54\x72\x75\x73\x74\x2D\x6E\x51\x75\x61\x6C\x2D\x30\x33\x31\x19\x30\x17\x06\x03\x55\x04\x03\x0C\x10\x41\x2D\x54\x72\x75\x73\x74\x2D\x6E\x51\x75\x61\x6C\x2D\x30\x33\x30\x1E\x17\x0D\x30\x35\x30\x38\x31\x37\x32\x32\x30\x30\x30\x30\x5A\x17\x0D\x31\x35\x30\x38\x31\x37\x32\x32\x30\x30\x30\x30\x5A\x30\x81\x8D\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x41\x54\x31\x48\x30\x46\x06\x03\x55\x04\x0A\x0C\x3F\x41\x2D\x54\x72\x75\x73\x74\x20\x47\x65\x73\x2E\x20\x66\x2E\x20\x53\x69\x63\x68\x65\x72\x68\x65\x69\x74\x73\x73\x79\x73\x74\x65\x6D\x65\x20\x69\x6D\x20\x65\x6C\x65\x6B\x74\x72\x2E\x20\x44\x61\x74\x65\x6E\x76\x65\x72\x6B\x65\x68\x72\x20\x47\x6D\x62\x48\x31\x19\x30\x17\x06\x03\x55\x04\x0B\x0C\x10\x41\x2D\x54\x72\x75\x73\x74\x2D\x6E\x51\x75\x61\x6C\x2D\x30\x33\x31\x19\x30\x17\x06\x03\x55\x04\x03\x0C\x10\x41\x2D\x54\x72\x75\x73\x74\x2D\x6E\x51\x75\x61\x6C\x2D\x30\x33\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xAD\x3D\x61\x6E\x03\xF3\x90\x3B\xC0\x41\x0B\x84\x80\xCD\xEC\x2A\xA3\x9D\x6B\xBB\x6E\xC2\x42\x84\xF7\x51\x14\xE1\xA0\xA8\x2D\x51\xA3\x51\xF2\xDE\x23\xF0\x34\x44\xFF\x94\xEB\xCC\x05\x23\x95\x40\xB9\x07\x78\xA5\x25\xF6\x0A\xBD\x45\x86\xE8\xD9\xBD\xC0\x04\x8E\x85\x44\x61\xEF\x7F\xA7\xC9\xFA\xC1\x25\xCC\x85\x2C\x63\x3F\x05\x60\x73\x49\x05\xE0\x60\x78\x95\x10\x4B\xDC\xF9\x11\x59\xCE\x71\x7F\x40\x9B\x8A\xAA\x24\xDF\x0B\x42\xE2\xDB\x56\xBC\x4A\xD2\xA5\x0C\x9B\xB7\x43\x3E\xDD\x83\xD3\x26\x10\x02\xCF\xEA\x23\xC4\x49\x4E\xE5\xD3\xE9\xB4\x88\xAB\x0C\xAE\x62\x92\xD4\x65\x87\xD9\x6A\xD7\xF4\x85\x9F\xE4\x33\x22\x25\xA5\xE5\xC8\x33\xBA\xC3\xC7\x41\xDC\x5F\xC6\x6A\xCC\x00\x0E\x6D\x32\xA8\xB6\x87\x36\x00\x62\x77\x9B\x1E\x1F\x34\xCB\x90\x3C\x78\x88\x74\x05\xEB\x79\xF5\x93\x71\x65\xCA\x9D\xC7\x6B\x18\x2D\x3D\x5C\x4E\xE7\xD5\xF8\x3F\x31\x7D\x8F\x87\xE
C\x0A\x22\x2F\x23\xE9\xFE\xBB\x7D\xC9\xE0\xF4\xEC\xEB\x7C\xC4\xB0\xC3\x2D\x62\xB5\x9A\x71\xD6\xB1\x6A\xE8\xEC\xD9\xED\xD5\x72\xEC\xBE\x57\x01\xCE\x05\x55\x9F\xDE\xD1\x60\x88\x10\xB3\x02\x03\x01\x00\x01\xA3\x36\x30\x34\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x11\x06\x03\x55\x1D\x0E\x04\x0A\x04\x08\x44\x6A\x95\x67\x55\x79\x11\x4F\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x55\xD4\x54\xD1\x59\x48\x5C\xB3\x93\x85\xAA\xBF\x63\x2F\xE4\x80\xCE\x34\xA3\x34\x62\x3E\xF6\xD8\xEE\x67\x88\x31\x04\x03\x6F\x0B\xD4\x07\xFB\x4E\x75\x0F\xD3\x2E\xD3\xC0\x17\xC7\xC6\x28\xEC\x06\x0D\x11\x24\x0E\x0E\xA5\x5D\xBF\x8C\xB2\x13\x96\x71\xDC\xD4\xCE\x0E\x0D\x0A\x68\x32\x6C\xB9\x41\x31\x19\xAB\xB1\x07\x7B\x4D\x98\xD3\x5C\xB0\xD1\xF0\xA7\x42\xA0\xB5\xC4\x8E\xAF\xFE\xF1\x3F\xF4\xEF\x4F\x46\x00\x76\xEB\x02\xFB\xF9\x9D\xD2\x40\x96\xC7\x88\x3A\xB8\x9F\x11\x79\xF3\x80\x65\xA8\xBD\x1F\xD3\x78\x81\xA0\x51\x4C\x37\xB4\xA6\x5D\x25\x70\xD1\x66\xC9\x68\xF9\x2E\x11\x14\x68\xF1\x54\x98\x08\xAC\x26\x92\x0F\xDE\x89\x9E\xD4\xFA\xB3\x79\x2B\xD2\xA3\x79\xD4\xEC\x8B\xAC\x87\x53\x68\x42\x4C\x51\x51\x74\x1E\x1B\x27\x2E\xE3\xF5\x1F\x29\x74\x4D\xED\xAF\xF7\xE1\x92\x99\x81\xE8\xBE\x3A\xC7\x17\x50\xF6\xB7\xC6\xFC\x9B\xB0\x8A\x6B\xD6\x88\x03\x91\x8F\x06\x77\x3A\x85\x02\xDD\x98\xD5\x43\x78\x3F\xC6\x30\x15\xAC\x9B\x6B\xCB\x57\xB7\x89\x51\x8B\x3A\xE8\xC9\x84\x0C\xDB\xB1\x50\x20\x0A\x1A\x4A\xBA\x6A\x1A\xBD\xEC\x1B\xC8\xC5\x84\x9A\xCD", ["CN=TWCA Root Certification Authority,OU=Root CA,O=TAIWAN-CA,C=TW"] = "\x30\x82\x03\x7B\x30\x82\x02\x63\xA0\x03\x02\x01\x02\x02\x01\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x5F\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x54\x57\x31\x12\x30\x10\x06\x03\x55\x04\x0A\x0C\x09\x54\x41\x49\x57\x41\x4E\x2D\x43\x41\x31\x10\x30\x0E\x06\x03\x55\x04\x0B\x0C\x07\x52\x6F\x6F\x74\x20\x43\x41\x31\x2A\x30\x28\x06\x03\x55\x04\x03\x0C\x21\x54\x57\x43\x41\x20\x52\x6F\x6F\x74\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x30\x1E\x17\x0D\x30\x38\x30\x38\x32\x38\x30\x37\x32\x34\x33\x33\x5A\x17\x0D\x33\x30\x31\x32\x33\x31\x31\x35\x35\x39\x35\x39\x5A\x30\x5F\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x54\x57\x31\x12\x30\x10\x06\x03\x55\x04\x0A\x0C\x09\x54\x41\x49\x57\x41\x4E\x2D\x43\x41\x31\x10\x30\x0E\x06\x03\x55\x04\x0B\x0C\x07\x52\x6F\x6F\x74\x20\x43\x41\x31\x2A\x30\x28\x06\x03\x55\x04\x03\x0C\x21\x54\x57\x43\x41\x20\x52\x6F\x6F\x74\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xB0\x7E\x72\xB8\xA4\x03\x94\xE6\xA7\xDE\x09\x38\x91\x4A\x11\x40\x87\xA7\x7C\x59\x64\x14\x7B\xB5\x11\x10\xDD\xFE\xBF\xD5\xC0\xBB\x56\xE2\x85\x25\xF4\x35\x72\x0F\xF8\x53\xD0\x41\xE1\x44\x01\xC2\xB4\x1C\xC3\x31\x42\x16\x47\x85\x33\x22\x76\xB2\x0A\x6F\x0F\xE5\x25\x50\x4F\x85\x86\xBE\xBF\x98\x2E\x10\x67\x1E\xBE\x11\x05\x86\x05\x90\xC4\x59\xD0\x7C\x78\x10\xB0\x80\x5C\xB7\xE1\xC7\x2B\x75\xCB\x7C\x9F\xAE\xB5\xD1\x9D\x23\x37\x63\xA7\xDC\x42\xA2\x2D\x92\x04\x1B\x50\xC1\x7B\xB8\x3E\x1B\xC9\x56\x04\x8B\x2F\x52\x9B\xAD\xA9\x56\xE9\xC1\xFF\xAD\xA9\x58\x87\x30\xB6\x81\xF7\x97\x45\xFC\x19\x57\x3B\x2B\x6F\xE4\x47\xF4\x99\x45\xFE\x1D\xF1\xF8\x97\xA3\x88\x1D\x37\x1C\x5C\x8F\xE0\x76\x25\x9A\x50\xF8\xA0\x54\xFF\x44\x90\x76\x23\xD2\x32\xC6\xC3\xAB\x06\xBF\xFC\xFB\xBF\xF3\xAD\x7D\x92\x62\x02\x5B\x29\xD
3\x35\xA3\x93\x9A\x43\x64\x60\x5D\xB2\xFA\x32\xFF\x3B\x04\xAF\x4D\x40\x6A\xF9\xC7\xE3\xEF\x23\xFD\x6B\xCB\xE5\x0F\x8B\x38\x0D\xEE\x0A\xFC\xFE\x0F\x98\x9F\x30\x31\xDD\x6C\x52\x65\xF9\x8B\x81\xBE\x22\xE1\x1C\x58\x03\xBA\x91\x1B\x89\x07\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x6A\x38\x5B\x26\x8D\xDE\x8B\x5A\xF2\x4F\x7A\x54\x83\x19\x18\xE3\x08\x35\xA6\xBA\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x3C\xD5\x77\x3D\xDA\xDF\x89\xBA\x87\x0C\x08\x54\x6A\x20\x50\x92\xBE\xB0\x41\x3D\xB9\x26\x64\x83\x0A\x2F\xE8\x40\xC0\x97\x28\x27\x82\x30\x4A\xC9\x93\xFF\x6A\xE7\xA6\x00\x7F\x89\x42\x9A\xD6\x11\xE5\x53\xCE\x2F\xCC\xF2\xDA\x05\xC4\xFE\xE2\x50\xC4\x3A\x86\x7D\xCC\xDA\x7E\x10\x09\x3B\x92\x35\x2A\x53\xB2\xFE\xEB\x2B\x05\xD9\x6C\x5D\xE6\xD0\xEF\xD3\x6A\x66\x9E\x15\x28\x85\x7A\xE8\x82\x00\xAC\x1E\xA7\x09\x69\x56\x42\xD3\x68\x51\x18\xBE\x54\x9A\xBF\x44\x41\xBA\x49\xBE\x20\xBA\x69\x5C\xEE\xB8\x77\xCD\xCE\x6C\x1F\xAD\x83\x96\x18\x7D\x0E\xB5\x14\x39\x84\xF1\x28\xE9\x2D\xA3\x9E\x7B\x1E\x7A\x72\x5A\x83\xB3\x79\x6F\xEF\xB4\xFC\xD0\x0A\xA5\x58\x4F\x46\xDF\xFB\x6D\x79\x59\xF2\x84\x22\x52\xAE\x0F\xCC\xFB\x7C\x3B\xE7\x6A\xCA\x47\x61\xC3\x7A\xF8\xD3\x92\x04\x1F\xB8\x20\x84\xE1\x36\x54\x16\xC7\x40\xDE\x3B\x8A\x73\xDC\xDF\xC6\x09\x4C\xDF\xEC\xDA\xFF\xD4\x53\x42\xA1\xC9\xF2\x62\x1D\x22\x83\x3C\x97\xC5\xF9\x19\x62\x27\xAC\x65\x22\xD7\xD3\x3C\xC6\xE5\x8E\xB2\x53\xCC\x49\xCE\xBC\x30\xFE\x7B\x0E\x33\x90\xFB\xED\xD2\x14\x91\x1F\x07\xAF", + ["OU=Security Communication RootCA2,O=SECOM Trust Systems CO.\,LTD.,C=JP"] = "\x30\x82\x03\x77\x30\x82\x02\x5F\xA0\x03\x02\x01\x02\x02\x01\x00\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x30\x5D\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x4A\x50\x31\x25\x30\x23\x06\x03\x55\x04\x0A\x13\x1C\x53\x45\x43\x4F\x4D\x20\x54\x72\x75\x73\x74\x20\x53\x79\x73\x74\x65\x6D\x73\x20\x43\x4F\x2E\x2C\x4C\x54\x44\x2E\x31\x27\x30\x25\x06\x03\x55\x04\x0B\x13\x1E\x53\x65\x63\x75\x72\x69\x74\x79\x20\x43\x6F\x6D\x6D\x75\x6E\x69\x63\x61\x74\x69\x6F\x6E\x20\x52\x6F\x6F\x74\x43\x41\x32\x30\x1E\x17\x0D\x30\x39\x30\x35\x32\x39\x30\x35\x30\x30\x33\x39\x5A\x17\x0D\x32\x39\x30\x35\x32\x39\x30\x35\x30\x30\x33\x39\x5A\x30\x5D\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x4A\x50\x31\x25\x30\x23\x06\x03\x55\x04\x0A\x13\x1C\x53\x45\x43\x4F\x4D\x20\x54\x72\x75\x73\x74\x20\x53\x79\x73\x74\x65\x6D\x73\x20\x43\x4F\x2E\x2C\x4C\x54\x44\x2E\x31\x27\x30\x25\x06\x03\x55\x04\x0B\x13\x1E\x53\x65\x63\x75\x72\x69\x74\x79\x20\x43\x6F\x6D\x6D\x75\x6E\x69\x63\x61\x74\x69\x6F\x6E\x20\x52\x6F\x6F\x74\x43\x41\x32\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xD0\x15\x39\x52\xB1\x52\xB3\xBA\xC5\x59\x82\xC4\x5D\x52\xAE\x3A\x43\x65\x80\x4B\xC7\xF2\x96\xBC\xDB\x36\x97\xD6\xA6\x64\x8C\xA8\x5E\xF0\xE3\x0A\x1C\xF7\xDF\x97\x3D\x4B\xAE\xF6\x5D\xEC\x21\xB5\x41\xAB\xCD\xB9\x7E\x76\x9F\xBE\xF9\x3E\x36\x34\xA0\x3B\xC1\xF6\x31\x11\x45\x74\x93\x3D\x57\x80\xC5\xF9\x89\x99\xCA\xE5\xAB\x6A\xD4\xB5\xDA\x41\x90\x10\xC1\xD6\xD6\x42\x89\xC2\xBF\xF4\x38\x12\x95\x4C\x54\x05\xF7\x36\xE4\x45\x83\x7B\x14\x65\xD6\xDC\x0C\x4D\xD1\xDE\x7E\x0C\xAB\x3B\xC4\x15\xBE\x3A\x56\xA6\x5A\x6F\x76\x69\x52\xA9\x7A\xB9\xC8\xEB\x6A\x9A\x5D\x52\xD0\x2D\x0A\x6B\x35\x16\x09\x10\x84\xD0\x6A\xCA\x3A\x06\x00\x37\x47\xE4\x7E\x57\x4F\x3F\x8B\xEB\x67\xB8\x88\xAA\xC5\xBE\x53\x55\xB2\x91\xC4\x7D\xB
9\xB0\x85\x19\x06\x78\x2E\xDB\x61\x1A\xFA\x85\xF5\x4A\x91\xA1\xE7\x16\xD5\x8E\xA2\x39\xDF\x94\xB8\x70\x1F\x28\x3F\x8B\xFC\x40\x5E\x63\x83\x3C\x83\x2A\x1A\x99\x6B\xCF\xDE\x59\x6A\x3B\xFC\x6F\x16\xD7\x1F\xFD\x4A\x10\xEB\x4E\x82\x16\x3A\xAC\x27\x0C\x53\xF1\xAD\xD5\x24\xB0\x6B\x03\x50\xC1\x2D\x3C\x16\xDD\x44\x34\x27\x1A\x75\xFB\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x0A\x85\xA9\x77\x65\x05\x98\x7C\x40\x81\xF8\x0F\x97\x2C\x38\xF1\x0A\xEC\x3C\xCF\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x03\x82\x01\x01\x00\x4C\x3A\xA3\x44\xAC\xB9\x45\xB1\xC7\x93\x7E\xC8\x0B\x0A\x42\xDF\x64\xEA\x1C\xEE\x59\x6C\x08\xBA\x89\x5F\x6A\xCA\x4A\x95\x9E\x7A\x8F\x07\xC5\xDA\x45\x72\x82\x71\x0E\x3A\xD2\xCC\x6F\xA7\xB4\xA1\x23\xBB\xF6\x24\x9F\xCB\x17\xFE\x8C\xA6\xCE\xC2\xD2\xDB\xCC\x8D\xFC\x71\xFC\x03\x29\xC1\x6C\x5D\x33\x5F\x64\xB6\x65\x3B\x89\x6F\x18\x76\x78\xF5\xDC\xA2\x48\x1F\x19\x3F\x8E\x93\xEB\xF1\xFA\x17\xEE\xCD\x4E\xE3\x04\x12\x55\xD6\xE5\xE4\xDD\xFB\x3E\x05\x7C\xE2\x1D\x5E\xC6\xA7\xBC\x97\x4F\x68\x3A\xF5\xE9\x2E\x0A\x43\xB6\xAF\x57\x5C\x62\x68\x7C\xB7\xFD\xA3\x8A\x84\xA0\xAC\x62\xBE\x2B\x09\x87\x34\xF0\x6A\x01\xBB\x9B\x29\x56\x3C\xFE\x00\x37\xCF\x23\x6C\xF1\x4E\xAA\xB6\x74\x46\x12\x6C\x91\xEE\x34\xD5\xEC\x9A\x91\xE7\x44\xBE\x90\x31\x72\xD5\x49\x02\xF6\x02\xE5\xF4\x1F\xEB\x7C\xD9\x96\x55\xA9\xFF\xEC\x8A\xF9\x99\x47\xFF\x35\x5A\x02\xAA\x04\xCB\x8A\x5B\x87\x71\x29\x91\xBD\xA4\xB4\x7A\x0D\xBD\x9A\xF5\x57\x23\x00\x07\x21\x17\x3F\x4A\x39\xD1\x05\x49\x0B\xA7\xB6\x37\x81\xA5\x5D\x8C\xAA\x33\x5E\x81\x28\x7C\xA7\x7D\x27\xEB\x00\xAE\x8D\x37", + ["CN=EC-ACC,OU=Jerarquia Entitats de Certificacio Catalanes,OU=Vegeu https://www.catcert.net/verarrel (c)03,OU=Serveis Publics de Certificacio,O=Agencia Catalana de Certificacio (NIF Q-0801176-I),C=ES"] = 
"\x30\x82\x05\x56\x30\x82\x04\x3E\xA0\x03\x02\x01\x02\x02\x10\xEE\x2B\x3D\xEB\xD4\x21\xDE\x14\xA8\x62\xAC\x04\xF3\xDD\xC4\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x81\xF3\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x45\x53\x31\x3B\x30\x39\x06\x03\x55\x04\x0A\x13\x32\x41\x67\x65\x6E\x63\x69\x61\x20\x43\x61\x74\x61\x6C\x61\x6E\x61\x20\x64\x65\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x63\x69\x6F\x20\x28\x4E\x49\x46\x20\x51\x2D\x30\x38\x30\x31\x31\x37\x36\x2D\x49\x29\x31\x28\x30\x26\x06\x03\x55\x04\x0B\x13\x1F\x53\x65\x72\x76\x65\x69\x73\x20\x50\x75\x62\x6C\x69\x63\x73\x20\x64\x65\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x63\x69\x6F\x31\x35\x30\x33\x06\x03\x55\x04\x0B\x13\x2C\x56\x65\x67\x65\x75\x20\x68\x74\x74\x70\x73\x3A\x2F\x2F\x77\x77\x77\x2E\x63\x61\x74\x63\x65\x72\x74\x2E\x6E\x65\x74\x2F\x76\x65\x72\x61\x72\x72\x65\x6C\x20\x28\x63\x29\x30\x33\x31\x35\x30\x33\x06\x03\x55\x04\x0B\x13\x2C\x4A\x65\x72\x61\x72\x71\x75\x69\x61\x20\x45\x6E\x74\x69\x74\x61\x74\x73\x20\x64\x65\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x63\x69\x6F\x20\x43\x61\x74\x61\x6C\x61\x6E\x65\x73\x31\x0F\x30\x0D\x06\x03\x55\x04\x03\x13\x06\x45\x43\x2D\x41\x43\x43\x30\x1E\x17\x0D\x30\x33\x30\x31\x30\x37\x32\x33\x30\x30\x30\x30\x5A\x17\x0D\x33\x31\x30\x31\x30\x37\x32\x32\x35\x39\x35\x39\x5A\x30\x81\xF3\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x45\x53\x31\x3B\x30\x39\x06\x03\x55\x04\x0A\x13\x32\x41\x67\x65\x6E\x63\x69\x61\x20\x43\x61\x74\x61\x6C\x61\x6E\x61\x20\x64\x65\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x63\x69\x6F\x20\x28\x4E\x49\x46\x20\x51\x2D\x30\x38\x30\x31\x31\x37\x36\x2D\x49\x29\x31\x28\x30\x26\x06\x03\x55\x04\x0B\x13\x1F\x53\x65\x72\x76\x65\x69\x73\x20\x50\x75\x62\x6C\x69\x63\x73\x20\x64\x65\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x63\x69\x6F\x31\x35\x30\x33\x06\x03\x55\x04\x0B\x13\x2C\x56\x65\x67\x65\x75\x20\x68\x74\x74\x70\x73\x3A\x2F\x2F\x77\x77\x77\x2E\x63\x61\x74\x63\x65\x72\x74\x2E\x6E\x65\x74\x2F\x76\x65\x72\x61\x72\x72\x65\x6C\x20\x28\x63\x29\x30\x33\x31\x35\x30\x33\x06\x03\x55\x04\x0B\x13\x2C\x4A\x65\x72\x61\x72\x71\x75\x69\x61\x20\x45\x6E\x74\x69\x74\x61\x74\x73\x20\x64\x65\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x63\x69\x6F\x20\x43\x61\x74\x61\x6C\x61\x6E\x65\x73\x31\x0F\x30\x0D\x06\x03\x55\x04\x03\x13\x06\x45\x43\x2D\x41\x43\x43\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xB3\x22\xC7\x4F\xE2\x97\x42\x95\x88\x47\x83\x40\xF6\x1D\x17\xF3\x83\x73\x24\x1E\x51\xF3\x98\x8A\xC3\x92\xB8\xFF\x40\x90\x05\x70\x87\x60\xC9\x00\xA9\xB5\x94\x65\x19\x22\x15\x17\xC2\x43\x6C\x66\x44\x9A\x0D\x04\x3E\x39\x6F\xA5\x4B\x7A\xAA\x63\xB7\x8A\x44\x9D\xD9\x63\x91\x84\x66\xE0\x28\x0F\xBA\x42\xE3\x6E\x8E\xF7\x14\x27\x93\x69\xEE\x91\x0E\xA3\x5F\x0E\xB1\xEB\x66\xA2\x72\x4F\x12\x13\x86\x65\x7A\x3E\xDB\x4F\x07\xF4\xA7\x09\x60\xDA\x3A\x42\x99\xC7\xB2\x7F\xB3\x16\x95\x1C\xC7\xF9\x34\xB5\x94\x85\xD5\x99\x5E\xA0\x48\xA0\x7E\xE7\x17\x65\xB8\xA2\x75\xB8\x1E\xF3\xE5\x42\x7D\xAF\xED\xF3\x8A\x48\x64\x5D\x82\x14\x93\xD8\xC0\xE4\xFF\xB3\x50\x72\xF2\x76\xF6\xB3\x5D\x42\x50\x79\xD0\x94\x3E\x6B\x0C\x00\xBE\xD8\x6B\x0E\x4E\x2A\xEC\x3E\xD2\xCC\x82\xA2\x18\x65\x33\x13\x77\x9E\x9A\x5D\x1A\x13\xD8\xC3\xDB\x3D\xC8\x97\x7A\xEE\x70\xED\xA7\xE6\x7C\xDB\x71\xCF\x2D\x94\x62\xDF\x6D\xD6\xF5\x38\xBE\x3F\xA5\x85\x0A\x19\xB8\xA8\xD8\x09\x75\x42\x70\xC4\xEA\xEF\xCB\x0E\xC8\x34\xA8\x12\x22\x98\x0C\xB8\x13\x94\xB6\x4B\xEC\xF0\xD0\x90\xE7\x27\x02\x03\x01\x00\x01\xA3\x81\xE3\x30\x81\xE0\x30\x1D\x06\x03\x55\x1D\x11\x04\x16\x30\x14\x81\x12\x65\x63\x5F\x61\x63\x
63\x40\x63\x61\x74\x63\x65\x72\x74\x2E\x6E\x65\x74\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\xA0\xC3\x8B\x44\xAA\x37\xA5\x45\xBF\x97\x80\x5A\xD1\xF1\x78\xA2\x9B\xE9\x5D\x8D\x30\x7F\x06\x03\x55\x1D\x20\x04\x78\x30\x76\x30\x74\x06\x0B\x2B\x06\x01\x04\x01\xF5\x78\x01\x03\x01\x0A\x30\x65\x30\x2C\x06\x08\x2B\x06\x01\x05\x05\x07\x02\x01\x16\x20\x68\x74\x74\x70\x73\x3A\x2F\x2F\x77\x77\x77\x2E\x63\x61\x74\x63\x65\x72\x74\x2E\x6E\x65\x74\x2F\x76\x65\x72\x61\x72\x72\x65\x6C\x30\x35\x06\x08\x2B\x06\x01\x05\x05\x07\x02\x02\x30\x29\x1A\x27\x56\x65\x67\x65\x75\x20\x68\x74\x74\x70\x73\x3A\x2F\x2F\x77\x77\x77\x2E\x63\x61\x74\x63\x65\x72\x74\x2E\x6E\x65\x74\x2F\x76\x65\x72\x61\x72\x72\x65\x6C\x20\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\xA0\x48\x5B\x82\x01\xF6\x4D\x48\xB8\x39\x55\x35\x9C\x80\x7A\x53\x99\xD5\x5A\xFF\xB1\x71\x3B\xCC\x39\x09\x94\x5E\xD6\xDA\xEF\xBE\x01\x5B\x5D\xD3\x1E\xD8\xFD\x7D\x4F\xCD\xA0\x41\xE0\x34\x93\xBF\xCB\xE2\x86\x9C\x37\x92\x90\x56\x1C\xDC\xEB\x29\x05\xE5\xC4\x9E\xC7\x35\xDF\x8A\x0C\xCD\xC5\x21\x43\xE9\xAA\x88\xE5\x35\xC0\x19\x42\x63\x5A\x02\x5E\xA4\x48\x18\x3A\x85\x6F\xDC\x9D\xBC\x3F\x9D\x9C\xC1\x87\xB8\x7A\x61\x08\xE9\x77\x0B\x7F\x70\xAB\x7A\xDD\xD9\x97\x2C\x64\x1E\x85\xBF\xBC\x74\x96\xA1\xC3\x7A\x12\xEC\x0C\x1A\x6E\x83\x0C\x3C\xE8\x72\x46\x9F\xFB\x48\xD5\x5E\x97\xE6\xB1\xA1\xF8\xE4\xEF\x46\x25\x94\x9C\x89\xDB\x69\x38\xBE\xEC\x5C\x0E\x56\xC7\x65\x51\xE5\x50\x88\x88\xBF\x42\xD5\x2B\x3D\xE5\xF9\xBA\x9E\x2E\xB3\xCA\xF4\x73\x92\x02\x0B\xBE\x4C\x66\xEB\x20\xFE\xB9\xCB\xB5\x99\x7F\xE6\xB6\x13\xFA\xCA\x4B\x4D\xD9\xEE\x53\x46\x06\x3B\xC6\x4E\xAD\x93\x5A\x81\x7E\x6C\x2A\x4B\x6A\x05\x45\x8C\xF2\x21\xA4\x31\x90\x87\x6C\x65\x9C\x9D\xA5\x60\x95\x3A\x52\x7F\xF5\xD1\xAB\x08\x6E\xF3\xEE\x5B\xF9\x88\x3D\x7E\xB8\x6F\x6E\x03\xE4\x42", + ["CN=Hellenic Academic and Research Institutions RootCA 2011,O=Hellenic Academic and Research Institutions Cert. 
Authority,C=GR"] = "\x30\x82\x04\x31\x30\x82\x03\x19\xA0\x03\x02\x01\x02\x02\x01\x00\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x81\x95\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x47\x52\x31\x44\x30\x42\x06\x03\x55\x04\x0A\x13\x3B\x48\x65\x6C\x6C\x65\x6E\x69\x63\x20\x41\x63\x61\x64\x65\x6D\x69\x63\x20\x61\x6E\x64\x20\x52\x65\x73\x65\x61\x72\x63\x68\x20\x49\x6E\x73\x74\x69\x74\x75\x74\x69\x6F\x6E\x73\x20\x43\x65\x72\x74\x2E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x31\x40\x30\x3E\x06\x03\x55\x04\x03\x13\x37\x48\x65\x6C\x6C\x65\x6E\x69\x63\x20\x41\x63\x61\x64\x65\x6D\x69\x63\x20\x61\x6E\x64\x20\x52\x65\x73\x65\x61\x72\x63\x68\x20\x49\x6E\x73\x74\x69\x74\x75\x74\x69\x6F\x6E\x73\x20\x52\x6F\x6F\x74\x43\x41\x20\x32\x30\x31\x31\x30\x1E\x17\x0D\x31\x31\x31\x32\x30\x36\x31\x33\x34\x39\x35\x32\x5A\x17\x0D\x33\x31\x31\x32\x30\x31\x31\x33\x34\x39\x35\x32\x5A\x30\x81\x95\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x47\x52\x31\x44\x30\x42\x06\x03\x55\x04\x0A\x13\x3B\x48\x65\x6C\x6C\x65\x6E\x69\x63\x20\x41\x63\x61\x64\x65\x6D\x69\x63\x20\x61\x6E\x64\x20\x52\x65\x73\x65\x61\x72\x63\x68\x20\x49\x6E\x73\x74\x69\x74\x75\x74\x69\x6F\x6E\x73\x20\x43\x65\x72\x74\x2E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x31\x40\x30\x3E\x06\x03\x55\x04\x03\x13\x37\x48\x65\x6C\x6C\x65\x6E\x69\x63\x20\x41\x63\x61\x64\x65\x6D\x69\x63\x20\x61\x6E\x64\x20\x52\x65\x73\x65\x61\x72\x63\x68\x20\x49\x6E\x73\x74\x69\x74\x75\x74\x69\x6F\x6E\x73\x20\x52\x6F\x6F\x74\x43\x41\x20\x32\x30\x31\x31\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xA9\x53\x00\xE3\x2E\xA6\xF6\x8E\xFA\x60\xD8\x2D\x95\x3E\xF8\x2C\x2A\x54\x4E\xCD\xB9\x84\x61\x94\x58\x4F\x8F\x3D\x8B\xE4\x43\xF3\x75\x89\x8D\x51\xE4\xC3\x37\xD2\x8A\x88\x4D\x79\x1E\xB7\x12\xDD\x43\x78\x4A\x8A\x92\xE6\xD7\x48\xD5\x0F\xA4\x3A\x29\x44\x35\xB8\x07\xF6\x68\x1D\x55\xCD\x38\x51\xF0\x8C\x24\x31\x85\xAF\x83\xC9\x7D\xE9\x77\xAF\xED\x1A\x7B\x9D\x17\xF9\xB3\x9D\x38\x50\x0F\xA6\x5A\x79\x91\x80\xAF\x37\xAE\xA6\xD3\x31\xFB\xB5\x26\x09\x9D\x3C\x5A\xEF\x51\xC5\x2B\xDF\x96\x5D\xEB\x32\x1E\x02\xDA\x70\x49\xEC\x6E\x0C\xC8\x9A\x37\x8D\xF7\xF1\x36\x60\x4B\x26\x2C\x82\x9E\xD0\x78\xF3\x0D\x0F\x63\xA4\x51\x30\xE1\xF9\x2B\x27\x12\x07\xD8\xEA\xBD\x18\x62\x98\xB0\x59\x37\x7D\xBE\xEE\xF3\x20\x51\x42\x5A\x83\xEF\x93\xBA\x69\x15\xF1\x62\x9D\x9F\x99\x39\x82\xA1\xB7\x74\x2E\x8B\xD4\xC5\x0B\x7B\x2F\xF0\xC8\x0A\xDA\x3D\x79\x0A\x9A\x93\x1C\xA5\x28\x72\x73\x91\x43\x9A\xA7\xD1\x4D\x85\x84\xB9\xA9\x74\x8F\x14\x40\xC7\xDC\xDE\xAC\x41\x64\x6C\xB4\x19\x9B\x02\x63\x6D\x24\x64\x8F\x44\xB2\x25\xEA\xCE\x5D\x74\x0C\x63\x32\x5C\x8D\x87\xE5\x02\x03\x01\x00\x01\xA3\x81\x89\x30\x81\x86\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x0B\x06\x03\x55\x1D\x0F\x04\x04\x03\x02\x01\x06\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\xA6\x91\x42\xFD\x13\x61\x4A\x23\x9E\x08\xA4\x29\xE5\xD8\x13\x04\x23\xEE\x41\x25\x30\x47\x06\x03\x55\x1D\x1E\x04\x40\x30\x3E\xA0\x3C\x30\x05\x82\x03\x2E\x67\x72\x30\x05\x82\x03\x2E\x65\x75\x30\x06\x82\x04\x2E\x65\x64\x75\x30\x06\x82\x04\x2E\x6F\x72\x67\x30\x05\x81\x03\x2E\x67\x72\x30\x05\x81\x03\x2E\x65\x75\x30\x06\x81\x04\x2E\x65\x64\x75\x30\x06\x81\x04\x2E\x6F\x72\x67\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x1F\xEF\x79\x41\xE1\x7B\x6E\x3F\xB2\x8C\x86\x37\x42\x4A\x4E\x1C\x37\x1E\x8D\x66\xBA\x24\x81\xC9\x4F\x12\x0F\x21\xC0\x03\x97\x86\x25\x6D\x5D\xD3\x22\x29\xA8\x6C\xA2\x0D\xA9\xEB\x3D\x06\x5B\x99\x3A\xC7\xCC\xC3\x9A\x34\x7F\xAB\x0E\xC8\x4E\x1C\xE1\xFA\xE
4\xDC\xCD\x0D\xBE\xBF\x24\xFE\x6C\xE7\x6B\xC2\x0D\xC8\x06\x9E\x4E\x8D\x61\x28\xA6\x6A\xFD\xE5\xF6\x62\xEA\x18\x3C\x4E\xA0\x53\x9D\xB2\x3A\x9C\xEB\xA5\x9C\x91\x16\xB6\x4D\x82\xE0\x0C\x05\x48\xA9\x6C\xF5\xCC\xF8\xCB\x9D\x49\xB4\xF0\x02\xA5\xFD\x70\x03\xED\x8A\x21\xA5\xAE\x13\x86\x49\xC3\x33\x73\xBE\x87\x3B\x74\x8B\x17\x45\x26\x4C\x16\x91\x83\xFE\x67\x7D\xCD\x4D\x63\x67\xFA\xF3\x03\x12\x96\x78\x06\x8D\xB1\x67\xED\x8E\x3F\xBE\x9F\x4F\x02\xF5\xB3\x09\x2F\xF3\x4C\x87\xDF\x2A\xCB\x95\x7C\x01\xCC\xAC\x36\x7A\xBF\xA2\x73\x7A\xF7\x8F\xC1\xB5\x9A\xA1\x14\xB2\x8F\x33\x9F\x0D\xEF\x22\xDC\x66\x7B\x84\xBD\x45\x17\x06\x3D\x3C\xCA\xB9\x77\x34\x8F\xCA\xEA\xCF\x3F\x31\x3E\xE3\x88\xE3\x80\x49\x25\xC8\x97\xB5\x9D\x9A\x99\x4D\xB0\x3C\xF8\x4A\x00\x9B\x64\xDD\x9F\x39\x4B\xD1\x27\xD7\xB8", + ["CN=Actalis Authentication Root CA,O=Actalis S.p.A./03358520967,L=Milan,C=IT"] = "\x30\x82\x05\xBB\x30\x82\x03\xA3\xA0\x03\x02\x01\x02\x02\x08\x57\x0A\x11\x97\x42\xC4\xE3\xCC\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x30\x6B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x49\x54\x31\x0E\x30\x0C\x06\x03\x55\x04\x07\x0C\x05\x4D\x69\x6C\x61\x6E\x31\x23\x30\x21\x06\x03\x55\x04\x0A\x0C\x1A\x41\x63\x74\x61\x6C\x69\x73\x20\x53\x2E\x70\x2E\x41\x2E\x2F\x30\x33\x33\x35\x38\x35\x32\x30\x39\x36\x37\x31\x27\x30\x25\x06\x03\x55\x04\x03\x0C\x1E\x41\x63\x74\x61\x6C\x69\x73\x20\x41\x75\x74\x68\x65\x6E\x74\x69\x63\x61\x74\x69\x6F\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x1E\x17\x0D\x31\x31\x30\x39\x32\x32\x31\x31\x32\x32\x30\x32\x5A\x17\x0D\x33\x30\x30\x39\x32\x32\x31\x31\x32\x32\x30\x32\x5A\x30\x6B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x49\x54\x31\x0E\x30\x0C\x06\x03\x55\x04\x07\x0C\x05\x4D\x69\x6C\x61\x6E\x31\x23\x30\x21\x06\x03\x55\x04\x0A\x0C\x1A\x41\x63\x74\x61\x6C\x69\x73\x20\x53\x2E\x70\x2E\x41\x2E\x2F\x30\x33\x33\x35\x38\x35\x32\x30\x39\x36\x37\x31\x27\x30\x25\x06\x03\x55\x04\x03\x0C\x1E\x41\x63\x74\x61\x6C\x69\x73\x20\x41\x75\x74\x68\x65\x6E\x74\x69\x63\x61\x74\x69\x6F\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x82\x02\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x02\x0F\x00\x30\x82\x02\x0A\x02\x82\x02\x01\x00\xA7\xC6\xC4\xA5\x29\xA4\x2C\xEF\xE5\x18\xC5\xB0\x50\xA3\x6F\x51\x3B\x9F\x0A\x5A\xC9\xC2\x48\x38\x0A\xC2\x1C\xA0\x18\x7F\x91\xB5\x87\xB9\x40\x3F\xDD\x1D\x68\x1F\x08\x83\xD5\x2D\x1E\x88\xA0\xF8\x8F\x56\x8F\x6D\x99\x02\x92\x90\x16\xD5\x5F\x08\x6C\x89\xD7\xE1\xAC\xBC\x20\xC2\xB1\xE0\x83\x51\x8A\x69\x4D\x00\x96\x5A\x6F\x2F\xC0\x44\x7E\xA3\x0E\xE4\x91\xCD\x58\xEE\xDC\xFB\xC7\x1E\x45\x47\xDD\x27\xB9\x08\x01\x9F\xA6\x21\x1D\xF5\x41\x2D\x2F\x4C\xFD\x28\xAD\xE0\x8A\xAD\x22\xB4\x56\x65\x8E\x86\x54\x8F\x93\x43\x29\xDE\x39\x46\x78\xA3\x30\x23\xBA\xCD\xF0\x7D\x13\x57\xC0\x5D\xD2\x83\x6B\x48\x4C\xC4\xAB\x9F\x80\x5A\x5B\x3A\xBD\xC9\xA7\x22\x3F\x80\x27\x33\x5B\x0E\xB7\x8A\x0C\x5D\x07\x37\x08\xCB\x6C\xD2\x7A\x47\x22\x44\x35\xC5\xCC\xCC\x2E\x8E\xDD\x2A\xED\xB7\x7D\x66\x0D\x5F\x61\x51\x22\x55\x1B\xE3\x46\xE3\xE3\x3D\xD0\x35\x62\x9A\xDB\xAF\x14\xC8\x5B\xA1\xCC\x89\x1B\xE1\x30\x26\xFC\xA0\x9B\x1F\x81\xA7\x47\x1F\x04\xEB\xA3\x39\x92\x06\x9F\x99\xD3\xBF\xD3\xEA\x4F\x50\x9C\x19\xFE\x96\x87\x1E\x3C\x65\xF6\xA3\x18\x24\x83\x86\x10\xE7\x54\x3E\xA8\x3A\x76\x24\x4F\x81\x21\xC5\xE3\x0F\x02\xF8\x93\x94\x47\x20\xBB\xFE\xD4\x0E\xD3\x68\xB9\xDD\xC4\x7A\x84\x82\xE3\x53\x54\x79\xDD\xDB\x9C\xD2\xF2\x07\x9B\x2E\xB6\xBC\x3E\xED\x85\x6D\xEF\x25\x11\xF2\x97\x1A\x42\x61\xF7\x4A\x97\xE8\x8B\xB1\x10\x07\xFA\x65\x81\xB2\xA2\x39\xCF\xF7\x3C\xFF\x18\xFB\xC6\xF1\x5A\x8B\x59\xE2\x02\xAC\x7B\x92\xD0\x4E\x14\x4F\x59\x45\xF6\x0C\x5E\x28\x
5F\xB0\xE8\x3F\x45\xCF\xCF\xAF\x9B\x6F\xFB\x84\xD3\x77\x5A\x95\x6F\xAC\x94\x84\x9E\xEE\xBC\xC0\x4A\x8F\x4A\x93\xF8\x44\x21\xE2\x31\x45\x61\x50\x4E\x10\xD8\xE3\x35\x7C\x4C\x19\xB4\xDE\x05\xBF\xA3\x06\x9F\xC8\xB5\xCD\xE4\x1F\xD7\x17\x06\x0D\x7A\x95\x74\x55\x0D\x68\x1A\xFC\x10\x1B\x62\x64\x9D\x6D\xE0\x95\xA0\xC3\x94\x07\x57\x0D\x14\xE6\xBD\x05\xFB\xB8\x9F\xE6\xDF\x8B\xE2\xC6\xE7\x7E\x96\xF6\x53\xC5\x80\x34\x50\x28\x58\xF0\x12\x50\x71\x17\x30\xBA\xE6\x78\x63\xBC\xF4\xB2\xAD\x9B\x2B\xB2\xFE\xE1\x39\x8C\x5E\xBA\x0B\x20\x94\xDE\x7B\x83\xB8\xFF\xE3\x56\x8D\xB7\x11\xE9\x3B\x8C\xF2\xB1\xC1\x5D\x9D\xA4\x0B\x4C\x2B\xD9\xB2\x18\xF5\xB5\x9F\x4B\x02\x03\x01\x00\x01\xA3\x63\x30\x61\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x52\xD8\x88\x3A\xC8\x9F\x78\x66\xED\x89\xF3\x7B\x38\x70\x94\xC9\x02\x02\x36\xD0\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1F\x06\x03\x55\x1D\x23\x04\x18\x30\x16\x80\x14\x52\xD8\x88\x3A\xC8\x9F\x78\x66\xED\x89\xF3\x7B\x38\x70\x94\xC9\x02\x02\x36\xD0\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x03\x82\x02\x01\x00\x0B\x7B\x72\x87\xC0\x60\xA6\x49\x4C\x88\x58\xE6\x1D\x88\xF7\x14\x64\x48\xA6\xD8\x58\x0A\x0E\x4F\x13\x35\xDF\x35\x1D\xD4\xED\x06\x31\xC8\x81\x3E\x6A\xD5\xDD\x3B\x1A\x32\xEE\x90\x3D\x11\xD2\x2E\xF4\x8E\xC3\x63\x2E\x23\x66\xB0\x67\xBE\x6F\xB6\xC0\x13\x39\x60\xAA\xA2\x34\x25\x93\x75\x52\xDE\xA7\x9D\xAD\x0E\x87\x89\x52\x71\x6A\x16\x3C\x19\x1D\x83\xF8\x9A\x29\x65\xBE\xF4\x3F\x9A\xD9\xF0\xF3\x5A\x87\x21\x71\x80\x4D\xCB\xE0\x38\x9B\x3F\xBB\xFA\xE0\x30\x4D\xCF\x86\xD3\x65\x10\x19\x18\xD1\x97\x02\xB1\x2B\x72\x42\x68\xAC\xA0\xBD\x4E\x5A\xDA\x18\xBF\x6B\x98\x81\xD0\xFD\x9A\xBE\x5E\x15\x48\xCD\x11\x15\xB9\xC0\x29\x5C\xB4\xE8\x88\xF7\x3E\x36\xAE\xB7\x62\xFD\x1E\x62\xDE\x70\x78\x10\x1C\x48\x5B\xDA\xBC\xA4\x38\xBA\x67\xED\x55\x3E\x5E\x57\xDF\xD4\x03\x40\x4C\x81\xA4\xD2\x4F\x63\xA7\x09\x42\x09\x14\xFC\x00\xA9\xC2\x80\x73\x4F\x2E\xC0\x40\xD9\x11\x7B\x48\xEA\x7A\x02\xC0\xD3\xEB\x28\x01\x26\x58\x74\xC1\xC0\x73\x22\x6D\x93\x95\xFD\x39\x7D\xBB\x2A\xE3\xF6\x82\xE3\x2C\x97\x5F\x4E\x1F\x91\x94\xFA\xFE\x2C\xA3\xD8\x76\x1A\xB8\x4D\xB2\x38\x4F\x9B\xFA\x1D\x48\x60\x79\x26\xE2\xF3\xFD\xA9\xD0\x9A\xE8\x70\x8F\x49\x7A\xD6\xE5\xBD\x0A\x0E\xDB\x2D\xF3\x8D\xBF\xEB\xE3\xA4\x7D\xCB\xC7\x95\x71\xE8\xDA\xA3\x7C\xC5\xC2\xF8\x74\x92\x04\x1B\x86\xAC\xA4\x22\x53\x40\xB6\xAC\xFE\x4C\x76\xCF\xFB\x94\x32\xC0\x35\x9F\x76\x3F\x6E\xE5\x90\x6E\xA0\xA6\x26\xA2\xB8\x2C\xBE\xD1\x2B\x85\xFD\xA7\x68\xC8\xBA\x01\x2B\xB1\x6C\x74\x1D\xB8\x73\x95\xE7\xEE\xB7\xC7\x25\xF0\x00\x4C\x00\xB2\x7E\xB6\x0B\x8B\x1C\xF3\xC0\x50\x9E\x25\xB9\xE0\x08\xDE\x36\x66\xFF\x37\xA5\xD1\xBB\x54\x64\x2C\xC9\x27\xB5\x4B\x92\x7E\x65\xFF\xD3\x2D\xE1\xB9\x4E\xBC\x7F\xA4\x41\x21\x90\x41\x77\xA6\x39\x1F\xEA\x9E\xE3\x9F\xD0\x66\x6F\x05\xEC\xAA\x76\x7E\xBF\x6B\x16\xA0\xEB\xB5\xC7\xFC\x92\x54\x2F\x2B\x11\x27\x25\x37\x78\x4C\x51\x6A\xB0\xF3\xCC\x58\x5D\x14\xF1\x6A\x48\x15\xFF\xC2\x07\xB6\xB1\x8D\x0F\x8E\x5C\x50\x46\xB3\x3D\xBF\x01\x98\x4F\xB2\x59\x54\x47\x3E\x34\x7B\x78\x6D\x56\x93\x2E\x73\xEA\x66\x28\x78\xCD\x1D\x14\xBF\xA0\x8F\x2F\x2E\xB8\x2E\x8E\xF2\x14\x8A\xCC\xE9\xB5\x7C\xFB\x6C\x9D\x0C\xA5\xE1\x96", + ["OU=Trustis FPS Root CA,O=Trustis Limited,C=GB"] = 
"\x30\x82\x03\x67\x30\x82\x02\x4F\xA0\x03\x02\x01\x02\x02\x10\x1B\x1F\xAD\xB6\x20\xF9\x24\xD3\x36\x6B\xF7\xC7\xF1\x8C\xA0\x59\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x45\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x47\x42\x31\x18\x30\x16\x06\x03\x55\x04\x0A\x13\x0F\x54\x72\x75\x73\x74\x69\x73\x20\x4C\x69\x6D\x69\x74\x65\x64\x31\x1C\x30\x1A\x06\x03\x55\x04\x0B\x13\x13\x54\x72\x75\x73\x74\x69\x73\x20\x46\x50\x53\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x1E\x17\x0D\x30\x33\x31\x32\x32\x33\x31\x32\x31\x34\x30\x36\x5A\x17\x0D\x32\x34\x30\x31\x32\x31\x31\x31\x33\x36\x35\x34\x5A\x30\x45\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x47\x42\x31\x18\x30\x16\x06\x03\x55\x04\x0A\x13\x0F\x54\x72\x75\x73\x74\x69\x73\x20\x4C\x69\x6D\x69\x74\x65\x64\x31\x1C\x30\x1A\x06\x03\x55\x04\x0B\x13\x13\x54\x72\x75\x73\x74\x69\x73\x20\x46\x50\x53\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xC5\x50\x7B\x9E\x3B\x35\xD0\xDF\xC4\x8C\xCD\x8E\x9B\xED\xA3\xC0\x36\x99\xF4\x42\xEA\xA7\x3E\x80\x83\x0F\xA6\xA7\x59\x87\xC9\x90\x45\x43\x7E\x00\xEA\x86\x79\x2A\x03\xBD\x3D\x37\x99\x89\x66\xB7\xE5\x8A\x56\x86\x93\x9C\x68\x4B\x68\x04\x8C\x93\x93\x02\x3E\x30\xD2\x37\x3A\x22\x61\x89\x1C\x85\x4E\x7D\x8F\xD5\xAF\x7B\x35\xF6\x7E\x28\x47\x89\x31\xDC\x0E\x79\x64\x1F\x99\xD2\x5B\xBA\xFE\x7F\x60\xBF\xAD\xEB\xE7\x3C\x38\x29\x6A\x2F\xE5\x91\x0B\x55\xFF\xEC\x6F\x58\xD5\x2D\xC9\xDE\x4C\x66\x71\x8F\x0C\xD7\x04\xDA\x07\xE6\x1E\x18\xE3\xBD\x29\x02\xA8\xFA\x1C\xE1\x5B\xB9\x83\xA8\x41\x48\xBC\x1A\x71\x8D\xE7\x62\xE5\x2D\xB2\xEB\xDF\x7C\xCF\xDB\xAB\x5A\xCA\x31\xF1\x4C\x22\xF3\x05\x13\xF7\x82\xF9\x73\x79\x0C\xBE\xD7\x4B\x1C\xC0\xD1\x15\x3C\x93\x41\x64\xD1\xE6\xBE\x23\x17\x22\x00\x89\x5E\x1F\x6B\xA5\xAC\x6E\xA7\x4B\x8C\xED\xA3\x72\xE6\xAF\x63\x4D\x2F\x85\xD2\x14\x35\x9A\x2E\x4E\x8C\xEA\x32\x98\x28\x86\xA1\x91\x09\x41\x3A\xB4\xE1\xE3\xF2\xFA\xF0\xC9\x0A\xA2\x41\xDD\xA9\xE3\x03\xC7\x88\x15\x3B\x1C\xD4\x1A\x94\xD7\x9F\x64\x59\x12\x6D\x02\x03\x01\x00\x01\xA3\x53\x30\x51\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1F\x06\x03\x55\x1D\x23\x04\x18\x30\x16\x80\x14\xBA\xFA\x71\x25\x79\x8B\x57\x41\x25\x21\x86\x0B\x71\xEB\xB2\x64\x0E\x8B\x21\x67\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\xBA\xFA\x71\x25\x79\x8B\x57\x41\x25\x21\x86\x0B\x71\xEB\xB2\x64\x0E\x8B\x21\x67\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\x7E\x58\xFF\xFD\x35\x19\x7D\x9C\x18\x4F\x9E\xB0\x2B\xBC\x8E\x8C\x14\xFF\x2C\xA0\xDA\x47\x5B\xC3\xEF\x81\x2D\xAF\x05\xEA\x74\x48\x5B\xF3\x3E\x4E\x07\xC7\x6D\xC5\xB3\x93\xCF\x22\x35\x5C\xB6\x3F\x75\x27\x5F\x09\x96\xCD\xA0\xFE\xBE\x40\x0C\x5C\x12\x55\xF8\x93\x82\xCA\x29\xE9\x5E\x3F\x56\x57\x8B\x38\x36\xF7\x45\x1A\x4C\x28\xCD\x9E\x41\xB8\xED\x56\x4C\x84\xA4\x40\xC8\xB8\xB0\xA5\x2B\x69\x70\x04\x6A\xC3\xF8\xD4\x12\x32\xF9\x0E\xC3\xB1\xDC\x32\x84\x44\x2C\x6F\xCB\x46\x0F\xEA\x66\x41\x0F\x4F\xF1\x58\xA5\xA6\x0D\x0D\x0F\x61\xDE\xA5\x9E\x5D\x7D\x65\xA1\x3C\x17\xE7\xA8\x55\x4E\xEF\xA0\xC7\xED\xC6\x44\x7F\x54\xF5\xA3\xE0\x8F\xF0\x7C\x55\x22\x8F\x29\xB6\x81\xA3\xE1\x6D\x4E\x2C\x1B\x80\x67\xEC\xAD\x20\x9F\x0C\x62\x61\xD5\x97\xFF\x43\xED\x2D\xC1\xDA\x5D\x29\x2A\x85\x3F\xAC\x65\xEE\x86\x0F\x05\x8D\x90\x5F\xDF\xEE\x9F\xF4\xBF\xEE\x1D\xFB\x98\xE4\x7F\x90\x2B\x84\x78\x10\x0E\x6C\x49\x53\xEF\x15\x5B\x65\x46\x4A\x5D\xAF\xBA\xFB\x3A\x72\x1D\xCD\xF6\x25\x88\x1E\x97\xCC\x21\x9C\x29\x01\x0D\x65\xEB\x57\xD9\xF3\x57\x96\xBB\x48\xCD\x81", + ["CN=StartCom Certification Authority 
G2,O=StartCom Ltd.,C=IL"] = "\x30\x82\x05\x63\x30\x82\x03\x4B\xA0\x03\x02\x01\x02\x02\x01\x3B\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x30\x53\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x49\x4C\x31\x16\x30\x14\x06\x03\x55\x04\x0A\x13\x0D\x53\x74\x61\x72\x74\x43\x6F\x6D\x20\x4C\x74\x64\x2E\x31\x2C\x30\x2A\x06\x03\x55\x04\x03\x13\x23\x53\x74\x61\x72\x74\x43\x6F\x6D\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x20\x47\x32\x30\x1E\x17\x0D\x31\x30\x30\x31\x30\x31\x30\x31\x30\x30\x30\x31\x5A\x17\x0D\x33\x39\x31\x32\x33\x31\x32\x33\x35\x39\x30\x31\x5A\x30\x53\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x49\x4C\x31\x16\x30\x14\x06\x03\x55\x04\x0A\x13\x0D\x53\x74\x61\x72\x74\x43\x6F\x6D\x20\x4C\x74\x64\x2E\x31\x2C\x30\x2A\x06\x03\x55\x04\x03\x13\x23\x53\x74\x61\x72\x74\x43\x6F\x6D\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x20\x47\x32\x30\x82\x02\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x02\x0F\x00\x30\x82\x02\x0A\x02\x82\x02\x01\x00\xB6\x89\x36\x5B\x07\xB7\x20\x36\xBD\x82\xBB\xE1\x16\x20\x03\x95\x7A\xAF\x0E\xA3\x55\xC9\x25\x99\x4A\xC5\xD0\x56\x41\x87\x90\x4D\x21\x60\xA4\x14\x87\x3B\xCD\xFD\xB2\x3E\xB4\x67\x03\x6A\xED\xE1\x0F\x4B\xC0\x91\x85\x70\x45\xE0\x42\x9E\xDE\x29\x23\xD4\x01\x0D\xA0\x10\x79\xB8\xDB\x03\xBD\xF3\xA9\x2F\xD1\xC6\xE0\x0F\xCB\x9E\x8A\x14\x0A\xB8\xBD\xF6\x56\x62\xF1\xC5\x72\xB6\x32\x25\xD9\xB2\xF3\xBD\x65\xC5\x0D\x2C\x6E\xD5\x92\x6F\x18\x8B\x00\x41\x14\x82\x6F\x40\x20\x26\x7A\x28\x0F\xF5\x1E\x7F\x27\xF7\x94\xB1\x37\x3D\xB7\xC7\x91\xF7\xE2\x01\xEC\xFD\x94\x89\xE1\xCC\x6E\xD3\x36\xD6\x0A\x19\x79\xAE\xD7\x34\x82\x65\xFF\x7C\x42\xBB\xB6\xDD\x0B\xA6\x34\xAF\x4B\x60\xFE\x7F\x43\x49\x06\x8B\x8C\x43\xB8\x56\xF2\xD9\x7F\x21\x43\x17\xEA\xA7\x48\x95\x01\x75\x75\xEA\x2B\xA5\x43\x95\xEA\x15\x84\x9D\x08\x8D\x26\x6E\x55\x9B\xAB\xDC\xD2\x39\xD2\x31\x1D\x60\xE2\xAC\xCC\x56\x45\x24\xF5\x1C\x54\xAB\xEE\x86\xDD\x96\x32\x85\xF8\x4C\x4F\xE8\x95\x76\xB6\x05\xDD\x36\x23\x67\xBC\xFF\x15\xE2\xCA\x3B\xE6\xA6\xEC\x3B\xEC\x26\x11\x34\x48\x8D\xF6\x80\x2B\x1A\x23\x02\xEB\x8A\x1C\x3A\x76\x2A\x7B\x56\x16\x1C\x72\x2A\xB3\xAA\xE3\x60\xA5\x00\x9F\x04\x9B\xE2\x6F\x1E\x14\x58\x5B\xA5\x6C\x8B\x58\x3C\xC3\xBA\x4E\x3A\x5C\xF7\xE1\x96\x2B\x3E\xEF\x07\xBC\xA4\xE5\x5D\xCC\x4D\x9F\x0D\xE1\xDC\xAA\xBB\xE1\x6E\x1A\xEC\x8F\xE1\xB6\x4C\x4D\x79\x72\x5D\x17\x35\x0B\x1D\xD7\xC1\x47\xDA\x96\x24\xE0\xD0\x72\xA8\x5A\x5F\x66\x2D\x10\xDC\x2F\x2A\x13\xAE\x26\xFE\x0A\x1C\x19\xCC\xD0\x3E\x0B\x9C\xC8\x09\x2E\xF9\x5B\x96\x7A\x47\x9C\xE9\x7A\xF3\x05\x50\x74\x95\x73\x9E\x30\x09\xF3\x97\x82\x5E\xE6\x8F\x39\x08\x1E\x59\xE5\x35\x14\x42\x13\xFF\x00\x9C\xF7\xBE\xAA\x50\xCF\xE2\x51\x48\xD7\xB8\x6F\xAF\xF8\x4E\x7E\x33\x98\x92\x14\x62\x3A\x75\x63\xCF\x7B\xFA\xDE\x82\x3B\xA9\xBB\x39\xE2\xC4\xBD\x2C\x00\x0E\xC8\x17\xAC\x13\xEF\x4D\x25\x8E\xD8\xB3\x90\x2F\xA9\xDA\x29\x7D\x1D\xAF\x74\x3A\xB2\x27\xC0\xC1\x1E\x3E\x75\xA3\x16\xA9\xAF\x7A\x22\x5D\x9F\x13\x1A\xCF\xA7\xA0\xEB\xE3\x86\x0A\xD3\xFD\xE6\x96\x95\xD7\x23\xC8\x37\xDD\xC4\x7C\xAA\x36\xAC\x98\x1A\x12\xB1\xE0\x4E\xE8\xB1\x3B\xF5\xD6\x6F\xF1\x30\xD7\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x4B\xC5\xB4\x40\x6B\xAD\x1C\xB3\xA5\x1C\x65\x6E\x46\x36\x89\x87\x05\x0C\x0E\xB6\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x03\x82\x02\x01\x00\x73\x57\x3F\x2C\xD5\x95\x32\x7E\x37\xDB\x
96\x92\xEB\x19\x5E\x7E\x53\xE7\x41\xEC\x11\xB6\x47\xEF\xB5\xDE\xED\x74\x5C\xC5\xF1\x8E\x49\xE0\xFC\x6E\x99\x13\xCD\x9F\x8A\xDA\xCD\x3A\x0A\xD8\x3A\x5A\x09\x3F\x5F\x34\xD0\x2F\x03\xD2\x66\x1D\x1A\xBD\x9C\x90\x37\xC8\x0C\x8E\x07\x5A\x94\x45\x46\x2A\xE6\xBE\x7A\xDA\xA1\xA9\xA4\x69\x12\x92\xB0\x7D\x36\xD4\x44\x87\xD7\x51\xF1\x29\x63\xD6\x75\xCD\x16\xE4\x27\x89\x1D\xF8\xC2\x32\x48\xFD\xDB\x99\xD0\x8F\x5F\x54\x74\xCC\xAC\x67\x34\x11\x62\xD9\x0C\x0A\x37\x87\xD1\xA3\x17\x48\x8E\xD2\x17\x1D\xF6\xD7\xFD\xDB\x65\xEB\xFD\xA8\xD4\xF5\xD6\x4F\xA4\x5B\x75\xE8\xC5\xD2\x60\xB2\xDB\x09\x7E\x25\x8B\x7B\xBA\x52\x92\x9E\x3E\xE8\xC5\x77\xA1\x3C\xE0\x4A\x73\x6B\x61\xCF\x86\xDC\x43\xFF\xFF\x21\xFE\x23\x5D\x24\x4A\xF5\xD3\x6D\x0F\x62\x04\x05\x57\x82\xDA\x6E\xA4\x33\x25\x79\x4B\x2E\x54\x19\x8B\xCC\x2C\x3D\x30\xE9\xD1\x06\xFF\xE8\x32\x46\xBE\xB5\x33\x76\x77\xA8\x01\x5D\x96\xC1\xC1\xD5\xBE\xAE\x25\xC0\xC9\x1E\x0A\x09\x20\x88\xA1\x0E\xC9\xF3\x6F\x4D\x82\x54\x00\x20\xA7\xD2\x8F\xE4\x39\x54\x17\x2E\x8D\x1E\xB8\x1B\xBB\x1B\xBD\x9A\x4E\x3B\x10\x34\xDC\x9C\x88\x53\xEF\xA2\x31\x5B\x58\x4F\x91\x62\xC8\xC2\x9A\x9A\xCD\x15\x5D\x38\xA9\xD6\xBE\xF8\x13\xB5\x9F\x12\x69\xF2\x50\x62\xAC\xFB\x17\x37\xF4\xEE\xB8\x75\x67\x60\x10\xFB\x83\x50\xF9\x44\xB5\x75\x9C\x40\x17\xB2\xFE\xFD\x79\x5D\x6E\x58\x58\x5F\x30\xFC\x00\xAE\xAF\x33\xC1\x0E\x4E\x6C\xBA\xA7\xA6\xA1\x7F\x32\xDB\x38\xE0\xB1\x72\x17\x0A\x2B\x91\xEC\x6A\x63\x26\xED\x89\xD4\x78\xCC\x74\x1E\x05\xF8\x6B\xFE\x8C\x6A\x76\x39\x29\xAE\x65\x23\x12\x95\x08\x22\x1C\x97\xCE\x5B\x06\xEE\x0C\xE2\xBB\xBC\x1F\x44\x93\xF6\xD8\x38\x45\x05\x21\xED\xE4\xAD\xAB\x12\xB6\x03\xA4\x42\x2E\x2D\xC4\x09\x3A\x03\x67\x69\x84\x9A\xE1\x59\x90\x8A\x28\x85\xD5\x5D\x74\xB1\xD1\x0E\x20\x58\x9B\x13\xA5\xB0\x63\xA6\xED\x7B\x47\xFD\x45\x55\x30\xA4\xEE\x9A\xD4\xE6\xE2\x87\xEF\x98\xC9\x32\x82\x11\x29\x22\xBC\x00\x0A\x31\x5E\x2D\x0F\xC0\x8E\xE9\x6B\xB2\x8F\x2E\x06\xD8\xD1\x91\xC7\xC6\x12\xF4\x4C\xFD\x30\x17\xC3\xC1\xDA\x38\x5B\xE3\xA9\xEA\xE6\xA1\xBA\x79\xEF\x73\xD8\xB6\x53\x57\x2D\xF6\xD0\xE1\xD7\x48", + ["CN=Buypass Class 2 Root CA,O=Buypass AS-983163327,C=NO"] = 
"\x30\x82\x05\x59\x30\x82\x03\x41\xA0\x03\x02\x01\x02\x02\x01\x02\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x30\x4E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x4E\x4F\x31\x1D\x30\x1B\x06\x03\x55\x04\x0A\x0C\x14\x42\x75\x79\x70\x61\x73\x73\x20\x41\x53\x2D\x39\x38\x33\x31\x36\x33\x33\x32\x37\x31\x20\x30\x1E\x06\x03\x55\x04\x03\x0C\x17\x42\x75\x79\x70\x61\x73\x73\x20\x43\x6C\x61\x73\x73\x20\x32\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x1E\x17\x0D\x31\x30\x31\x30\x32\x36\x30\x38\x33\x38\x30\x33\x5A\x17\x0D\x34\x30\x31\x30\x32\x36\x30\x38\x33\x38\x30\x33\x5A\x30\x4E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x4E\x4F\x31\x1D\x30\x1B\x06\x03\x55\x04\x0A\x0C\x14\x42\x75\x79\x70\x61\x73\x73\x20\x41\x53\x2D\x39\x38\x33\x31\x36\x33\x33\x32\x37\x31\x20\x30\x1E\x06\x03\x55\x04\x03\x0C\x17\x42\x75\x79\x70\x61\x73\x73\x20\x43\x6C\x61\x73\x73\x20\x32\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x82\x02\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x02\x0F\x00\x30\x82\x02\x0A\x02\x82\x02\x01\x00\xD7\xC7\x5E\xF7\xC1\x07\xD4\x77\xFB\x43\x21\xF4\xF4\xF5\x69\xE4\xEE\x32\x01\xDB\xA3\x86\x1F\xE4\x59\x0D\xBA\xE7\x75\x83\x52\xEB\xEA\x1C\x61\x15\x48\xBB\x1D\x07\xCA\x8C\xAE\xB0\xDC\x96\x9D\xEA\xC3\x60\x92\x86\x82\x28\x73\x9C\x56\x06\xFF\x4B\x64\xF0\x0C\x2A\x37\x49\xB5\xE5\xCF\x0C\x7C\xEE\xF1\x4A\xBB\x73\x30\x65\xF3\xD5\x2F\x83\xB6\x7E\xE3\xE7\xF5\x9E\xAB\x60\xF9\xD3\xF1\x9D\x92\x74\x8A\xE4\x1C\x96\xAC\x5B\x80\xE9\xB5\xF4\x31\x87\xA3\x51\xFC\xC7\x7E\xA1\x6F\x8E\x53\x77\xD4\x97\xC1\x55\x33\x92\x3E\x18\x2F\x75\xD4\xAD\x86\x49\xCB\x95\xAF\x54\x06\x6C\xD8\x06\x13\x8D\x5B\xFF\xE1\x26\x19\x59\xC0\x24\xBA\x81\x71\x79\x90\x44\x50\x68\x24\x94\x5F\xB8\xB3\x11\xF1\x29\x41\x61\xA3\x41\xCB\x23\x36\xD5\xC1\xF1\x32\x50\x10\x4E\x7F\xF4\x86\x93\xEC\x84\xD3\x8E\xBC\x4B\xBF\x5C\x01\x4E\x07\x3D\xDC\x14\x8A\x94\x0A\xA4\xEA\x73\xFB\x0B\x51\xE8\x13\x07\x18\xFA\x0E\xF1\x2B\xD1\x54\x15\x7D\x3C\xE1\xF7\xB4\x19\x42\x67\x62\x5E\x77\xE0\xA2\x55\xEC\xB6\xD9\x69\x17\xD5\x3A\xAF\x44\xED\x4A\xC5\x9E\xE4\x7A\x27\x7C\xE5\x75\xD7\xAA\xCB\x25\xE7\xDF\x6B\x0A\xDB\x0F\x4D\x93\x4E\xA8\xA0\xCD\x7B\x2E\xF2\x59\x01\x6A\xB7\x0D\xB8\x07\x81\x7E\x8B\x38\x1B\x38\xE6\x0A\x57\x99\x3D\xEE\x21\xE8\xA3\xF5\x0C\x16\xDD\x8B\xEC\x34\x8E\x9C\x2A\x1C\x00\x15\x17\x8D\x68\x83\xD2\x70\x9F\x18\x08\xCD\x11\x68\xD5\xC9\x6B\x52\xCD\xC4\x46\x8F\xDC\xB5\xF3\xD8\x57\x73\x1E\xE9\x94\x39\x04\xBF\xD3\xDE\x38\xDE\xB4\x53\xEC\x69\x1C\xA2\x7E\xC4\x8F\xE4\x1B\x70\xAD\xF2\xA2\xF9\xFB\xF7\x16\x64\x66\x69\x9F\x49\x51\xA2\xE2\x15\x18\x67\x06\x4A\x7F\xD5\x6C\xB5\x4D\xB3\x33\xE0\x61\xEB\x5D\xBE\xE9\x98\x0F\x32\xD7\x1D\x4B\x3C\x2E\x5A\x01\x52\x91\x09\xF2\xDF\xEA\x8D\xD8\x06\x40\x63\xAA\x11\xE4\xFE\xC3\x37\x9E\x14\x52\x3F\xF4\xE2\xCC\xF2\x61\x93\xD1\xFD\x67\x6B\xD7\x52\xAE\xBF\x68\xAB\x40\x43\xA0\x57\x35\x53\x78\xF0\x53\xF8\x61\x42\x07\x64\xC6\xD7\x6F\x9B\x4C\x38\x0D\x63\xAC\x62\xAF\x36\x8B\xA2\x73\x0A\x0D\xF5\x21\xBD\x74\xAA\x4D\xEA\x72\x03\x49\xDB\xC7\x5F\x1D\x62\x63\xC7\xFD\xDD\x91\xEC\x33\xEE\xF5\x6D\xB4\x6E\x30\x68\xDE\xC8\xD6\x26\xB0\x75\x5E\x7B\xB4\x07\x20\x98\xA1\x76\x32\xB8\x4D\x6C\x4F\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\xC9\x80\x77\xE0\x62\x92\x82\xF5\x46\x9C\xF3\xBA\xF7\x4C\xC3\xDE\xB8\xA3\xAD\x39\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x03\x82\x02\x01\x00\x53\x5F\x21\xF5\xBA\xB0\x3A\x52\x39\x2C\x92\xB0\x6C\x00\xC9\xEF\xCE\x20\xEF\x06\xF2\x96\x9E\xE9\xA4\x74\x7F\x
7A\x16\xFC\xB7\xF5\xB6\xFB\x15\x1B\x3F\xAB\xA6\xC0\x72\x5D\x10\xB1\x71\xEE\xBC\x4F\xE3\xAD\xAC\x03\x6D\x2E\x71\x2E\xAF\xC4\xE3\xAD\xA3\xBD\x0C\x11\xA7\xB4\xFF\x4A\xB2\x7B\x10\x10\x1F\xA7\x57\x41\xB2\xC0\xAE\xF4\x2C\x59\xD6\x47\x10\x88\xF3\x21\x51\x29\x30\xCA\x60\x86\xAF\x46\xAB\x1D\xED\x3A\x5B\xB0\x94\xDE\x44\xE3\x41\x08\xA2\xC1\xEC\x1D\xD6\xFD\x4F\xB6\xD6\x47\xD0\x14\x0B\xCA\xE6\xCA\xB5\x7B\x77\x7E\x41\x1F\x5E\x83\xC7\xB6\x8C\x39\x96\xB0\x3F\x96\x81\x41\x6F\x60\x90\xE2\xE8\xF9\xFB\x22\x71\xD9\x7D\xB3\x3D\x46\xBF\xB4\x84\xAF\x90\x1C\x0F\x8F\x12\x6A\xAF\xEF\xEE\x1E\x7A\xAE\x02\x4A\x8A\x17\x2B\x76\xFE\xAC\x54\x89\x24\x2C\x4F\x3F\xB6\xB2\xA7\x4E\x8C\xA8\x91\x97\xFB\x29\xC6\x7B\x5C\x2D\xB9\xCB\x66\xB6\xB7\xA8\x5B\x12\x51\x85\xB5\x09\x7E\x62\x78\x70\xFE\xA9\x6A\x60\xB6\x1D\x0E\x79\x0C\xFD\xCA\xEA\x24\x80\x72\xC3\x97\x3F\xF2\x77\xAB\x43\x22\x0A\xC7\xEB\xB6\x0C\x84\x82\x2C\x80\x6B\x41\x8A\x08\xC0\xEB\xA5\x6B\xDF\x99\x12\xCB\x8A\xD5\x5E\x80\x0C\x91\xE0\x26\x08\x36\x48\xC5\xFA\x38\x11\x35\xFF\x25\x83\x2D\xF2\x7A\xBF\xDA\xFD\x8E\xFE\xA5\xCB\x45\x2C\x1F\xC4\x88\x53\xAE\x77\x0E\xD9\x9A\x76\xC5\x8E\x2C\x1D\xA3\xBA\xD5\xEC\x32\xAE\xC0\xAA\xAC\xF7\xD1\x7A\x4D\xEB\xD4\x07\xE2\x48\xF7\x22\x8E\xB0\xA4\x9F\x6A\xCE\x8E\xB2\xB2\x60\xF4\xA3\x22\xD0\x23\xEB\x94\x5A\x7A\x69\xDD\x0F\xBF\x40\x57\xAC\x6B\x59\x50\xD9\xA3\x99\xE1\x6E\xFE\x8D\x01\x79\x27\x23\x15\xDE\x92\x9D\x7B\x09\x4D\x5A\xE7\x4B\x48\x30\x5A\x18\xE6\x0A\x6D\xE6\x8F\xE0\xD2\xBB\xE6\xDF\x7C\x6E\x21\x82\xC1\x68\x39\x4D\xB4\x98\x58\x66\x62\xCC\x4A\x90\x5E\xC3\xFA\x27\x04\xB1\x79\x15\x74\x99\xCC\xBE\xAD\x20\xDE\x26\x60\x1C\xEB\x56\x51\xA6\xA3\xEA\xE4\xA3\x3F\xA7\xFF\x61\xDC\xF1\x5A\x4D\x6C\x32\x23\x43\xEE\xAC\xA8\xEE\xEE\x4A\x12\x09\x3C\x5D\x71\xC2\xBE\x79\xFA\xC2\x87\x68\x1D\x0B\xFD\x5C\x69\xCC\x06\xD0\x9A\x7D\x54\x99\x2A\xC9\x39\x1A\x19\xAF\x4B\x2A\x43\xF3\x63\x5D\x5A\x58\xE2\x2F\xE3\x1D\xE4\xA9\xD6\xD0\x0A\xD0\x9E\xBF\xD7\x81\x09\xF1\xC9\xC7\x26\x0D\xAC\x98\x16\x56\xA0", + ["CN=Buypass Class 3 Root CA,O=Buypass AS-983163327,C=NO"] = 
"\x30\x82\x05\x59\x30\x82\x03\x41\xA0\x03\x02\x01\x02\x02\x01\x02\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x30\x4E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x4E\x4F\x31\x1D\x30\x1B\x06\x03\x55\x04\x0A\x0C\x14\x42\x75\x79\x70\x61\x73\x73\x20\x41\x53\x2D\x39\x38\x33\x31\x36\x33\x33\x32\x37\x31\x20\x30\x1E\x06\x03\x55\x04\x03\x0C\x17\x42\x75\x79\x70\x61\x73\x73\x20\x43\x6C\x61\x73\x73\x20\x33\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x1E\x17\x0D\x31\x30\x31\x30\x32\x36\x30\x38\x32\x38\x35\x38\x5A\x17\x0D\x34\x30\x31\x30\x32\x36\x30\x38\x32\x38\x35\x38\x5A\x30\x4E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x4E\x4F\x31\x1D\x30\x1B\x06\x03\x55\x04\x0A\x0C\x14\x42\x75\x79\x70\x61\x73\x73\x20\x41\x53\x2D\x39\x38\x33\x31\x36\x33\x33\x32\x37\x31\x20\x30\x1E\x06\x03\x55\x04\x03\x0C\x17\x42\x75\x79\x70\x61\x73\x73\x20\x43\x6C\x61\x73\x73\x20\x33\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x82\x02\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x02\x0F\x00\x30\x82\x02\x0A\x02\x82\x02\x01\x00\xA5\xDA\x0A\x95\x16\x50\xE3\x95\xF2\x5E\x9D\x76\x31\x06\x32\x7A\x9B\xF1\x10\x76\xB8\x00\x9A\xB5\x52\x36\xCD\x24\x47\xB0\x9F\x18\x64\xBC\x9A\xF6\xFA\xD5\x79\xD8\x90\x62\x4C\x22\x2F\xDE\x38\x3D\xD6\xE0\xA8\xE9\x1C\x2C\xDB\x78\x11\xE9\x8E\x68\x51\x15\x72\xC7\xF3\x33\x87\xE4\xA0\x5D\x0B\x5C\xE0\x57\x07\x2A\x30\xF5\xCD\xC4\x37\x77\x28\x4D\x18\x91\xE6\xBF\xD5\x52\xFD\x71\x2D\x70\x3E\xE7\xC6\xC4\x8A\xE3\xF0\x28\x0B\xF4\x76\x98\xA1\x8B\x87\x55\xB2\x3A\x13\xFC\xB7\x3E\x27\x37\x8E\x22\xE3\xA8\x4F\x2A\xEF\x60\xBB\x3D\xB7\x39\xC3\x0E\x01\x47\x99\x5D\x12\x4F\xDB\x43\xFA\x57\xA1\xED\xF9\x9D\xBE\x11\x47\x26\x5B\x13\x98\xAB\x5D\x16\x8A\xB0\x37\x1C\x57\x9D\x45\xFF\x88\x96\x36\xBF\xBB\xCA\x07\x7B\x6F\x87\x63\xD7\xD0\x32\x6A\xD6\x5D\x6C\x0C\xF1\xB3\x6E\x39\xE2\x6B\x31\x2E\x39\x00\x27\x14\xDE\x38\xC0\xEC\x19\x66\x86\x12\xE8\x9D\x72\x16\x13\x64\x52\xC7\xA9\x37\x1C\xFD\x82\x30\xED\x84\x18\x1D\xF4\xAE\x5C\xFF\x70\x13\x00\xEB\xB1\xF5\x33\x7A\x4B\xD6\x55\xF8\x05\x8D\x4B\x69\xB0\xF5\xB3\x28\x36\x5C\x14\xC4\x51\x73\x4D\x6B\x0B\xF1\x34\x07\xDB\x17\x39\xD7\xDC\x28\x7B\x6B\xF5\x9F\xF3\x2E\xC1\x4F\x17\x2A\x10\xF3\xCC\xCA\xE8\xEB\xFD\x6B\xAB\x2E\x9A\x9F\x2D\x82\x6E\x04\xD4\x52\x01\x93\x2D\x3D\x86\xFC\x7E\xFC\xDF\xEF\x42\x1D\xA6\x6B\xEF\xB9\x20\xC6\xF7\xBD\xA0\xA7\x95\xFD\xA7\xE6\x89\x24\xD8\xCC\x8C\x34\x6C\xE2\x23\x2F\xD9\x12\x1A\x21\xB9\x55\x91\x6F\x0B\x91\x79\x19\x0C\xAD\x40\x88\x0B\x70\xE2\x7A\xD2\x0E\xD8\x68\x48\xBB\x82\x13\x39\x10\x58\xE9\xD8\x2A\x07\xC6\x12\xDB\x58\xDB\xD2\x3B\x55\x10\x47\x05\x15\x67\x62\x7E\x18\x63\xA6\x46\x3F\x09\x0E\x54\x32\x5E\xBF\x0D\x62\x7A\x27\xEF\x80\xE8\xDB\xD9\x4B\x06\x5A\x37\x5A\x25\xD0\x08\x12\x77\xD4\x6F\x09\x50\x97\x3D\xC8\x1D\xC3\xDF\x8C\x45\x30\x56\xC6\xD3\x64\xAB\x66\xF3\xC0\x5E\x96\x9C\xC3\xC4\xEF\xC3\x7C\x6B\x8B\x3A\x79\x7F\xB3\x49\xCF\x3D\xE2\x89\x9F\xA0\x30\x4B\x85\xB9\x9C\x94\x24\x79\x8F\x7D\x6B\xA9\x45\x68\x0F\x2B\xD0\xF1\xDA\x1C\xCB\x69\xB8\xCA\x49\x62\x6D\xC8\xD0\x63\x62\xDD\x60\x0F\x58\xAA\x8F\xA1\xBC\x05\xA5\x66\xA2\xCF\x1B\x76\xB2\x84\x64\xB1\x4C\x39\x52\xC0\x30\xBA\xF0\x8C\x4B\x02\xB0\xB6\xB7\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x47\xB8\xCD\xFF\xE5\x6F\xEE\xF8\xB2\xEC\x2F\x4E\x0E\xF9\x25\xB0\x8E\x3C\x6B\xC3\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x03\x82\x02\x01\x00\x00\x20\x23\x41\x35\x04\x90\xC2\x40\x62\x60\xEF\xE2\x35\x4C\xD7\x3F\xAC\xE2\x34\x90\xB8\xA1\x6F\x76\xFA\x16\x
16\xA4\x48\x37\x2C\xE9\x90\xC2\xF2\x3C\xF8\x0A\x9F\xD8\x81\xE5\xBB\x5B\xDA\x25\x2C\xA4\xA7\x55\x71\x24\x32\xF6\xC8\x0B\xF2\xBC\x6A\xF8\x93\xAC\xB2\x07\xC2\x5F\x9F\xDB\xCC\xC8\x8A\xAA\xBE\x6A\x6F\xE1\x49\x10\xCC\x31\xD7\x80\xBB\xBB\xC8\xD8\xA2\x0E\x64\x57\xEA\xA2\xF5\xC2\xA9\x31\x15\xD2\x20\x6A\xEC\xFC\x22\x01\x28\xCF\x86\xB8\x80\x1E\xA9\xCC\x11\xA5\x3C\xF2\x16\xB3\x47\x9D\xFC\xD2\x80\x21\xC4\xCB\xD0\x47\x70\x41\xA1\xCA\x83\x19\x08\x2C\x6D\xF2\x5D\x77\x9C\x8A\x14\x13\xD4\x36\x1C\x92\xF0\xE5\x06\x37\xDC\xA6\xE6\x90\x9B\x38\x8F\x5C\x6B\x1B\x46\x86\x43\x42\x5F\x3E\x01\x07\x53\x54\x5D\x65\x7D\xF7\x8A\x73\xA1\x9A\x54\x5A\x1F\x29\x43\x14\x27\xC2\x85\x0F\xB5\x88\x7B\x1A\x3B\x94\xB7\x1D\x60\xA7\xB5\x9C\xE7\x29\x69\x57\x5A\x9B\x93\x7A\x43\x30\x1B\x03\xD7\x62\xC8\x40\xA6\xAA\xFC\x64\xE4\x4A\xD7\x91\x53\x01\xA8\x20\x88\x6E\x9C\x5F\x44\xB9\xCB\x60\x81\x34\xEC\x6F\xD3\x7D\xDA\x48\x5F\xEB\xB4\x90\xBC\x2D\xA9\x1C\x0B\xAC\x1C\xD5\xA2\x68\x20\x80\x04\xD6\xFC\xB1\x8F\x2F\xBB\x4A\x31\x0D\x4A\x86\x1C\xEB\xE2\x36\x29\x26\xF5\xDA\xD8\xC4\xF2\x75\x61\xCF\x7E\xAE\x76\x63\x4A\x7A\x40\x65\x93\x87\xF8\x1E\x80\x8C\x86\xE5\x86\xD6\x8F\x0E\xFC\x53\x2C\x60\xE8\x16\x61\x1A\xA2\x3E\x43\x7B\xCD\x39\x60\x54\x6A\xF5\xF2\x89\x26\x01\x68\x83\x48\xA2\x33\xE8\xC9\x04\x91\xB2\x11\x34\x11\x3E\xEA\xD0\x43\x19\x1F\x03\x93\x90\x0C\xFF\x51\x3D\x57\xF4\x41\x6E\xE1\xCB\xA0\xBE\xEB\xC9\x63\xCD\x6D\xCC\xE4\xF8\x36\xAA\x68\x9D\xED\xBD\x5D\x97\x70\x44\x0D\xB6\x0E\x35\xDC\xE1\x0C\x5D\xBB\xA0\x51\x94\xCB\x7E\x16\xEB\x11\x2F\xA3\x92\x45\xC8\x4C\x71\xD9\xBC\xC9\x99\x52\x57\x46\x2F\x50\xCF\xBD\x35\x69\xF4\x3D\x15\xCE\x06\xA5\x2C\x0F\x3E\xF6\x81\xBA\x94\xBB\xC3\xBB\xBF\x65\x78\xD2\x86\x79\xFF\x49\x3B\x1A\x83\x0C\xF0\xDE\x78\xEC\xC8\xF2\x4D\x4C\x1A\xDE\x82\x29\xF8\xC1\x5A\xDA\xED\xEE\xE6\x27\x5E\xE8\x45\xD0\x9D\x1C\x51\xA8\x68\xAB\x44\xE3\xD0\x8B\x6A\xE3\xF8\x3B\xBB\xDC\x4D\xD7\x64\xF2\x51\xBE\xE6\xAA\xAB\x5A\xE9\x31\xEE\x06\xBC\x73\xBF\x13\x62\x0A\x9F\xC7\xB9\x97", }; From 750e1ddf69d9f3375801615e872ec42b8a8d5a6d Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Mon, 16 Jul 2012 15:51:47 -0400 Subject: [PATCH 477/651] Fixed a couple of init-time mem leaks. 
--- src/DPM.cc | 1 + src/Scope.cc | 11 +++++------ src/scan.l | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/DPM.cc b/src/DPM.cc index d7e5cd25ef..6ecf3b1336 100644 --- a/src/DPM.cc +++ b/src/DPM.cc @@ -117,6 +117,7 @@ void DPM::AddConfig(const Analyzer::Config& cfg) desc.SP(); #endif } + Unref(plist); } } diff --git a/src/Scope.cc b/src/Scope.cc index 4916cdbfce..731ced93b7 100644 --- a/src/Scope.cc +++ b/src/Scope.cc @@ -50,7 +50,7 @@ Scope::~Scope() ID* Scope::GenerateTemporary(const char* name) { - return new ID(copy_string(name), SCOPE_FUNCTION, false); + return new ID(name, SCOPE_FUNCTION, false); } id_list* Scope::GetInits() @@ -166,16 +166,15 @@ ID* install_ID(const char* name, const char* module_name, else scope = SCOPE_FUNCTION; - string full_name_str = make_full_var_name(module_name, name); - char* full_name = copy_string(full_name_str.c_str()); + string full_name = make_full_var_name(module_name, name); - ID* id = new ID(full_name, scope, is_export); + ID* id = new ID(full_name.c_str(), scope, is_export); if ( SCOPE_FUNCTION != scope ) - global_scope()->Insert(full_name, id); + global_scope()->Insert(full_name.c_str(), id); else { id->SetOffset(top_scope->Length()); - top_scope->Insert(full_name, id); + top_scope->Insert(full_name.c_str(), id); } return id; diff --git a/src/scan.l b/src/scan.l index 645ce659cd..d90501dd55 100644 --- a/src/scan.l +++ b/src/scan.l @@ -776,7 +776,7 @@ void add_input_file(const char* file) if ( ! filename ) (void) load_files(file); else - input_files.append(copy_string(file)); + input_files.append((char*) file); } void add_to_name_list(char* s, char delim, name_list& nl) From 5d04d583854efd592495d6678895a042cf03e698 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Tue, 17 Jul 2012 13:57:23 -0400 Subject: [PATCH 478/651] Fixed small elasticsearch problem in configure output. --- CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 14cf66ac19..bd6bf95737 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -122,13 +122,15 @@ if (LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND) list(APPEND OPTLIBS ${LibXML2_LIBRARIES}) endif() +set(USE_ELASTICSEARCH false) set(USE_CURL false) find_package(CURL) + if (CURL_FOUND) + set(USE_ELASTICSEARCH true) set(USE_CURL true) include_directories(BEFORE ${CURL_INCLUDE_DIR}) list(APPEND OPTLIBS ${CURL_LIBRARIES}) - set(USE_ELASTICSEARCH true) endif() if (ENABLE_PERFTOOLS_DEBUG) From 81edec8b2eeef682c4bb2639a0b191e12bc2f561 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 17 Jul 2012 14:16:15 -0700 Subject: [PATCH 479/651] Fix bug, where in dns.log rcode always was set to 0/NOERROR when no reply package was seen. In the fixed version rcode is only set when a reply packet was seen. Updates for the baseline have been commited separately in the topic/bernhard/dns-fix branch. --- scripts/base/protocols/dns/main.bro | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/base/protocols/dns/main.bro b/scripts/base/protocols/dns/main.bro index 600de4beaf..c951ff4fd2 100644 --- a/scripts/base/protocols/dns/main.bro +++ b/scripts/base/protocols/dns/main.bro @@ -162,11 +162,11 @@ function set_session(c: connection, msg: dns_msg, is_query: bool) c$dns = c$dns_state$pending[msg$id]; - c$dns$rcode = msg$rcode; - c$dns$rcode_name = base_errors[msg$rcode]; - if ( ! is_query ) { + c$dns$rcode = msg$rcode; + c$dns$rcode_name = base_errors[msg$rcode]; + if ( ! 
c$dns?$total_answers ) c$dns$total_answers = msg$num_answers; From 1fa182c16918d258cbda6bfc69b3394103d4313f Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 18 Jul 2012 00:00:31 -0400 Subject: [PATCH 480/651] Some better elasticsearch reliability. - Added a configurable option for timing out ES HTTP requests. - Stop sending reporter messages after one message for one failure. --- .../logging/writers/elasticsearch.bro | 3 ++ src/logging.bif | 1 + src/logging/writers/ElasticSearch.cc | 38 ++++++++++++++----- src/logging/writers/ElasticSearch.h | 2 + 4 files changed, 35 insertions(+), 9 deletions(-) diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro index 93c6c98705..adc675e487 100644 --- a/scripts/base/frameworks/logging/writers/elasticsearch.bro +++ b/scripts/base/frameworks/logging/writers/elasticsearch.bro @@ -17,6 +17,9 @@ export { ## e.g. prefix = "bro_" would create types of bro_dns, bro_software, etc. const type_prefix = "" &redef; + ## The time before an ElasticSearch transfer will timeout. + const transfer_timeout = 2secs; + ## The batch size is the number of messages that will be queued up before ## they are sent to be bulk indexed. ## Note: this is mainly a memory usage parameter. diff --git a/src/logging.bif b/src/logging.bif index 3cdb414d80..7e50a9d285 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -91,6 +91,7 @@ const server_host: string; const server_port: count; const index_prefix: string; const type_prefix: string; +const transfer_timeout: interval; const max_batch_size: count; const max_batch_interval: interval; const max_byte_size: count; diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 1b8dfa495d..71be036a72 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -42,7 +42,10 @@ ElasticSearch::ElasticSearch(WriterFrontend* frontend) : WriterBackend(frontend) current_index = string(); prev_index = string(); last_send = current_time(); + failing = false; + transfer_timeout = BifConst::LogElasticSearch::transfer_timeout * 1000; + curl_handle = HTTPSetup(); } @@ -77,12 +80,13 @@ bool ElasticSearch::BatchIndex() curl_easy_setopt(curl_handle, CURLOPT_POST, 1); curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)buffer.Len()); curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, buffer.Bytes()); - HTTPSend(curl_handle); - + failing = ! HTTPSend(curl_handle); + + // We are currently throwing the data out regardless of if the send failed. Fire and forget! buffer.Clear(); counter = 0; last_send = current_time(); - + return true; } @@ -347,6 +351,8 @@ bool ElasticSearch::HTTPSend(CURL *handle) // HTTP 1.1 likes to use chunked encoded transfers, which aren't good for speed. // The best (only?) way to disable that is to just use HTTP 1.0 curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); + + curl_easy_setopt(handle, CURLOPT_TIMEOUT_MS, transfer_timeout); CURLcode return_code = curl_easy_perform(handle); @@ -355,21 +361,35 @@ bool ElasticSearch::HTTPSend(CURL *handle) case CURLE_COULDNT_CONNECT: case CURLE_COULDNT_RESOLVE_HOST: case CURLE_WRITE_ERROR: - return false; + case CURLE_RECV_ERROR: + { + if ( ! failing ) + Error(Fmt("ElasticSearch server may not be accessible.")); + } + + case CURLE_OPERATION_TIMEDOUT: + { + if ( ! 
failing ) + Warning(Fmt("HTTP operation with elasticsearch server timed out at %" PRIu64 " msecs.", transfer_timeout)); + } case CURLE_OK: { uint http_code = 0; curl_easy_getinfo(curl_handle, CURLINFO_RESPONSE_CODE, &http_code); - if ( http_code != 200 ) - Error(Fmt("Received a non-successful status code back from ElasticSearch server.")); - - return true; + if ( http_code == 200 ) + // Hopefully everything goes through here. + return true; + else if ( ! failing ) + Error(Fmt("Received a non-successful status code back from ElasticSearch server, check the elasticsearch server log.")); } default: - return true; + { + } } + // The "successful" return happens above + return false; } #endif diff --git a/src/logging/writers/ElasticSearch.h b/src/logging/writers/ElasticSearch.h index 375845b002..60977f7737 100644 --- a/src/logging/writers/ElasticSearch.h +++ b/src/logging/writers/ElasticSearch.h @@ -65,6 +65,8 @@ private: string path; string index_prefix; + uint64 transfer_timeout; + bool failing; uint64 batch_size; }; From 50f5f8131df7691643209ccf2d058ab98a4ba6ad Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 18 Jul 2012 07:29:01 -0700 Subject: [PATCH 481/651] Revert "Fixed a couple of init-time mem leaks." This reverts commit 750e1ddf69d9f3375801615e872ec42b8a8d5a6d. --- src/DPM.cc | 1 - src/Scope.cc | 11 ++++++----- src/scan.l | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/DPM.cc b/src/DPM.cc index 6ecf3b1336..d7e5cd25ef 100644 --- a/src/DPM.cc +++ b/src/DPM.cc @@ -117,7 +117,6 @@ void DPM::AddConfig(const Analyzer::Config& cfg) desc.SP(); #endif } - Unref(plist); } } diff --git a/src/Scope.cc b/src/Scope.cc index 731ced93b7..4916cdbfce 100644 --- a/src/Scope.cc +++ b/src/Scope.cc @@ -50,7 +50,7 @@ Scope::~Scope() ID* Scope::GenerateTemporary(const char* name) { - return new ID(name, SCOPE_FUNCTION, false); + return new ID(copy_string(name), SCOPE_FUNCTION, false); } id_list* Scope::GetInits() @@ -166,15 +166,16 @@ ID* install_ID(const char* name, const char* module_name, else scope = SCOPE_FUNCTION; - string full_name = make_full_var_name(module_name, name); + string full_name_str = make_full_var_name(module_name, name); + char* full_name = copy_string(full_name_str.c_str()); - ID* id = new ID(full_name.c_str(), scope, is_export); + ID* id = new ID(full_name, scope, is_export); if ( SCOPE_FUNCTION != scope ) - global_scope()->Insert(full_name.c_str(), id); + global_scope()->Insert(full_name, id); else { id->SetOffset(top_scope->Length()); - top_scope->Insert(full_name.c_str(), id); + top_scope->Insert(full_name, id); } return id; diff --git a/src/scan.l b/src/scan.l index d90501dd55..645ce659cd 100644 --- a/src/scan.l +++ b/src/scan.l @@ -776,7 +776,7 @@ void add_input_file(const char* file) if ( ! filename ) (void) load_files(file); else - input_files.append((char*) file); + input_files.append(copy_string(file)); } void add_to_name_list(char* s, char delim, name_list& nl) From 43507b1bb9b2ff484716e2e8f151a5fdc8974951 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 18 Jul 2012 11:28:41 -0400 Subject: [PATCH 482/651] New script for easily duplicating logs to ElasticSearch. 
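
The script added by this patch (shown in the diff below) installs its extra "-es" filters at bro_init time, so a site only needs to load it and optionally redefine its tuning constants. The following is a minimal, hypothetical local.bro sketch — it assumes the policy/ tree is on BROPATH, and the stream names are just examples; any Log::ID that renders via fmt("%s", id) can be listed:

    # Duplicate logs to ElasticSearch in addition to the regular writers.
    @load tuning/logs-to-elasticsearch

    # Ship only these streams (leaving the set empty means "all streams").
    redef LogElasticSearch::send_logs += { "Conn::LOG", "HTTP::LOG" };

    # Rotate the ElasticSearch indices more often than the 24hr default.
    redef LogElasticSearch::rotation_interval = 6hr;
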
--- .../policy/tuning/logs-to-elasticsearch.bro | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 scripts/policy/tuning/logs-to-elasticsearch.bro diff --git a/scripts/policy/tuning/logs-to-elasticsearch.bro b/scripts/policy/tuning/logs-to-elasticsearch.bro new file mode 100644 index 0000000000..c3cc9d5002 --- /dev/null +++ b/scripts/policy/tuning/logs-to-elasticsearch.bro @@ -0,0 +1,45 @@ +##! Load this script to enable global log output to an ElasticSearch database. + +module LogElasticSearch; + +export { + ## An elasticsearch specific rotation interval. + const rotation_interval = 24hr &redef; + + ## Optionally ignore any :bro:enum:`Log::ID` from being sent to + ## ElasticSearch with this script. + const excluded_log_ids: set[string] = set("Communication::LOG") &redef; + + ## If you want to explicitly only send certain :bro:enum:`Log::ID` + ## streams, add them to this set. If the set remains empty, all will + ## be sent. The :bro:id:`excluded_log_ids` option will remain in + ## effect as well. + const send_logs: set[string] = set() &redef; +} + +module Log; + +event bro_init() &priority=-5 + { + local my_filters: table[ID, string] of Filter = table(); + + for ( [id, name] in filters ) + { + local filter = filters[id, name]; + if ( fmt("%s", id) in LogElasticSearch::excluded_log_ids || + (|LogElasticSearch::send_logs| > 0 && fmt("%s", id) !in LogElasticSearch::send_logs) ) + next; + + filter$name = cat(name, "-es"); + filter$writer = Log::WRITER_ELASTICSEARCH; + filter$interv = LogElasticSearch::rotation_interval; + my_filters[id, name] = filter; + } + + # This had to be done separately to avoid an ever growing filters list + # where the for loop would never end. + for ( [id, name] in my_filters ) + { + Log::add_filter(id, filter); + } + } \ No newline at end of file From 6335dbb5e1cf694afea3c306012a258614d13880 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 18 Jul 2012 11:32:14 -0400 Subject: [PATCH 483/651] Fixing calc_next_rotate to use UTC based time functions. --- src/util.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util.cc b/src/util.cc index 3cfa5fca1c..abbea3e906 100644 --- a/src/util.cc +++ b/src/util.cc @@ -1112,9 +1112,9 @@ double calc_next_rotate(double current, double interval, double base) time_t teatime = time_t(current); struct tm t; - t = *localtime_r(&teatime, &t); + t = *gmtime_r(&teatime, &t); t.tm_hour = t.tm_min = t.tm_sec = 0; - double startofday = mktime(&t); + double startofday = timegm(&t); if ( base < 0 ) // No base time given. To get nice timestamps, we round From 18268273594900cdeabc811d1a9cf6562caf2687 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 18 Jul 2012 15:42:23 -0400 Subject: [PATCH 484/651] Changed ES index names to localtime and added a meta index. --- src/logging/writers/ElasticSearch.cc | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 71be036a72..2095ed62df 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -263,11 +263,28 @@ bool ElasticSearch::UpdateIndex(double now, double rinterval, double rbase) struct tm tm; char buf[128]; time_t teatime = (time_t)interval_beginning; - gmtime_r(&teatime, &tm); + localtime_r(&teatime, &tm); strftime(buf, sizeof(buf), "%Y%m%d%H%M", &tm); prev_index = current_index; current_index = index_prefix + "-" + buf; + + // Send some metadata about this index. 
+ buffer.AddRaw("{\"index\":{\"_index\":\"@", 21); + buffer.Add(index_prefix); + buffer.AddRaw("-meta\",\"_type\":\"index\",\"_id\":\"", 30); + buffer.Add(current_index); + buffer.AddRaw("-", 1); + buffer.Add(Info().rotation_base); + buffer.AddRaw("-", 1); + buffer.Add(Info().rotation_interval); + buffer.AddRaw("\"}}\n{\"name\":\"", 13); + buffer.Add(current_index); + buffer.AddRaw("\",\"start\":", 10); + buffer.Add(interval_beginning); + buffer.AddRaw(",\"end\":", 7); + buffer.Add(interval_beginning+rinterval); + buffer.AddRaw("}\n", 2); } //printf("%s - prev:%s current:%s\n", Info().path.c_str(), prev_index.c_str(), current_index.c_str()); From 0c4c5ff33571c9f5cec67d432dd401fc1770e0d4 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 19 Jul 2012 12:14:13 -0400 Subject: [PATCH 485/651] Temporarily removing the ES timeout because it works with signals and is incompatible with Bro threads. --- src/logging/writers/ElasticSearch.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 2095ed62df..1ae81dfde8 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -369,7 +369,7 @@ bool ElasticSearch::HTTPSend(CURL *handle) // The best (only?) way to disable that is to just use HTTP 1.0 curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); - curl_easy_setopt(handle, CURLOPT_TIMEOUT_MS, transfer_timeout); + //curl_easy_setopt(handle, CURLOPT_TIMEOUT_MS, transfer_timeout); CURLcode return_code = curl_easy_perform(handle); From f73eb3b086c1ae88c122434613501af950a9dba0 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 12 Jul 2012 13:44:24 -0700 Subject: [PATCH 486/651] Reworking thread termination logic. Turns out the finish methods weren't called correctly, caused by a mess up with method names which all sounded too similar and the wrong one ended up being called. I've reworked this by changing the thread/writer/reader interfaces, which actually also simplifies them by getting rid of the requirement for writer backends to call their parent methods (i.e., less opportunity for errors). This commit also includes the following (because I noticed the problem above when working on some of these): - The ASCII log writer now includes "#start " and "#end lines in the each file. The latter supersedes Bernhard's "EOF" patch. This required a number of tests updates. The standard canonifier removes the timestamps, but some tests compare files directly, which doesn't work if they aren't printing out the same timestamps (like the comm tests). - The above required yet another change to the writer API to network_time to methods. - Renamed ASCII logger "header" options to "meta". - Fixes #763 "Escape # when first character in log file line". All btests pass for me on Linux FC15. Will try MacOS next. 
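
Because this patch renames the ASCII writer options, existing site configurations need a small update. A hedged before/after sketch, based only on the option names visible in the ascii.bro and logging.bif hunks below (the values chosen are illustrative):

    # Bro 2.0 names (removed by this patch):
    # redef LogAscii::include_header = F;
    # redef LogAscii::header_prefix = "#";

    # New names:
    redef LogAscii::include_meta = F;    # also suppresses the new open/close timestamp lines
    redef LogAscii::meta_prefix = "#";   # prefix for meta lines when they are enabled
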
--- NEWS | 5 + .../base/frameworks/logging/writers/ascii.bro | 11 ++- src/input/Manager.cc | 2 - src/input/ReaderBackend.cc | 9 +- src/input/ReaderBackend.h | 25 ++--- src/input/ReaderFrontend.cc | 26 ----- src/input/readers/Ascii.cc | 2 - src/input/readers/Benchmark.cc | 1 - src/input/readers/Raw.cc | 2 - src/logging.bif | 4 +- src/logging/Manager.cc | 3 +- src/logging/WriterBackend.cc | 16 +-- src/logging/WriterBackend.h | 32 ++++-- src/logging/WriterFrontend.cc | 33 ++----- src/logging/WriterFrontend.h | 8 +- src/logging/writers/Ascii.cc | 97 ++++++++++++++----- src/logging/writers/Ascii.h | 13 ++- src/logging/writers/DataSeries.cc | 14 ++- src/logging/writers/DataSeries.h | 5 +- src/logging/writers/None.h | 5 +- src/threading/BasicThread.cc | 35 ++----- src/threading/BasicThread.h | 8 ++ src/threading/MsgThread.cc | 31 ++++-- src/threading/MsgThread.h | 34 ++++--- .../ssh-filtered.log | 12 +++ .../ssh.log | 12 --- .../test.log | 12 +++ testing/btest/core/expr-exception.bro | 2 +- testing/btest/istate/events-ssl.bro | 9 +- testing/btest/istate/events.bro | 9 +- .../base/frameworks/logging/ascii-empty.bro | 5 +- .../logging/ascii-line-like-comment.bro | 23 +++++ .../base/frameworks/logging/ascii-options.bro | 2 +- .../base/frameworks/logging/remote-types.bro | 8 +- .../base/frameworks/logging/remote.bro | 8 +- .../notice/default-policy-order.test | 6 +- testing/scripts/diff-remove-timestamps | 7 +- 37 files changed, 313 insertions(+), 223 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh-filtered.log delete mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh.log create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log create mode 100644 testing/btest/scripts/base/frameworks/logging/ascii-line-like-comment.bro diff --git a/NEWS b/NEWS index d9410e1c7c..0798920d8a 100644 --- a/NEWS +++ b/NEWS @@ -140,6 +140,11 @@ the full set. Bro now supports decapsulating tunnels directly for protocols it understands. +- ASCII logs now record the time when they were opened/closed at the + beginning and end of the file, respectively. The options + LogAscii::header_prefix and LogAscii::include_header have been + renamed to LogAscii::meta_prefix and LogAscii::include_meta, + respectively. Bro 2.0 ------- diff --git a/scripts/base/frameworks/logging/writers/ascii.bro b/scripts/base/frameworks/logging/writers/ascii.bro index fa1fcd6797..bacb0996d0 100644 --- a/scripts/base/frameworks/logging/writers/ascii.bro +++ b/scripts/base/frameworks/logging/writers/ascii.bro @@ -8,12 +8,13 @@ export { ## into files. This is primarily for debugging purposes. const output_to_stdout = F &redef; - ## If true, include a header line with column names and description - ## of the other ASCII logging options that were used. - const include_header = T &redef; + ## If true, include lines with log meta information such as column names with + ## types, the values of ASCII logging options that in use, and the time when the + ## file was opened and closes (the latter at the end). + const include_meta = T &redef; - ## Prefix for the header line if included. - const header_prefix = "#" &redef; + ## Prefix for lines with meta information. + const meta_prefix = "#" &redef; ## Separator between fields. 
const separator = "\t" &redef; diff --git a/src/input/Manager.cc b/src/input/Manager.cc index fc68343813..1c6b69e8ec 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -726,8 +726,6 @@ bool Manager::RemoveStream(Stream *i) i->removed = true; - i->reader->Close(); - DBG_LOG(DBG_INPUT, "Successfully queued removal of stream %s", i->name.c_str()); diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index dea554251e..84106a3c94 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -207,7 +207,7 @@ bool ReaderBackend::Init(const ReaderInfo& arg_info, const int arg_num_fields, return success; } -void ReaderBackend::Close() +bool ReaderBackend::OnFinish(double network_time) { DoClose(); disabled = true; // frontend disables itself when it gets the Close-message. @@ -221,6 +221,8 @@ void ReaderBackend::Close() delete [] (fields); fields = 0; } + + return true; } bool ReaderBackend::Update() @@ -243,10 +245,9 @@ void ReaderBackend::DisableFrontend() SendOut(new DisableMessage(frontend)); } -bool ReaderBackend::DoHeartbeat(double network_time, double current_time) +bool ReaderBackend::OnHeartbeat(double network_time, double current_time) { - MsgThread::DoHeartbeat(network_time, current_time); - return true; + return DoHeartbeat(network_time, current_time); } TransportProto ReaderBackend::StringToProto(const string &proto) diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index 820633254a..1e77a61f37 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -108,15 +108,6 @@ public: */ bool Init(const ReaderInfo& info, int num_fields, const threading::Field* const* fields); - /** - * Finishes reading from this input stream in a regular fashion. Must - * not be called if an error has been indicated earlier. After - * calling this, no further reading from the stream can be performed. - * - * @return False if an error occured. - */ - void Close(); - /** * Force trigger an update of the input stream. The action that will * be taken depends on the current read mode and the individual input @@ -149,6 +140,9 @@ public: */ int NumFields() const { return num_fields; } + // Overridden from MsgThread. + virtual bool OnHeartbeat(double network_time, double current_time); + virtual bool OnFinish(double network_time); protected: // Methods that have to be overwritten by the individual readers @@ -200,6 +194,11 @@ protected: */ virtual bool DoUpdate() = 0; + /** + * Triggered by regular heartbeat messages from the main thread. + */ + virtual bool DoHeartbeat(double network_time, double current_time) = 0; + /** * Method allowing a reader to send a specified Bro event. Vals must * match the values expected by the bro event. @@ -271,14 +270,6 @@ protected: */ void EndCurrentSend(); - /** - * Triggered by regular heartbeat messages from the main thread. - * - * This method can be overridden but once must call - * ReaderBackend::DoHeartbeat(). - */ - virtual bool DoHeartbeat(double network_time, double current_time); - /** * Convert a string into a TransportProto. This is just a utility * function for Readers. 
diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index e489147d36..7e4ef201b1 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -37,17 +37,6 @@ public: virtual bool Process() { return Object()->Update(); } }; -class CloseMessage : public threading::InputMessage -{ -public: - CloseMessage(ReaderBackend* backend) - : threading::InputMessage("Close", backend) - { } - - virtual bool Process() { Object()->Close(); return true; } -}; - - ReaderFrontend::ReaderFrontend(bro_int_t type) { disabled = initialized = false; @@ -93,21 +82,6 @@ void ReaderFrontend::Update() backend->SendIn(new UpdateMessage(backend)); } -void ReaderFrontend::Close() - { - if ( disabled ) - return; - - if ( ! initialized ) - { - reporter->Error("Tried to call finish on uninitialized reader"); - return; - } - - disabled = true; - backend->SendIn(new CloseMessage(backend)); - } - string ReaderFrontend::Name() const { if ( ! info.source.size() ) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index dd1e742e5e..7f93a3138c 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -506,8 +506,6 @@ bool Ascii::DoUpdate() bool Ascii::DoHeartbeat(double network_time, double current_time) { - ReaderBackend::DoHeartbeat(network_time, current_time); - switch ( Info().mode ) { case MODE_MANUAL: // yay, we do nothing :) diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index d71901fa66..28afdc1c89 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -222,7 +222,6 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) bool Benchmark::DoHeartbeat(double network_time, double current_time) { - ReaderBackend::DoHeartbeat(network_time, current_time); num_lines = (int) ( (double) num_lines*multiplication_factor); num_lines += add; heartbeatstarttime = CurrTime(); diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index 1bae6cfa0c..f62e966883 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -252,8 +252,6 @@ bool Raw::DoUpdate() bool Raw::DoHeartbeat(double network_time, double current_time) { - ReaderBackend::DoHeartbeat(network_time, current_time); - switch ( Info().mode ) { case MODE_MANUAL: // yay, we do nothing :) diff --git a/src/logging.bif b/src/logging.bif index d25e89c33c..48e0edbb06 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -65,8 +65,8 @@ function Log::__flush%(id: Log::ID%): bool module LogAscii; const output_to_stdout: bool; -const include_header: bool; -const header_prefix: string; +const include_meta: bool; +const meta_prefix: string; const separator: string; const set_separator: string; const empty_field: string; diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 0fea3d577d..1808b83738 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -771,6 +771,7 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) WriterBackend::WriterInfo info; info.path = path; + info.network_time = network_time; HashKey* k; IterCookie* c = filter->config->AsTable()->InitForIteration(); @@ -1156,7 +1157,7 @@ bool Manager::Flush(EnumVal* id) for ( Stream::WriterMap::iterator i = stream->writers.begin(); i != stream->writers.end(); i++ ) - i->second->writer->Flush(); + i->second->writer->Flush(network_time); RemoveDisabledWriters(stream); diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 00590208d5..68b0b506a1 100644 --- a/src/logging/WriterBackend.cc +++ 
b/src/logging/WriterBackend.cc @@ -18,7 +18,7 @@ namespace logging { class RotationFinishedMessage : public threading::OutputMessage { public: - RotationFinishedMessage(WriterFrontend* writer, string new_name, string old_name, + RotationFinishedMessage(WriterFrontend* writer, string new_name, string old_name, double open, double close, bool terminating) : threading::OutputMessage("RotationFinished", writer), new_name(new_name), old_name(old_name), open(open), @@ -260,9 +260,9 @@ bool WriterBackend::Rotate(string rotated_path, double open, return true; } -bool WriterBackend::Flush() +bool WriterBackend::Flush(double network_time) { - if ( ! DoFlush() ) + if ( ! DoFlush(network_time) ) { DisableFrontend(); return false; @@ -271,13 +271,15 @@ bool WriterBackend::Flush() return true; } -bool WriterBackend::DoHeartbeat(double network_time, double current_time) +bool WriterBackend::OnFinish(double network_time) { - MsgThread::DoHeartbeat(network_time, current_time); + return DoFinish(network_time); + } +bool WriterBackend::OnHeartbeat(double network_time, double current_time) + { SendOut(new FlushWriteBufferMessage(frontend)); - - return true; + return DoHeartbeat(network_time, current_time); } string WriterBackend::Render(const threading::Value::addr_t& addr) const diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 6e65a8151a..33cde8679e 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -67,6 +67,11 @@ public: */ double rotation_base; + /** + * The network time when the writer is created. + */ + double network_time; + /** * A map of key/value pairs corresponding to the relevant * filter's "config" table. @@ -129,9 +134,11 @@ public: * Flushes any currently buffered output, assuming the writer * supports that. (If not, it will be ignored). * + * @param network_time The network time when the flush was triggered. + * * @return False if an error occured. */ - bool Flush(); + bool Flush(double network_time); /** * Triggers rotation, if the writer supports that. (If not, it will @@ -213,6 +220,10 @@ public: */ string Render(double d) const; + // Overridden from MsgThread. + virtual bool OnHeartbeat(double network_time, double current_time); + virtual bool OnFinish(double network_time); + protected: friend class FinishMessage; @@ -272,8 +283,10 @@ protected: * will then be disabled and eventually deleted. When returning * false, an implementation should also call Error() to indicate what * happened. + * + * @param network_time The network time when the flush was triggered. */ - virtual bool DoFlush() = 0; + virtual bool DoFlush(double network_time) = 0; /** * Writer-specific method implementing log rotation. Most directly @@ -314,20 +327,19 @@ protected: /** * Writer-specific method called just before the threading system is - * going to shutdown. + * going to shutdown. It is assumed that once this messages returns, + * the thread can be safely terminated. * - * This method can be overridden but one must call - * WriterBackend::DoFinish(). + * @param network_time The network time when the finish is triggered. */ - virtual bool DoFinish() { return MsgThread::DoFinish(); } - + virtual bool DoFinish(double network_time) = 0; /** * Triggered by regular heartbeat messages from the main thread. * - * This method can be overridden but one must call - * WriterBackend::DoHeartbeat(). + * This method can be overridden. Default implementation does + * nothing. 
*/ - virtual bool DoHeartbeat(double network_time, double current_time); + virtual bool DoHeartbeat(double network_time, double current_time) = 0; private: /** diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 21bde0d43c..577003926b 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -81,19 +81,13 @@ private: class FlushMessage : public threading::InputMessage { public: - FlushMessage(WriterBackend* backend) - : threading::InputMessage("Flush", backend) {} + FlushMessage(WriterBackend* backend, double network_time) + : threading::InputMessage("Flush", backend), + network_time(network_time) {} - virtual bool Process() { return Object()->Flush(); } -}; - -class FinishMessage : public threading::InputMessage -{ -public: - FinishMessage(WriterBackend* backend) - : threading::InputMessage("Finish", backend) {} - - virtual bool Process() { return Object()->DoFinish(); } + virtual bool Process() { return Object()->Flush(network_time); } +private: + double network_time; }; } @@ -240,7 +234,7 @@ void WriterFrontend::SetBuf(bool enabled) FlushWriteBuffer(); } -void WriterFrontend::Flush() +void WriterFrontend::Flush(double network_time) { if ( disabled ) return; @@ -248,7 +242,7 @@ void WriterFrontend::Flush() FlushWriteBuffer(); if ( backend ) - backend->SendIn(new FlushMessage(backend)); + backend->SendIn(new FlushMessage(backend, network_time)); } void WriterFrontend::Rotate(string rotated_path, double open, double close, bool terminating) @@ -266,17 +260,6 @@ void WriterFrontend::Rotate(string rotated_path, double open, double close, bool log_mgr->FinishedRotation(0, "", rotated_path, open, close, terminating); } -void WriterFrontend::Finish() - { - if ( disabled ) - return; - - FlushWriteBuffer(); - - if ( backend ) - backend->SendIn(new FinishMessage(backend)); - } - void WriterFrontend::DeleteVals(Value** vals) { // Note this code is duplicated in Manager::DeleteVals(). diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index 8a0dce4645..6581fb1c1b 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -114,8 +114,10 @@ public: * message back that will asynchronously call Disable(). * * This method must only be called from the main thread. + * + * @param network_time The network time when the flush was triggered. */ - void Flush(); + void Flush(double network_time); /** * Triggers log rotation. @@ -138,8 +140,10 @@ public: * sends a message back that will asynchronously call Disable(). * * This method must only be called from the main thread. + * + * @param network_time The network time when the finish was triggered. 
*/ - void Finish(); + void Finish(double network_time); /** * Explicitly triggers a transfer of all potentially buffered Write() diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 6e5ceef678..ab68cd77d8 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -18,7 +18,7 @@ Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) file = 0; output_to_stdout = BifConst::LogAscii::output_to_stdout; - include_header = BifConst::LogAscii::include_header; + include_meta = BifConst::LogAscii::include_meta; separator_len = BifConst::LogAscii::separator->Len(); separator = new char[separator_len]; @@ -40,10 +40,10 @@ Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) memcpy(unset_field, BifConst::LogAscii::unset_field->Bytes(), unset_field_len); - header_prefix_len = BifConst::LogAscii::header_prefix->Len(); - header_prefix = new char[header_prefix_len]; - memcpy(header_prefix, BifConst::LogAscii::header_prefix->Bytes(), - header_prefix_len); + meta_prefix_len = BifConst::LogAscii::meta_prefix->Len(); + meta_prefix = new char[meta_prefix_len]; + memcpy(meta_prefix, BifConst::LogAscii::meta_prefix->Bytes(), + meta_prefix_len); desc.EnableEscaping(); desc.AddEscapeSequence(separator, separator_len); @@ -51,24 +51,39 @@ Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) Ascii::~Ascii() { + // Normally, the file will be closed here already via the Finish() + // message. But when we terminate abnormally, we may still have it + // open. if ( file ) - fclose(file); + CloseFile(0); delete [] separator; delete [] set_separator; delete [] empty_field; delete [] unset_field; - delete [] header_prefix; + delete [] meta_prefix; } bool Ascii::WriteHeaderField(const string& key, const string& val) { - string str = string(header_prefix, header_prefix_len) + + string str = string(meta_prefix, meta_prefix_len) + key + string(separator, separator_len) + val + "\n"; return (fwrite(str.c_str(), str.length(), 1, file) == 1); } +void Ascii::CloseFile(double t) + { + if ( ! file ) + return; + + if ( include_meta ) + WriteHeaderField("end", t ? Timestamp(t) : ""); + + fclose(file); + file = 0; + } + bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * fields) { string path = info.path; @@ -81,17 +96,17 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * if ( ! (file = fopen(fname.c_str(), "w")) ) { Error(Fmt("cannot open %s: %s", fname.c_str(), - strerror(errno))); + Strerror(errno))); return false; } - if ( include_header ) + if ( include_meta ) { string names; string types; - string str = string(header_prefix, header_prefix_len) + string str = string(meta_prefix, meta_prefix_len) + "separator " // Always use space as separator here. 
+ get_escaped_string(string(separator, separator_len), false) + "\n"; @@ -105,8 +120,9 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * string(empty_field, empty_field_len), false)) && WriteHeaderField("unset_field", get_escaped_string( string(unset_field, unset_field_len), false)) && - WriteHeaderField("path", get_escaped_string(path, false))) ) - goto write_error; + WriteHeaderField("path", get_escaped_string(path, false)) && + WriteHeaderField("start", Timestamp(info.network_time))) ) + goto write_error; for ( int i = 0; i < num_fields; ++i ) { @@ -128,21 +144,23 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * return true; write_error: - Error(Fmt("error writing to %s: %s", fname.c_str(), strerror(errno))); + Error(Fmt("error writing to %s: %s", fname.c_str(), Strerror(errno))); return false; } -bool Ascii::DoFlush() +bool Ascii::DoFlush(double network_time) { fflush(file); return true; } -bool Ascii::DoFinish() +bool Ascii::DoFinish(double network_time) { - return WriterBackend::DoFinish(); + CloseFile(network_time); + return true; } + bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) { if ( ! val->present ) @@ -307,16 +325,33 @@ bool Ascii::DoWrite(int num_fields, const Field* const * fields, desc.AddRaw("\n", 1); - if ( fwrite(desc.Bytes(), desc.Len(), 1, file) != 1 ) + const char* bytes = (const char*)desc.Bytes(); + int len = desc.Len(); + + // Make sure the line doesn't look like meta information. + if ( strncmp(bytes, meta_prefix, meta_prefix_len) == 0 ) { - Error(Fmt("error writing to %s: %s", fname.c_str(), strerror(errno))); - return false; + // It would so escape the first character. + char buf[16]; + snprintf(buf, sizeof(buf), "\\x%02x", bytes[0]); + if ( fwrite(buf, strlen(buf), 1, file) != 1 ) + goto write_error; + + ++bytes; + --len; } + if ( fwrite(bytes, len, 1, file) != 1 ) + goto write_error; + if ( IsBuf() ) fflush(file); return true; + +write_error: + Error(Fmt("error writing to %s: %s", fname.c_str(), Strerror(errno))); + return false; } bool Ascii::DoRotate(string rotated_path, double open, double close, bool terminating) @@ -325,8 +360,7 @@ bool Ascii::DoRotate(string rotated_path, double open, double close, bool termin if ( ! file || IsSpecial(Info().path) ) return true; - fclose(file); - file = 0; + CloseFile(close); string nname = rotated_path + "." + LogExt(); rename(fname.c_str(), nname.c_str()); @@ -346,9 +380,28 @@ bool Ascii::DoSetBuf(bool enabled) return true; } +bool Ascii::DoHeartbeat(double network_time, double current_time) + { + // Nothing to do. + return true; + } + string Ascii::LogExt() { const char* ext = getenv("BRO_LOG_SUFFIX"); if ( ! 
ext ) ext = "log"; return ext; } + +string Ascii::Timestamp(double t) + { + struct tm tm; + char buf[128]; + const char* const date_fmt = "%Y-%m-%d-%H-%M-%S"; + time_t teatime = time_t(t); + + localtime_r(&teatime, &tm); + strftime(buf, sizeof(buf), date_fmt, &tm); + return buf; + } + diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/Ascii.h index a95e644d49..857954ce37 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/Ascii.h @@ -26,13 +26,16 @@ protected: virtual bool DoSetBuf(bool enabled); virtual bool DoRotate(string rotated_path, double open, double close, bool terminating); - virtual bool DoFlush(); - virtual bool DoFinish(); + virtual bool DoFlush(double network_time); + virtual bool DoFinish(double network_time); + virtual bool DoHeartbeat(double network_time, double current_time); private: bool IsSpecial(string path) { return path.find("/dev/") == 0; } bool DoWriteOne(ODesc* desc, threading::Value* val, const threading::Field* field); bool WriteHeaderField(const string& key, const string& value); + void CloseFile(double t); + string Timestamp(double t); FILE* file; string fname; @@ -40,7 +43,7 @@ private: // Options set from the script-level. bool output_to_stdout; - bool include_header; + bool include_meta; char* separator; int separator_len; @@ -54,8 +57,8 @@ private: char* unset_field; int unset_field_len; - char* header_prefix; - int header_prefix_len; + char* meta_prefix; + int meta_prefix_len; }; } diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index b34ea3412a..1978a8b781 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -311,7 +311,7 @@ bool DataSeries::DoInit(const WriterInfo& info, int num_fields, const threading: } else - Error(Fmt("cannot dump schema: %s", strerror(errno))); + Error(Fmt("cannot dump schema: %s", Strerror(errno))); } compress_type = Extent::compress_all; @@ -343,7 +343,7 @@ bool DataSeries::DoInit(const WriterInfo& info, int num_fields, const threading: return OpenLog(info.path); } -bool DataSeries::DoFlush() +bool DataSeries::DoFlush(double network_time) { // Flushing is handled by DataSeries automatically, so this function // doesn't do anything. @@ -366,11 +366,10 @@ void DataSeries::CloseLog() log_file = 0; } -bool DataSeries::DoFinish() +bool DataSeries::DoFinish(double network_time) { CloseLog(); - - return WriterBackend::DoFinish(); + return true; } bool DataSeries::DoWrite(int num_fields, const threading::Field* const * fields, @@ -420,4 +419,9 @@ bool DataSeries::DoSetBuf(bool enabled) return true; } +bool DataSeries::DoHeartbeat(double network_time, double current_time) +{ + return true; +} + #endif /* USE_DATASERIES */ diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h index 0ae3572b76..31d17a1a7b 100644 --- a/src/logging/writers/DataSeries.h +++ b/src/logging/writers/DataSeries.h @@ -34,8 +34,9 @@ protected: virtual bool DoSetBuf(bool enabled); virtual bool DoRotate(string rotated_path, double open, double close, bool terminating); - virtual bool DoFlush(); - virtual bool DoFinish(); + virtual bool DoFlush(double network_time); + virtual bool DoFinish(double network_time); + virtual bool DoHeartbeat(double network_time, double current_time); private: static const size_t ROW_MIN = 2048; // Minimum extent size. 
diff --git a/src/logging/writers/None.h b/src/logging/writers/None.h index 7e2e4ef4eb..c6d7cba56a 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/None.h @@ -26,8 +26,9 @@ protected: virtual bool DoSetBuf(bool enabled) { return true; } virtual bool DoRotate(string rotated_path, double open, double close, bool terminating); - virtual bool DoFlush() { return true; } - virtual bool DoFinish() { WriterBackend::DoFinish(); return true; } + virtual bool DoFlush(double network_time) { return true; } + virtual bool DoFinish(double network_time) { return true; } + virtual bool DoHeartbeat(double network_time, double current_time) { return true; } }; } diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index dfa4c28eda..88c4ac0965 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -78,24 +78,22 @@ const char* BasicThread::Fmt(const char* format, ...) return buf; } +const char* BasicThread::Strerror(int err) + { + static char buf[128] = ""; + strerror_r(err, buf, sizeof(buf)); + return buf; + } + void BasicThread::Start() { if ( started ) return; - int err = pthread_mutex_init(&terminate, 0); - if ( err != 0 ) - reporter->FatalError("Cannot create terminate mutex for thread %s: %s", name.c_str(), strerror(err)); - - // We use this like a binary semaphore and acquire it immediately. - err = pthread_mutex_lock(&terminate); + int err = pthread_create(&pthread, 0, BasicThread::launcher, this); if ( err != 0 ) - reporter->FatalError("Cannot aquire terminate mutex for thread %s: %s", name.c_str(), strerror(err)); - - err = pthread_create(&pthread, 0, BasicThread::launcher, this); - if ( err != 0 ) - reporter->FatalError("Cannot create thread %s:%s", name.c_str(), strerror(err)); + reporter->FatalError("Cannot create thread %s:%s", name.c_str(), Strerror(err)); DBG_LOG(DBG_THREADING, "Started thread %s", name.c_str()); @@ -114,12 +112,6 @@ void BasicThread::Stop() DBG_LOG(DBG_THREADING, "Signaling thread %s to terminate ...", name.c_str()); - // Signal that it's ok for the thread to exit now by unlocking the - // mutex. - int err = pthread_mutex_unlock(&terminate); - if ( err != 0 ) - reporter->FatalError("Failure flagging terminate condition for thread %s: %s", name.c_str(), strerror(err)); - terminating = true; OnStop(); @@ -130,16 +122,13 @@ void BasicThread::Join() if ( ! started ) return; - if ( ! terminating ) - Stop(); + assert(terminating); DBG_LOG(DBG_THREADING, "Joining thread %s ...", name.c_str()); if ( pthread_join(pthread, 0) != 0 ) reporter->FatalError("Failure joining thread %s", name.c_str()); - pthread_mutex_destroy(&terminate); - DBG_LOG(DBG_THREADING, "Done with thread %s", name.c_str()); pthread = 0; @@ -178,10 +167,6 @@ void* BasicThread::launcher(void *arg) // Run thread's main function. thread->Run(); - // Wait until somebody actually wants us to terminate. - if ( pthread_mutex_lock(&thread->terminate) != 0 ) - reporter->FatalError("Failure acquiring terminate mutex at end of thread %s", thread->Name().c_str()); - return 0; } diff --git a/src/threading/BasicThread.h b/src/threading/BasicThread.h index cc87ae03bc..d47eb5c3c3 100644 --- a/src/threading/BasicThread.h +++ b/src/threading/BasicThread.h @@ -96,6 +96,14 @@ public: */ const char* Fmt(const char* format, ...); + /** + * A version of strerror() that the thread can safely use. This is + * essentially a wrapper around strerror_r(). Note that it keeps a + * single static buffer internally so the result remains valid only + * until the next call. 
+ */ + const char* Strerror(int err); + protected: friend class Manager; diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 6a3d496325..81ef123661 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -16,9 +16,17 @@ namespace threading { class FinishMessage : public InputMessage { public: - FinishMessage(MsgThread* thread) : InputMessage("Finish", thread) { } + FinishMessage(MsgThread* thread, double network_time) : InputMessage("Finish", thread), + network_time(network_time) { } - virtual bool Process() { return Object()->DoFinish(); } + virtual bool Process() { + bool result = Object()->OnFinish(network_time); + Object()->Finished(); + return result; + } + +private: + double network_time; }; // A dummy message that's only purpose is unblock the current read operation @@ -39,7 +47,10 @@ public: : InputMessage("Heartbeat", thread) { network_time = arg_network_time; current_time = arg_current_time; } - virtual bool Process() { return Object()->DoHeartbeat(network_time, current_time); } + virtual bool Process() { + Object()->HeartbeatInChild(); + return Object()->OnHeartbeat(network_time, current_time); + } private: double network_time; @@ -146,8 +157,11 @@ MsgThread::MsgThread() : BasicThread() void MsgThread::OnStop() { + if ( finished ) + return; + // Signal thread to terminate and wait until it has acknowledged. - SendIn(new FinishMessage(this), true); + SendIn(new FinishMessage(this, network_time), true); int cnt = 0; while ( ! finished ) @@ -161,6 +175,8 @@ void MsgThread::OnStop() usleep(1000); } + Finished(); + // One more message to make sure the current queue read operation unblocks. SendIn(new UnblockMessage(this), true); } @@ -170,7 +186,7 @@ void MsgThread::Heartbeat() SendIn(new HeartbeatMessage(this, network_time, current_time())); } -bool MsgThread::DoHeartbeat(double network_time, double current_time) +void MsgThread::HeartbeatInChild() { string n = Name(); @@ -179,16 +195,13 @@ bool MsgThread::DoHeartbeat(double network_time, double current_time) cnt_sent_out - queue_out.Size()); SetOSName(n.c_str()); - - return true; } -bool MsgThread::DoFinish() +void MsgThread::Finished() { // This is thread-safe "enough", we're the only one ever writing // there. finished = true; - return true; } void MsgThread::Info(const char* msg) diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index a917f54396..67ab9517c5 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -189,39 +189,45 @@ protected: * * This is method is called regularly by the threading::Manager. * - * Can be overriden in derived classed to hook into the heart beat, - * but must call the parent implementation. Note that this method is - * always called by the main thread and must not access data of the - * child thread directly. See DoHeartbeat() if you want to do - * something on the child-side. + * Can be overriden in derived classed to hook into the heart beat + * sending, but must call the parent implementation. Note that this + * method is always called by the main thread and must not access + * data of the child thread directly. Implement OnHeartbeat() if you + * want to do something on the child-side. */ virtual void Heartbeat(); - /** - * Overriden from BasicThread. - * + /** Flags that the child process has finished processing. Called from child. */ - virtual void Run(); - virtual void OnStop(); + void Finished(); + + /** Internal heartbeat processing. Called from child. 
+ */ + void HeartbeatInChild(); /** * Regulatly triggered for execution in the child thread. * - * When overriding, one must call the parent class' implementation. - * * network_time: The network_time when the heartbeat was trigger by * the main thread. * * current_time: Wall clock when the heartbeat was trigger by the * main thread. */ - virtual bool DoHeartbeat(double network_time, double current_time); + virtual bool OnHeartbeat(double network_time, double current_time) = 0; /** Triggered for execution in the child thread just before shutting threads down. * The child thread should finish its operations and then *must* * call this class' implementation. */ - virtual bool DoFinish(); + virtual bool OnFinish(double network_time) = 0; + + /** + * Overriden from BasicThread. + * + */ + virtual void Run(); + virtual void OnStop(); private: /** diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh-filtered.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh-filtered.log new file mode 100644 index 0000000000..a2610bb522 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh-filtered.log @@ -0,0 +1,12 @@ +PREFIX<>separator | +PREFIX<>set_separator|, +PREFIX<>empty_field|EMPTY +PREFIX<>unset_field|NOT-SET +PREFIX<>path|ssh +PREFIX<>fields|t|id.orig_h|id.orig_p|id.resp_h|id.resp_p|status|country|b +PREFIX<>types|time|addr|port|addr|port|string|string|bool +1342126762.852986|1.2.3.4|1234|2.3.4.5|80|success|unknown|NOT-SET +1342126762.852986|1.2.3.4|1234|2.3.4.5|80|NOT-SET|US|NOT-SET +1342126762.852986|1.2.3.4|1234|2.3.4.5|80|failure|UK|NOT-SET +1342126762.852986|1.2.3.4|1234|2.3.4.5|80|NOT-SET|BR|NOT-SET +1342126762.852986|1.2.3.4|1234|2.3.4.5|80|failure|EMPTY|T diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh.log deleted file mode 100644 index 10275205a5..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh.log +++ /dev/null @@ -1,12 +0,0 @@ -PREFIX<>separator | -PREFIX<>set_separator|, -PREFIX<>empty_field|EMPTY -PREFIX<>unset_field|NOT-SET -PREFIX<>path|ssh -PREFIX<>fields|t|id.orig_h|id.orig_p|id.resp_h|id.resp_p|status|country|b -PREFIX<>types|time|addr|port|addr|port|string|string|bool -1324314313.345323|1.2.3.4|1234|2.3.4.5|80|success|unknown|NOT-SET -1324314313.345323|1.2.3.4|1234|2.3.4.5|80|NOT-SET|US|NOT-SET -1324314313.345323|1.2.3.4|1234|2.3.4.5|80|failure|UK|NOT-SET -1324314313.345323|1.2.3.4|1234|2.3.4.5|80|NOT-SET|BR|NOT-SET -1324314313.345323|1.2.3.4|1234|2.3.4.5|80|failure|EMPTY|T diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log new file mode 100644 index 0000000000..72df0d73d4 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log @@ -0,0 +1,12 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path test +#start 2012-07-12-21-00-27 +#fields data c +#types string count +Test1 42 +\x23Kaputt 42 +Test2 42 +#end 2012-07-12-21-00-27 diff --git a/testing/btest/core/expr-exception.bro b/testing/btest/core/expr-exception.bro index 66f9b78c4b..9e84717935 100644 --- a/testing/btest/core/expr-exception.bro +++ b/testing/btest/core/expr-exception.bro @@ -2,7 +2,7 @@ # shouldn't abort Bro entirely, but just return from the function body. 
# # @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT >output -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff reporter.log +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log # @TEST-EXEC: btest-diff output event connection_established(c: connection) diff --git a/testing/btest/istate/events-ssl.bro b/testing/btest/istate/events-ssl.bro index d87d014a21..afbee3f6d9 100644 --- a/testing/btest/istate/events-ssl.bro +++ b/testing/btest/istate/events-ssl.bro @@ -6,10 +6,13 @@ # # @TEST-EXEC: btest-diff sender/http.log # @TEST-EXEC: btest-diff receiver/http.log -# @TEST-EXEC: cmp sender/http.log receiver/http.log # -# @TEST-EXEC: bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.snd.log -# @TEST-EXEC: bro -x receiver/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.rec.log +# @TEST-EXEC: cat sender/http.log $SCRIPTS/diff-remove-timestamps >sender.http.log +# @TEST-EXEC: cat receiver/http.log $SCRIPTS/diff-remove-timestamps >receiver.http.log +# @TEST-EXEC: cmp sender.http.log receiver.http.log +# +# @TEST-EXEC: bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' | $SCRIPTS/diff-remove-timestamps >events.snd.log +# @TEST-EXEC: bro -x receiver/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' | $SCRIPTS/diff-remove-timestamps >events.rec.log # @TEST-EXEC: btest-diff events.rec.log # @TEST-EXEC: btest-diff events.snd.log # @TEST-EXEC: cmp events.rec.log events.snd.log diff --git a/testing/btest/istate/events.bro b/testing/btest/istate/events.bro index fe588b5c3b..1f05dfc729 100644 --- a/testing/btest/istate/events.bro +++ b/testing/btest/istate/events.bro @@ -1,12 +1,15 @@ # @TEST-SERIALIZE: comm # -# @TEST-EXEC: btest-bg-run sender bro -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro -# @TEST-EXEC: btest-bg-run receiver bro ../receiver.bro +# @TEST-EXEC: btest-bg-run sender bro -Bthreading,logging,comm -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro +# @TEST-EXEC: btest-bg-run receiver bro -Bthreading,logging,comm ../receiver.bro # @TEST-EXEC: btest-bg-wait -k 20 # # @TEST-EXEC: btest-diff sender/http.log # @TEST-EXEC: btest-diff receiver/http.log -# @TEST-EXEC: cmp sender/http.log receiver/http.log +# +# @TEST-EXEC: cat sender/http.log $SCRIPTS/diff-remove-timestamps >sender.http.log +# @TEST-EXEC: cat receiver/http.log $SCRIPTS/diff-remove-timestamps >receiver.http.log +# @TEST-EXEC: cmp sender.http.log receiver.http.log # # @TEST-EXEC: bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.snd.log # @TEST-EXEC: bro -x receiver/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.rec.log diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-empty.bro b/testing/btest/scripts/base/frameworks/logging/ascii-empty.bro index 9dace5d52a..0bb5900e30 100644 --- a/testing/btest/scripts/base/frameworks/logging/ascii-empty.bro +++ b/testing/btest/scripts/base/frameworks/logging/ascii-empty.bro @@ -1,12 +1,13 @@ # # @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log +# @TEST-EXEC: cat ssh.log | grep -v PREFIX.*20..- >ssh-filtered.log +# @TEST-EXEC: btest-diff ssh-filtered.log redef LogAscii::output_to_stdout = F; 
redef LogAscii::separator = "|"; redef LogAscii::empty_field = "EMPTY"; redef LogAscii::unset_field = "NOT-SET"; -redef LogAscii::header_prefix = "PREFIX<>"; +redef LogAscii::meta_prefix = "PREFIX<>"; module SSH; diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-line-like-comment.bro b/testing/btest/scripts/base/frameworks/logging/ascii-line-like-comment.bro new file mode 100644 index 0000000000..4670811b2a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-line-like-comment.bro @@ -0,0 +1,23 @@ +# +# @TEST-EXEC: bro -b %INPUT +# @TEST-EXEC: btest-diff test.log + +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + data: string &log; + c: count &log &default=42; + }; +} + +event bro_init() +{ + Log::create_stream(Test::LOG, [$columns=Info]); + Log::write(Test::LOG, [$data="Test1"]); + Log::write(Test::LOG, [$data="#Kaputt"]); + Log::write(Test::LOG, [$data="Test2"]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-options.bro b/testing/btest/scripts/base/frameworks/logging/ascii-options.bro index 8c228c1384..474b179536 100644 --- a/testing/btest/scripts/base/frameworks/logging/ascii-options.bro +++ b/testing/btest/scripts/base/frameworks/logging/ascii-options.bro @@ -4,7 +4,7 @@ redef LogAscii::output_to_stdout = F; redef LogAscii::separator = "|"; -redef LogAscii::include_header = F; +redef LogAscii::include_meta = F; module SSH; diff --git a/testing/btest/scripts/base/frameworks/logging/remote-types.bro b/testing/btest/scripts/base/frameworks/logging/remote-types.bro index f1ef4f0c31..3f102e6319 100644 --- a/testing/btest/scripts/base/frameworks/logging/remote-types.bro +++ b/testing/btest/scripts/base/frameworks/logging/remote-types.bro @@ -1,10 +1,12 @@ # @TEST-SERIALIZE: comm # -# @TEST-EXEC: btest-bg-run sender bro --pseudo-realtime %INPUT ../sender.bro -# @TEST-EXEC: btest-bg-run receiver bro --pseudo-realtime %INPUT ../receiver.bro +# @TEST-EXEC: btest-bg-run sender bro -B threading,logging --pseudo-realtime %INPUT ../sender.bro +# @TEST-EXEC: btest-bg-run receiver bro -B threading,logging --pseudo-realtime %INPUT ../receiver.bro # @TEST-EXEC: btest-bg-wait -k 10 # @TEST-EXEC: btest-diff receiver/test.log -# @TEST-EXEC: cmp receiver/test.log sender/test.log +# @TEST-EXEC: cat receiver/test.log | egrep -v '#start|#end' >r.log +# @TEST-EXEC: cat sender/test.log | egrep -v '#start|#end' >s.log +# @TEST-EXEC: cmp r.log s.log # Remote version testing all types. 
diff --git a/testing/btest/scripts/base/frameworks/logging/remote.bro b/testing/btest/scripts/base/frameworks/logging/remote.bro index 8375d7915a..48683148f5 100644 --- a/testing/btest/scripts/base/frameworks/logging/remote.bro +++ b/testing/btest/scripts/base/frameworks/logging/remote.bro @@ -8,9 +8,11 @@ # @TEST-EXEC: btest-diff sender/test.log # @TEST-EXEC: btest-diff sender/test.failure.log # @TEST-EXEC: btest-diff sender/test.success.log -# @TEST-EXEC: cmp receiver/test.log sender/test.log -# @TEST-EXEC: cmp receiver/test.failure.log sender/test.failure.log -# @TEST-EXEC: cmp receiver/test.success.log sender/test.success.log +# @TEST-EXEC: ( cd sender && for i in *.log; do cat $i | $SCRIPTS/diff-remove-timestamps >c.$i; done ) +# @TEST-EXEC: ( cd receiver && for i in *.log; do cat $i | $SCRIPTS/diff-remove-timestamps >c.$i; done ) +# @TEST-EXEC: cmp receiver/c.test.log sender/c.test.log +# @TEST-EXEC: cmp receiver/c.test.failure.log sender/c.test.failure.log +# @TEST-EXEC: cmp receiver/c.test.success.log sender/c.test.success.log # This is the common part loaded by both sender and receiver. module Test; diff --git a/testing/btest/scripts/base/frameworks/notice/default-policy-order.test b/testing/btest/scripts/base/frameworks/notice/default-policy-order.test index 6e53bd3b54..d5d3f4c3fa 100644 --- a/testing/btest/scripts/base/frameworks/notice/default-policy-order.test +++ b/testing/btest/scripts/base/frameworks/notice/default-policy-order.test @@ -1,10 +1,10 @@ # This test checks that the default notice policy ordering does not # change from run to run. # @TEST-EXEC: bro -e '' -# @TEST-EXEC: mv notice_policy.log notice_policy.log.1 +# @TEST-EXEC: cat notice_policy.log | $SCRIPTS/diff-remove-timestamps > notice_policy.log.1 # @TEST-EXEC: bro -e '' -# @TEST-EXEC: mv notice_policy.log notice_policy.log.2 +# @TEST-EXEC: cat notice_policy.log | $SCRIPTS/diff-remove-timestamps > notice_policy.log.2 # @TEST-EXEC: bro -e '' -# @TEST-EXEC: mv notice_policy.log notice_policy.log.3 +# @TEST-EXEC: cat notice_policy.log | $SCRIPTS/diff-remove-timestamps > notice_policy.log.3 # @TEST-EXEC: diff notice_policy.log.1 notice_policy.log.2 # @TEST-EXEC: diff notice_policy.log.1 notice_policy.log.3 diff --git a/testing/scripts/diff-remove-timestamps b/testing/scripts/diff-remove-timestamps index 063f1e4900..2b029789de 100755 --- a/testing/scripts/diff-remove-timestamps +++ b/testing/scripts/diff-remove-timestamps @@ -1,5 +1,8 @@ #! /usr/bin/env bash # -# Replace anything which looks like timestamps with XXXs. +# Replace anything which looks like timestamps with XXXs (including the #start/end markers in logs). + +sed 's/[0-9]\{10\}\.[0-9]\{2,8\}/XXXXXXXXXX.XXXXXX/g' | \ +sed 's/^#\(start\|end\).20..-..-..-..-..-..$/#\1 XXXX-XX-XX-XX-XX-XX/g' | \ +grep -v '#start' | grep -v '#end' -sed 's/[0-9]\{10\}\.[0-9]\{2,8\}/XXXXXXXXXX.XXXXXX/g' From 1ca0d970fc8c3972511067cfbdf9314a6c35d0eb Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 16 Jul 2012 13:39:19 -0700 Subject: [PATCH 487/651] Removing the thread kill functionality. Wasn't really used and has potential for trouble. 
--- src/main.cc | 6 ------ src/threading/Manager.cc | 8 -------- src/threading/Manager.h | 9 --------- 3 files changed, 23 deletions(-) diff --git a/src/main.cc b/src/main.cc index d94a32df63..d3937b3449 100644 --- a/src/main.cc +++ b/src/main.cc @@ -361,12 +361,6 @@ RETSIGTYPE sig_handler(int signo) set_processing_status("TERMINATING", "sig_handler"); signal_val = signo; - if ( thread_mgr->Terminating() && (signal_val == SIGTERM || signal_val == SIGINT) ) - // If the thread manager is already terminating (i.e., - // waiting for child threads to exit), another term signal - // will send the threads a kill. - thread_mgr->KillThreads(); - return RETSIGVAL; } diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index d8f3936037..8e0610a056 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -48,14 +48,6 @@ void Manager::Terminate() terminating = false; } -void Manager::KillThreads() - { - DBG_LOG(DBG_THREADING, "Killing threads ..."); - - for ( all_thread_list::iterator i = all_threads.begin(); i != all_threads.end(); i++ ) - (*i)->Kill(); - } - void Manager::AddThread(BasicThread* thread) { DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name().c_str()); diff --git a/src/threading/Manager.h b/src/threading/Manager.h index 1afd115da0..1c7914fcde 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -49,15 +49,6 @@ public: */ bool Terminating() const { return terminating; } - /** - * Immediately kills all child threads. It does however not yet join - * them, one still needs to call Terminate() for that. - * - * This method is safe to call from a signal handler, and can in fact - * be called while Terminate() is already in progress. - */ - void KillThreads(); - typedef std::list > msg_stats_list; /** From c8789cff94c5200674ad08199a1f800882aabf72 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 16 Jul 2012 13:40:19 -0700 Subject: [PATCH 488/651] If a thread doesn't terminate, we log that but not longer proceed (because it could hang later still). Also logging to stderr as well to make sure one sees it. Also adding code to the ASCII writer to catch termination inconsistencies. 
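The termination checks added to the ASCII writer boil down to a finish-once guard: DoFinish() must run exactly once before the backend object goes away, and anything else indicates a bug in the shutdown path. A minimal, self-contained sketch of that guard pattern (the class name here is illustrative, not the actual Ascii writer; in the hunks below the real flag is ascii_done):

    #include <cstdio>
    #include <cstdlib>

    // Finish-once guard: complain loudly if Finish() is skipped or repeated.
    class GuardedWriter {
    public:
        GuardedWriter() : done(false) { }

        ~GuardedWriter()
            {
            if ( ! done )
                {
                // Destroyed without ever seeing the finish message.
                fprintf(stderr, "missing finish message\n");
                abort();
                }
            }

        bool Finish()
            {
            if ( done )
                {
                // The finish message arrived twice.
                fprintf(stderr, "duplicate finish message\n");
                abort();
                }

            done = true;
            return true;
            }

    private:
        bool done;
    };
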
--- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- cmake | 2 +- src/logging/writers/Ascii.cc | 14 ++++++++++++++ src/logging/writers/Ascii.h | 1 + src/threading/MsgThread.cc | 8 +++----- 8 files changed, 23 insertions(+), 10 deletions(-) diff --git a/aux/binpac b/aux/binpac index 4ad8d15b63..b4094cb75e 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 4ad8d15b6395925c9875c9d2912a6cc3b4918e0a +Subproject commit b4094cb75e0a7769123f7db1f5d73f3f9f1c3977 diff --git a/aux/bro-aux b/aux/bro-aux index c691c01e9c..2038e3de04 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit c691c01e9cefae5a79bcd4b0f84ca387c8c587a7 +Subproject commit 2038e3de042115c3caa706426e16c830c1fd1e9e diff --git a/aux/broccoli b/aux/broccoli index 8234b8903c..07866915a1 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 8234b8903cbc775f341bdb6a1c0159981d88d27b +Subproject commit 07866915a1450ddd25b888917f494b4824b0cc3f diff --git a/aux/broctl b/aux/broctl index d5ecd1a42c..892b60edb9 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit d5ecd1a42c04b0dca332edc31811e5a6d0f7f2fb +Subproject commit 892b60edb967bb456872638f22ba994e84530137 diff --git a/cmake b/cmake index 2a72c5e08e..96f3d92aca 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 2a72c5e08e018cf632033af3920432d5f684e130 +Subproject commit 96f3d92acadbe1ae64f410e974c5ff503903394b diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index ab68cd77d8..a0d4504d64 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -16,6 +16,7 @@ using threading::Field; Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) { file = 0; + ascii_done = false; output_to_stdout = BifConst::LogAscii::output_to_stdout; include_meta = BifConst::LogAscii::include_meta; @@ -51,6 +52,12 @@ Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) Ascii::~Ascii() { + if ( ! ascii_done ) + { + fprintf(stderr, "missing finish message\n"); + abort(); + } + // Normally, the file will be closed here already via the Finish() // message. But when we terminate abnormally, we may still have it // open. @@ -156,6 +163,13 @@ bool Ascii::DoFlush(double network_time) bool Ascii::DoFinish(double network_time) { + if ( ascii_done ) + { + fprintf(stderr, "duplicate finish message\n"); + abort(); + } + + ascii_done = true; CloseFile(network_time); return true; } diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/Ascii.h index 857954ce37..c2cd33f203 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/Ascii.h @@ -40,6 +40,7 @@ private: FILE* file; string fname; ODesc desc; + bool ascii_done; // Options set from the script-level. bool output_to_stdout; diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 81ef123661..e4cda1e84d 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -166,17 +166,15 @@ void MsgThread::OnStop() int cnt = 0; while ( ! finished ) { - if ( ++cnt > 1000 ) // Insurance against broken threads ... + if ( ++cnt % 2000 == 0 ) // Insurance against broken threads ... { - reporter->Warning("thread %s didn't finish in time", Name().c_str()); - break; + reporter->Warning("thread %s has not yet terminated ...", Name().c_str()); + fprintf(stderr, "warning: thread %s has not yet terminated ...", Name().c_str()); } usleep(1000); } - Finished(); - // One more message to make sure the current queue read operation unblocks. 
SendIn(new UnblockMessage(this), true); } From f7a6407ab1213d95f074e47c39061f541f630944 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 12 Jul 2012 13:44:24 -0700 Subject: [PATCH 489/651] Reworking thread termination logic. Turns out the finish methods weren't called correctly, caused by a mess up with method names which all sounded too similar and the wrong one ended up being called. I've reworked this by changing the thread/writer/reader interfaces, which actually also simplifies them by getting rid of the requirement for writer backends to call their parent methods (i.e., less opportunity for errors). This commit also includes the following (because I noticed the problem above when working on some of these): - The ASCII log writer now includes "#start " and "#end lines in the each file. The latter supersedes Bernhard's "EOF" patch. This required a number of tests updates. The standard canonifier removes the timestamps, but some tests compare files directly, which doesn't work if they aren't printing out the same timestamps (like the comm tests). - The above required yet another change to the writer API to network_time to methods. - Renamed ASCII logger "header" options to "meta". - Fixes #763 "Escape # when first character in log file line". All btests pass for me on Linux FC15. Will try MacOS next. --- src/logging/writers/Ascii.cc | 7 +------ src/threading/MsgThread.cc | 2 ++ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index a0d4504d64..3bc4ef4b38 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -52,12 +52,6 @@ Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) Ascii::~Ascii() { - if ( ! ascii_done ) - { - fprintf(stderr, "missing finish message\n"); - abort(); - } - // Normally, the file will be closed here already via the Finish() // message. But when we terminate abnormally, we may still have it // open. @@ -170,6 +164,7 @@ bool Ascii::DoFinish(double network_time) } ascii_done = true; + CloseFile(network_time); return true; } diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index e4cda1e84d..45fbf6afa5 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -175,6 +175,8 @@ void MsgThread::OnStop() usleep(1000); } + Finished(); + // One more message to make sure the current queue read operation unblocks. SendIn(new UnblockMessage(this), true); } From f6b883bafc71840e146768b966d37a9229559c18 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 17 Jul 2012 17:09:49 -0700 Subject: [PATCH 490/651] Further reworking the thread API. 
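Part of this rework, visible in the src/util.cc and src/util.h hunks below, is a safe_write() helper that wraps write(2), restarting the call after EINTR and continuing after short writes until the whole buffer has been written. A small usage sketch, assuming only the declaration added to util.h (the file name and return convention here are made up for the example):

    #include <fcntl.h>
    #include <unistd.h>
    #include <cstring>

    // Declaration as added to util.h below.
    extern bool safe_write(int fd, const char* data, int len);

    int write_line(const char* path, const char* line)
        {
        int fd = open(path, O_WRONLY | O_CREAT | O_APPEND, 0666);
        if ( fd < 0 )
            return -1;

        // safe_write() loops internally: it restarts write() on EINTR and
        // keeps writing until all bytes are out, so the caller does not
        // need to deal with partial writes.
        bool ok = safe_write(fd, line, (int)strlen(line));

        close(fd);
        return ok ? 0 : -1;
        }
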
--- src/logging/WriterFrontend.cc | 3 --- src/logging/WriterFrontend.h | 2 +- src/threading/MsgThread.cc | 13 +++++-------- src/threading/MsgThread.h | 1 + src/util.cc | 22 ++++++++++++++++++++++ src/util.h | 6 ++++++ testing/scripts/diff-canonifier | 2 +- 7 files changed, 36 insertions(+), 13 deletions(-) diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 577003926b..b816327e9c 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -141,9 +141,6 @@ void WriterFrontend::Stop() { FlushWriteBuffer(); SetDisable(); - - if ( backend ) - backend->Stop(); } void WriterFrontend::Init(const WriterBackend::WriterInfo& arg_info, int arg_num_fields, const Field* const * arg_fields) diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index 6581fb1c1b..e8f3d06d6c 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -50,7 +50,7 @@ public: /** * Stops all output to this writer. Calling this methods disables all - * message forwarding to the backend and stops the backend thread. + * message forwarding to the backend. * * This method must only be called from the main thread. */ diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 45fbf6afa5..f101d0ca3c 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -152,12 +152,13 @@ MsgThread::MsgThread() : BasicThread() { cnt_sent_in = cnt_sent_out = 0; finished = false; + stopped = false; thread_mgr->AddMsgThread(this); } void MsgThread::OnStop() { - if ( finished ) + if ( stopped ) return; // Signal thread to terminate and wait until it has acknowledged. @@ -303,13 +304,8 @@ BasicInputMessage* MsgThread::RetrieveIn() void MsgThread::Run() { - while ( true ) + while ( ! finished ) { - // When requested to terminate, we only do so when - // all input has been processed. - if ( Terminating() && ! queue_in.Ready() ) - break; - BasicInputMessage* msg = RetrieveIn(); bool result = msg->Process(); @@ -318,12 +314,13 @@ void MsgThread::Run() { string s = msg->Name() + " failed, terminating thread (MsgThread)"; Error(s.c_str()); - Stop(); break; } delete msg; } + + Finished(); } void MsgThread::GetStats(Stats* stats) diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 67ab9517c5..d929c1f806 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -293,6 +293,7 @@ private: uint64_t cnt_sent_out; // Counts message sent by child. bool finished; // Set to true by Finished message. + bool stopped; // Set to true by OnStop(). }; /** diff --git a/src/util.cc b/src/util.cc index 3cfa5fca1c..b7a4683597 100644 --- a/src/util.cc +++ b/src/util.cc @@ -1290,6 +1290,28 @@ uint64 calculate_unique_id(size_t pool) return HashKey::HashBytes(&(uid_pool[pool].key), sizeof(uid_pool[pool].key)); } +bool safe_write(int fd, const char* data, int len) + { + return true; + while ( len > 0 ) + { + int n = write(fd, data, len); + + if ( n < 0 ) + { + if ( errno == EINTR ) + continue; + + return false; + } + + data += n; + len -= n; + } + + return true; + } + void out_of_memory(const char* where) { reporter->FatalError("out of memory in %s.\n", where); diff --git a/src/util.h b/src/util.h index e4c995f45f..075c2af7c2 100644 --- a/src/util.h +++ b/src/util.h @@ -289,6 +289,11 @@ inline size_t pad_size(size_t size) #define padded_sizeof(x) (pad_size(sizeof(x))) +// Like write() but handles interrupted system calls by restarting. Returns +// true if the write was successful, otherwise sets errno. 
This function is +// thread-safe as long as no two threads write to the same descriptor. +extern bool safe_write(int fd, const char* data, int len); + extern void out_of_memory(const char* where); inline void* safe_realloc(void* ptr, size_t size) @@ -338,4 +343,5 @@ inline int safe_vsnprintf(char* str, size_t size, const char* format, va_list al // handed out by malloc. extern void get_memory_usage(unsigned int* total, unsigned int* malloced); + #endif diff --git a/testing/scripts/diff-canonifier b/testing/scripts/diff-canonifier index 3cb213a3f7..4d04b3372c 100755 --- a/testing/scripts/diff-canonifier +++ b/testing/scripts/diff-canonifier @@ -2,4 +2,4 @@ # # Default canonifier used with the tests in testing/btest/*. -`dirname $0`/diff-remove-timestamps +`dirname $0`/diff-remove-timestamps | grep -v XXX From e90918aa509c6c44078707d147144e62dc4bc4d4 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 17 Jul 2012 19:02:36 -0700 Subject: [PATCH 491/651] Moving the ASCII writer over to use UNIX I/O rather than stdio. --- src/logging/writers/Ascii.cc | 40 +++++++++++++++++++----------------- src/logging/writers/Ascii.h | 2 +- src/threading/BasicThread.cc | 5 ++--- src/util.cc | 1 - 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 3bc4ef4b38..c1f307fb4e 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -15,7 +15,7 @@ using threading::Field; Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) { - file = 0; + fd = 0; ascii_done = false; output_to_stdout = BifConst::LogAscii::output_to_stdout; @@ -53,9 +53,8 @@ Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) Ascii::~Ascii() { // Normally, the file will be closed here already via the Finish() - // message. But when we terminate abnormally, we may still have it - // open. - if ( file ) + // message. But when we terminate abnormally, we may still have it open. + if ( fd ) CloseFile(0); delete [] separator; @@ -70,23 +69,25 @@ bool Ascii::WriteHeaderField(const string& key, const string& val) string str = string(meta_prefix, meta_prefix_len) + key + string(separator, separator_len) + val + "\n"; - return (fwrite(str.c_str(), str.length(), 1, file) == 1); + return safe_write(fd, str.c_str(), str.length()); } void Ascii::CloseFile(double t) { - if ( ! file ) + if ( ! fd) return; if ( include_meta ) WriteHeaderField("end", t ? Timestamp(t) : ""); - fclose(file); - file = 0; + close(fd); + fd = 0; } bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * fields) { + assert(! fd); + string path = info.path; if ( output_to_stdout ) @@ -94,11 +95,13 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * fname = IsSpecial(path) ? path : path + "." + LogExt(); - if ( ! (file = fopen(fname.c_str(), "w")) ) + fd = open(fname.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0777); + + if ( fd < 0 ) { Error(Fmt("cannot open %s: %s", fname.c_str(), Strerror(errno))); - + fd = 0; return false; } @@ -112,7 +115,7 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * + get_escaped_string(string(separator, separator_len), false) + "\n"; - if( fwrite(str.c_str(), str.length(), 1, file) != 1 ) + if ( ! safe_write(fd, str.c_str(), str.length()) ) goto write_error; if ( ! 
(WriteHeaderField("set_separator", get_escaped_string( @@ -151,7 +154,7 @@ write_error: bool Ascii::DoFlush(double network_time) { - fflush(file); + fsync(fd); return true; } @@ -318,7 +321,7 @@ bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) bool Ascii::DoWrite(int num_fields, const Field* const * fields, Value** vals) { - if ( ! file ) + if ( ! fd ) DoInit(Info(), NumFields(), Fields()); desc.Clear(); @@ -337,24 +340,23 @@ bool Ascii::DoWrite(int num_fields, const Field* const * fields, const char* bytes = (const char*)desc.Bytes(); int len = desc.Len(); - // Make sure the line doesn't look like meta information. if ( strncmp(bytes, meta_prefix, meta_prefix_len) == 0 ) { // It would so escape the first character. char buf[16]; snprintf(buf, sizeof(buf), "\\x%02x", bytes[0]); - if ( fwrite(buf, strlen(buf), 1, file) != 1 ) + if ( ! safe_write(fd, buf, strlen(buf)) ) goto write_error; ++bytes; --len; } - if ( fwrite(bytes, len, 1, file) != 1 ) + if ( ! safe_write(fd, bytes, len) ) goto write_error; - if ( IsBuf() ) - fflush(file); + if ( IsBuf() ) + fsync(fd); return true; @@ -366,7 +368,7 @@ write_error: bool Ascii::DoRotate(string rotated_path, double open, double close, bool terminating) { // Don't rotate special files or if there's not one currently open. - if ( ! file || IsSpecial(Info().path) ) + if ( ! fd || IsSpecial(Info().path) ) return true; CloseFile(close); diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/Ascii.h index c2cd33f203..371ded4344 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/Ascii.h @@ -37,7 +37,7 @@ private: void CloseFile(double t); string Timestamp(double t); - FILE* file; + int fd; string fname; ODesc desc; bool ascii_done; diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index 88c4ac0965..075581e9db 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -87,18 +87,17 @@ const char* BasicThread::Strerror(int err) void BasicThread::Start() { - if ( started ) return; + started = true; + int err = pthread_create(&pthread, 0, BasicThread::launcher, this); if ( err != 0 ) reporter->FatalError("Cannot create thread %s:%s", name.c_str(), Strerror(err)); DBG_LOG(DBG_THREADING, "Started thread %s", name.c_str()); - started = true; - OnStart(); } diff --git a/src/util.cc b/src/util.cc index b7a4683597..553944c69c 100644 --- a/src/util.cc +++ b/src/util.cc @@ -1292,7 +1292,6 @@ uint64 calculate_unique_id(size_t pool) bool safe_write(int fd, const char* data, int len) { - return true; while ( len > 0 ) { int n = write(fd, data, len); From 490859cfeff6b8747a09e31122ec0afc60e318d0 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 17 Jul 2012 19:36:30 -0700 Subject: [PATCH 492/651] Reworking forceful thread termination. Ctrl-C now kills a thread even if it hangs at termination. And readded a (rather long) timeout to kill threads automatically that don't shutdown. 
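The shutdown sequence described here is a two-stage escalation: wait for the child thread to acknowledge the finish message, but give up and kill it if either another SIGTERM/SIGINT arrives in the meantime or a (deliberately long) polling timeout expires. A condensed sketch of that loop, simplified from the MsgThread::OnStop() hunk below (function name, poll interval and limit are illustrative):

    #include <csignal>
    #include <unistd.h>

    // 'finished' is set by the child thread once it has processed the finish
    // message; 'last_signal' is set by the main signal handler.
    bool WaitOrKill(volatile bool& finished, volatile int& last_signal,
                    void (*kill_child)())
        {
        int cnt = 0;

        while ( ! finished )
            {
            // Another Ctrl-C / SIGTERM while waiting: stop being polite.
            if ( last_signal == SIGTERM || last_signal == SIGINT )
                {
                kill_child();
                return false;
                }

            // Insurance against a thread that never answers: after roughly
            // ten seconds of polling, kill it anyway.
            if ( ++cnt > 10000 )
                {
                kill_child();
                return false;
                }

            usleep(1000); // poll once per millisecond
            }

        return true;
        }

In the actual patch the same logic lives in MsgThread::OnStop(), with thread_mgr->KillThreads() handling the signal case and Kill() the timeout case.
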
--- src/logging/WriterBackend.cc | 4 +--- src/logging/writers/Ascii.cc | 1 + src/threading/BasicThread.cc | 10 +++++----- src/threading/Manager.cc | 8 ++++++++ src/threading/Manager.h | 7 +++++++ src/threading/MsgThread.cc | 32 ++++++++++++++++++++++++++++---- 6 files changed, 50 insertions(+), 12 deletions(-) diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 68b0b506a1..a284c56201 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -162,9 +162,7 @@ bool WriterBackend::Init(const WriterInfo& arg_info, int arg_num_fields, const F num_fields = arg_num_fields; fields = arg_fields; - string name = Fmt("%s/%s", info.path.c_str(), frontend_name.c_str()); - - SetName(name); + SetName(frontend->Name()); if ( ! DoInit(arg_info, arg_num_fields, arg_fields) ) { diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index c1f307fb4e..20963d1535 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -169,6 +169,7 @@ bool Ascii::DoFinish(double network_time) ascii_done = true; CloseFile(network_time); + return true; } diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index 075581e9db..e7fb3f4c84 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -125,7 +125,7 @@ void BasicThread::Join() DBG_LOG(DBG_THREADING, "Joining thread %s ...", name.c_str()); - if ( pthread_join(pthread, 0) != 0 ) + if ( pthread && pthread_join(pthread, 0) != 0 ) reporter->FatalError("Failure joining thread %s", name.c_str()); DBG_LOG(DBG_THREADING, "Done with thread %s", name.c_str()); @@ -135,13 +135,13 @@ void BasicThread::Join() void BasicThread::Kill() { + terminating = true; + if ( ! (started && pthread) ) return; - // I believe this is safe to call from a signal handler ... Not error - // checking so that killing doesn't bail out if we have already - // terminated. - pthread_kill(pthread, SIGKILL); + pthread = 0; + pthread_kill(pthread, SIGTERM); } void* BasicThread::launcher(void *arg) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 8e0610a056..f1f9307b03 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -83,6 +83,14 @@ double Manager::NextTimestamp(double* network_time) return -1.0; } +void Manager::KillThreads() + { + DBG_LOG(DBG_THREADING, "Killing threads ..."); + + for ( all_thread_list::iterator i = all_threads.begin(); i != all_threads.end(); i++ ) + (*i)->Kill(); + } + void Manager::Process() { bool do_beat = false; diff --git a/src/threading/Manager.h b/src/threading/Manager.h index 1c7914fcde..be81c69ba0 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -106,6 +106,13 @@ protected: */ virtual double NextTimestamp(double* network_time); + /** + * Kills all thread immediately. Note that this may cause race conditions + * if a child thread currently holds a lock that might block somebody + * else. + */ + virtual void KillThreads(); + /** * Part of the IOSource interface. */ diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index f101d0ca3c..3913624654 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -156,6 +156,9 @@ MsgThread::MsgThread() : BasicThread() thread_mgr->AddMsgThread(this); } +// Set by Bro's main signal handler. +extern int signal_val; + void MsgThread::OnStop() { if ( stopped ) @@ -164,13 +167,31 @@ void MsgThread::OnStop() // Signal thread to terminate and wait until it has acknowledged. 
SendIn(new FinishMessage(this, network_time), true); + int old_signal_val = signal_val; + signal_val = 0; + int cnt = 0; + bool aborted = 0; + while ( ! finished ) { - if ( ++cnt % 2000 == 0 ) // Insurance against broken threads ... + // Terminate if we get another kill signal. + if ( signal_val == SIGTERM || signal_val == SIGINT ) { - reporter->Warning("thread %s has not yet terminated ...", Name().c_str()); - fprintf(stderr, "warning: thread %s has not yet terminated ...", Name().c_str()); + // Abort all threads here so that we won't hang next + // on another one. + fprintf(stderr, "received signal while waiting for thread %s, aborting all ...\n", Name().c_str()); + thread_mgr->KillThreads(); + aborted = true; + break; + } + + if ( ++cnt % 10000 == 0 ) // Insurance against broken threads ... + { + fprintf(stderr, "killing thread %s ...\n", Name().c_str()); + Kill(); + aborted = true; + break; } usleep(1000); @@ -178,8 +199,11 @@ void MsgThread::OnStop() Finished(); + signal_val = old_signal_val; + // One more message to make sure the current queue read operation unblocks. - SendIn(new UnblockMessage(this), true); + if ( ! aborted ) + SendIn(new UnblockMessage(this), true); } void MsgThread::Heartbeat() From 87e10b5f97a897f8c5fac2f983379a8c8966dcae Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 18 Jul 2012 12:47:13 -0700 Subject: [PATCH 493/651] Further threading and API restructuring for logging and input frameworks. There were a number of cases that weren't thread-safe. In particular, we don't use std::string anymore for anything that's passed between threads (but instead plain old const char*, with manual memmory managmenet). This is still a check-point commit, I'll do more testing. --- src/RemoteSerializer.cc | 4 +- src/input/Manager.cc | 68 ++++++------ src/input/ReaderBackend.cc | 21 ++-- src/input/ReaderBackend.h | 49 +++++++-- src/input/ReaderFrontend.cc | 30 +++--- src/input/ReaderFrontend.h | 27 ++--- src/input/readers/Ascii.cc | 23 +++-- src/input/readers/Benchmark.cc | 8 +- src/input/readers/Raw.cc | 10 +- src/logging/Manager.cc | 89 +++++++++------- src/logging/Manager.h | 6 +- src/logging/WriterBackend.cc | 40 +++++--- src/logging/WriterBackend.h | 51 +++++++-- src/logging/WriterFrontend.cc | 46 ++++----- src/logging/WriterFrontend.h | 18 ++-- src/logging/writers/Ascii.cc | 45 ++++---- src/logging/writers/Ascii.h | 2 +- src/logging/writers/DataSeries.cc | 15 +-- src/logging/writers/DataSeries.h | 2 +- src/logging/writers/None.cc | 21 +++- src/logging/writers/None.h | 2 +- src/threading/BasicThread.cc | 95 ++++++++++++----- src/threading/BasicThread.h | 55 ++++++++-- src/threading/Manager.cc | 19 +++- src/threading/MsgThread.cc | 137 +++++++++++++------------ src/threading/MsgThread.h | 18 ++-- src/threading/Queue.h | 82 ++++++++++++--- src/threading/SerialTypes.cc | 48 +++++++-- src/threading/SerialTypes.h | 34 ++++-- testing/btest/istate/events.bro | 4 +- testing/scripts/diff-remove-timestamps | 4 +- 31 files changed, 692 insertions(+), 381 deletions(-) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 9409a34634..7ed8b9318e 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -2692,12 +2692,12 @@ bool RemoteSerializer::ProcessLogCreateWriter() int id, writer; int num_fields; - logging::WriterBackend::WriterInfo info; + logging::WriterBackend::WriterInfo* info = new logging::WriterBackend::WriterInfo(); bool success = fmt.Read(&id, "id") && fmt.Read(&writer, "writer") && fmt.Read(&num_fields, "num_fields") && - info.Read(&fmt); + 
info->Read(&fmt); if ( ! success ) goto error; diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 1c6b69e8ec..f38613a6f8 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -71,7 +71,7 @@ declare(PDict, InputHash); class Manager::Stream { public: string name; - ReaderBackend::ReaderInfo info; + ReaderBackend::ReaderInfo* info; bool removed; StreamType stream_type; // to distinguish between event and table streams @@ -257,7 +257,6 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) assert(ir->factory); - frontend->SetTypeName(ir->name); ReaderBackend* backend = (*ir->factory)(frontend); assert(backend); @@ -291,9 +290,6 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) EnumVal* reader = description->LookupWithDefault(rtype->FieldOffset("reader"))->AsEnumVal(); - ReaderFrontend* reader_obj = new ReaderFrontend(reader->InternalInt()); - assert(reader_obj); - // get the source ... Val* sourceval = description->LookupWithDefault(rtype->FieldOffset("source")); assert ( sourceval != 0 ); @@ -301,21 +297,22 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) string source((const char*) bsource->Bytes(), bsource->Len()); Unref(sourceval); - EnumVal* mode = description->LookupWithDefault(rtype->FieldOffset("mode"))->AsEnumVal(); - Val* config = description->LookupWithDefault(rtype->FieldOffset("config")); + ReaderBackend::ReaderInfo* rinfo = new ReaderBackend::ReaderInfo(); + rinfo->source = copy_string(source.c_str()); + EnumVal* mode = description->LookupWithDefault(rtype->FieldOffset("mode"))->AsEnumVal(); switch ( mode->InternalInt() ) { case 0: - info->info.mode = MODE_MANUAL; + rinfo->mode = MODE_MANUAL; break; case 1: - info->info.mode = MODE_REREAD; + rinfo->mode = MODE_REREAD; break; case 2: - info->info.mode = MODE_STREAM; + rinfo->mode = MODE_STREAM; break; default: @@ -324,12 +321,16 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) Unref(mode); + Val* config = description->LookupWithDefault(rtype->FieldOffset("config")); + + ReaderFrontend* reader_obj = new ReaderFrontend(*rinfo, reader); + assert(reader_obj); + info->reader = reader_obj; info->type = reader->AsEnumVal(); // ref'd by lookupwithdefault info->name = name; info->config = config->AsTableVal(); // ref'd by LookupWithDefault - - info->info.source = source; + info->info = rinfo; Ref(description); info->description = description; @@ -344,7 +345,7 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) ListVal* index = info->config->RecoverIndex(k); string key = index->Index(0)->AsString()->CheckString(); string value = v->Value()->AsString()->CheckString(); - info->info.config.insert(std::make_pair(key, value)); + info->info->config.insert(std::make_pair(copy_string(key.c_str()), copy_string(value.c_str()))); Unref(index); delete k; } @@ -475,7 +476,7 @@ bool Manager::CreateEventStream(RecordVal* fval) assert(stream->reader); - stream->reader->Init(stream->info, stream->num_fields, logf ); + stream->reader->Init(stream->num_fields, logf ); readers[stream->reader] = stream; @@ -652,7 +653,7 @@ bool Manager::CreateTableStream(RecordVal* fval) assert(stream->reader); - stream->reader->Init(stream->info, fieldsV.size(), fields ); + stream->reader->Init(fieldsV.size(), fields ); readers[stream->reader] = stream; @@ -791,17 +792,19 @@ bool Manager::UnrollRecordType(vector *fields, else { - Field* field = new Field(); - field->name = nameprepend + rec->FieldName(i); - field->type = rec->FieldType(i)->Tag(); + string 
name = nameprepend + rec->FieldName(i); + const char* secondary = 0; + TypeTag ty = rec->FieldType(i)->Tag(); + TypeTag st = TYPE_VOID; + bool optional = false; - if ( field->type == TYPE_TABLE ) - field->subtype = rec->FieldType(i)->AsSetType()->Indices()->PureType()->Tag(); + if ( ty == TYPE_TABLE ) + st = rec->FieldType(i)->AsSetType()->Indices()->PureType()->Tag(); - else if ( field->type == TYPE_VECTOR ) - field->subtype = rec->FieldType(i)->AsVectorType()->YieldType()->Tag(); + else if ( ty == TYPE_VECTOR ) + st = rec->FieldType(i)->AsVectorType()->YieldType()->Tag(); - else if ( field->type == TYPE_PORT && + else if ( ty == TYPE_PORT && rec->FieldDecl(i)->FindAttr(ATTR_TYPE_COLUMN) ) { // we have an annotation for the second column @@ -811,12 +814,13 @@ bool Manager::UnrollRecordType(vector *fields, assert(c); assert(c->Type()->Tag() == TYPE_STRING); - field->secondary_name = c->AsStringVal()->AsString()->CheckString(); + secondary = c->AsStringVal()->AsString()->CheckString(); } if ( rec->FieldDecl(i)->FindAttr(ATTR_OPTIONAL ) ) - field->optional = true; + optional = true; + Field* field = new Field(name.c_str(), secondary, ty, st, optional); fields->push_back(field); } } @@ -1230,7 +1234,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) #endif // Send event that the current update is indeed finished. - SendEvent(update_finished, 2, new StringVal(i->name.c_str()), new StringVal(i->info.source.c_str())); + SendEvent(update_finished, 2, new StringVal(i->name.c_str()), new StringVal(i->info->source)); } void Manager::Put(ReaderFrontend* reader, Value* *vals) @@ -1707,7 +1711,7 @@ int Manager::GetValueLength(const Value* val) { case TYPE_STRING: case TYPE_ENUM: { - length += val->val.string_val->size(); + length += val->val.string_val.length; break; } @@ -1806,8 +1810,8 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) case TYPE_STRING: case TYPE_ENUM: { - memcpy(data+startpos, val->val.string_val->c_str(), val->val.string_val->length()); - return val->val.string_val->size(); + memcpy(data+startpos, val->val.string_val.data, val->val.string_val.length); + return val->val.string_val.length; } case TYPE_ADDR: @@ -1955,7 +1959,7 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) case TYPE_STRING: { - BroString *s = new BroString(*(val->val.string_val)); + BroString *s = new BroString((const u_char*)val->val.string_val.data, val->val.string_val.length, 0); return new StringVal(s); } @@ -2039,8 +2043,8 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) case TYPE_ENUM: { // well, this is kind of stupid, because EnumType just mangles the module name and the var name together again... // but well - string module = extract_module_name(val->val.string_val->c_str()); - string var = extract_var_name(val->val.string_val->c_str()); + string module = extract_module_name(val->val.string_val.data); + string var = extract_var_name(val->val.string_val.data); bro_int_t index = request_type->AsEnumType()->Lookup(module, var.c_str()); if ( index == -1 ) reporter->InternalError("Value not found in enum mappimg. 
Module: %s, var: %s", diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 84106a3c94..88a78c3cd7 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -56,22 +56,24 @@ private: class SendEventMessage : public threading::OutputMessage { public: - SendEventMessage(ReaderFrontend* reader, const string& name, const int num_vals, Value* *val) + SendEventMessage(ReaderFrontend* reader, const char* name, const int num_vals, Value* *val) : threading::OutputMessage("SendEvent", reader), - name(name), num_vals(num_vals), val(val) {} + name(copy_string(name)), num_vals(num_vals), val(val) {} + + virtual ~SendEventMessage() { delete [] name; } virtual bool Process() { bool success = input_mgr->SendEvent(name, num_vals, val); if ( ! success ) - reporter->Error("SendEvent for event %s failed", name.c_str()); + reporter->Error("SendEvent for event %s failed", name); return true; // We do not want to die if sendEvent fails because the event did not return. } private: - const string name; + const char* name; const int num_vals; Value* *val; }; @@ -146,12 +148,14 @@ ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() { disabled = true; // disabled will be set correcty in init. frontend = arg_frontend; + info = new ReaderInfo(frontend->Info()); SetName(frontend->Name()); } ReaderBackend::~ReaderBackend() { + delete info; } void ReaderBackend::Put(Value* *val) @@ -169,7 +173,7 @@ void ReaderBackend::Clear() SendOut(new ClearMessage(frontend)); } -void ReaderBackend::SendEvent(const string& name, const int num_vals, Value* *vals) +void ReaderBackend::SendEvent(const char* name, const int num_vals, Value* *vals) { SendOut(new SendEventMessage(frontend, name, num_vals, vals)); } @@ -184,17 +188,14 @@ void ReaderBackend::SendEntry(Value* *vals) SendOut(new SendEntryMessage(frontend, vals)); } -bool ReaderBackend::Init(const ReaderInfo& arg_info, const int arg_num_fields, +bool ReaderBackend::Init(const int arg_num_fields, const threading::Field* const* arg_fields) { - info = arg_info; num_fields = arg_num_fields; fields = arg_fields; - SetName("InputReader/"+info.source); - // disable if DoInit returns error. - int success = DoInit(arg_info, arg_num_fields, arg_fields); + int success = DoInit(*info, arg_num_fields, arg_fields); if ( ! success ) { diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index 1e77a61f37..7626cc25ed 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -34,7 +34,10 @@ enum ReaderMode { * for new appended data. When new data is appended is has to be sent * using the Put api functions. */ - MODE_STREAM + MODE_STREAM, + + /** Internal dummy mode for initialization. */ + MODE_NONE }; class ReaderFrontend; @@ -70,14 +73,17 @@ public: */ struct ReaderInfo { - typedef std::map config_map; + // Structure takes ownership of the strings. + typedef std::map config_map; /** * A string left to the interpretation of the reader * implementation; it corresponds to the value configured on * the script-level for the logging filter. + * + * Structure takes ownership of the string. */ - string source; + const char* source; /** * A map of key/value pairs corresponding to the relevant @@ -89,6 +95,35 @@ public: * The opening mode for the input source. */ ReaderMode mode; + + ReaderInfo() + { + source = 0; + mode = MODE_NONE; + } + + ReaderInfo(const ReaderInfo& other) + { + source = other.source ? 
copy_string(other.source) : 0; + mode = other.mode; + + for ( config_map::const_iterator i = other.config.begin(); i != other.config.end(); i++ ) + config.insert(std::make_pair(copy_string(i->first), copy_string(i->second))); + } + + ~ReaderInfo() + { + delete [] source; + + for ( config_map::iterator i = config.begin(); i != config.end(); i++ ) + { + delete [] i->first; + delete [] i->second; + } + } + + private: + const ReaderInfo& operator=(const ReaderInfo& other); // Disable. }; /** @@ -106,7 +141,7 @@ public: * * @return False if an error occured. */ - bool Init(const ReaderInfo& info, int num_fields, const threading::Field* const* fields); + bool Init(int num_fields, const threading::Field* const* fields); /** * Force trigger an update of the input stream. The action that will @@ -133,7 +168,7 @@ public: /** * Returns the additional reader information into the constructor. */ - const ReaderInfo& Info() const { return info; } + const ReaderInfo& Info() const { return *info; } /** * Returns the number of log fields as passed into the constructor. @@ -209,7 +244,7 @@ protected: * * @param vals the values to be given to the event */ - void SendEvent(const string& name, const int num_vals, threading::Value* *vals); + void SendEvent(const char* name, const int num_vals, threading::Value* *vals); // Content-sending-functions (simple mode). Include table-specific // functionality that simply is not used if we have no table. @@ -291,7 +326,7 @@ private: // from this class, it's running in a different thread! ReaderFrontend* frontend; - ReaderInfo info; + ReaderInfo* info; unsigned int num_fields; const threading::Field* const * fields; // raw mapping diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index 7e4ef201b1..a8528c002d 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -11,18 +11,17 @@ namespace input { class InitMessage : public threading::InputMessage { public: - InitMessage(ReaderBackend* backend, const ReaderBackend::ReaderInfo& info, + InitMessage(ReaderBackend* backend, const int num_fields, const threading::Field* const* fields) : threading::InputMessage("Init", backend), - info(info), num_fields(num_fields), fields(fields) { } + num_fields(num_fields), fields(fields) { } virtual bool Process() { - return Object()->Init(info, num_fields, fields); + return Object()->Init(num_fields, fields); } private: - const ReaderBackend::ReaderInfo info; const int num_fields; const threading::Field* const* fields; }; @@ -37,21 +36,26 @@ public: virtual bool Process() { return Object()->Update(); } }; -ReaderFrontend::ReaderFrontend(bro_int_t type) +ReaderFrontend::ReaderFrontend(const ReaderBackend::ReaderInfo& arg_info, EnumVal* type) { disabled = initialized = false; - ty_name = ""; - backend = input_mgr->CreateBackend(this, type); + info = new ReaderBackend::ReaderInfo(arg_info); + const char* t = type->Type()->AsEnumType()->Lookup(type->InternalInt()); + name = copy_string(fmt("%s/%s", arg_info.source, t)); + + backend = input_mgr->CreateBackend(this, type->InternalInt()); assert(backend); backend->Start(); } ReaderFrontend::~ReaderFrontend() { + delete [] name; + delete info; } -void ReaderFrontend::Init(const ReaderBackend::ReaderInfo& arg_info, const int arg_num_fields, +void ReaderFrontend::Init(const int arg_num_fields, const threading::Field* const* arg_fields) { if ( disabled ) @@ -60,12 +64,11 @@ void ReaderFrontend::Init(const ReaderBackend::ReaderInfo& arg_info, const int a if ( initialized ) reporter->InternalError("reader 
initialize twice"); - info = arg_info; num_fields = arg_num_fields; fields = arg_fields; initialized = true; - backend->SendIn(new InitMessage(backend, info, num_fields, fields)); + backend->SendIn(new InitMessage(backend, num_fields, fields)); } void ReaderFrontend::Update() @@ -82,12 +85,9 @@ void ReaderFrontend::Update() backend->SendIn(new UpdateMessage(backend)); } -string ReaderFrontend::Name() const +const char* ReaderFrontend::Name() const { - if ( ! info.source.size() ) - return ty_name; - - return ty_name + "/" + info.source; + return name; } } diff --git a/src/input/ReaderFrontend.h b/src/input/ReaderFrontend.h index 93e416e65b..a93f7703ac 100644 --- a/src/input/ReaderFrontend.h +++ b/src/input/ReaderFrontend.h @@ -4,10 +4,11 @@ #define INPUT_READERFRONTEND_H #include "ReaderBackend.h" - #include "threading/MsgThread.h" #include "threading/SerialTypes.h" +#include "Val.h" + namespace input { class Manager; @@ -25,6 +26,8 @@ public: /** * Constructor. * + * info: The meta information struct for the writer. + * * type: The backend writer type, with the value corresponding to the * script-level \c Input::Reader enum (e.g., \a READER_ASCII). The * frontend will internally instantiate a ReaderBackend of the @@ -32,7 +35,7 @@ public: * * Frontends must only be instantiated by the main thread. */ - ReaderFrontend(bro_int_t type); + ReaderFrontend(const ReaderBackend::ReaderInfo& info, EnumVal* type); /** * Destructor. @@ -52,7 +55,7 @@ public: * * This method must only be called from the main thread. */ - void Init(const ReaderBackend::ReaderInfo& info, const int arg_num_fields, const threading::Field* const* fields); + void Init(const int arg_num_fields, const threading::Field* const* fields); /** * Force an update of the current input source. Actual action depends @@ -100,12 +103,12 @@ public: * * This method is safe to call from any thread. */ - string Name() const; + const char* Name() const; /** * Returns the additional reader information passed into the constructor. */ - const ReaderBackend::ReaderInfo& Info() const { return info; } + const ReaderBackend::ReaderInfo& Info() const { assert(info); return *info; } /** * Returns the number of log fields as passed into the constructor. @@ -120,24 +123,14 @@ public: protected: friend class Manager; - /** - * Returns the name of the backend's type. - */ - const string& TypeName() const { return ty_name; } - - /** - * Sets the name of the backend's type. - */ - void SetTypeName(const string& name) { ty_name = name; } - private: ReaderBackend* backend; // The backend we have instanatiated. - ReaderBackend::ReaderInfo info; // Meta information as passed to Init(). + ReaderBackend::ReaderInfo* info; // Meta information. const threading::Field* const* fields; // The input fields. int num_fields; // Information as passed to Init(). - string ty_name; // Backend type, set by manager. bool disabled; // True if disabled. bool initialized; // True if initialized. + const char* name; // Descriptive name. }; } diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 7f93a3138c..73821d7cb6 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -87,10 +87,10 @@ bool Ascii::DoInit(const ReaderInfo& info, int num_fields, const Field* const* f { mtime = 0; - file = new ifstream(info.source.c_str()); + file = new ifstream(info.source); if ( ! 
file->is_open() ) { - Error(Fmt("Init: cannot open %s", info.source.c_str())); + Error(Fmt("Init: cannot open %s", info.source)); delete(file); file = 0; return false; @@ -98,7 +98,7 @@ bool Ascii::DoInit(const ReaderInfo& info, int num_fields, const Field* const* f if ( ReadHeader(false) == false ) { - Error(Fmt("Init: cannot open %s; headers are incorrect", info.source.c_str())); + Error(Fmt("Init: cannot open %s; headers are incorrect", info.source)); file->close(); delete(file); file = 0; @@ -164,20 +164,20 @@ bool Ascii::ReadHeader(bool useCached) } Error(Fmt("Did not find requested field %s in input data file %s.", - field->name.c_str(), Info().source.c_str())); + field->name, Info().source)); return false; } FieldMapping f(field->name, field->type, field->subtype, ifields[field->name]); - if ( field->secondary_name != "" ) + if ( field->secondary_name && strlen(field->secondary_name) != 0 ) { map::iterator fit2 = ifields.find(field->secondary_name); if ( fit2 == ifields.end() ) { Error(Fmt("Could not find requested port type field %s in input data file.", - field->secondary_name.c_str())); + field->secondary_name)); return false; } @@ -220,7 +220,8 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) switch ( field.type ) { case TYPE_ENUM: case TYPE_STRING: - val->val.string_val = new string(s); + val->val.string_val.length = s.size(); + val->val.string_val.data = copy_string(s.c_str()); break; case TYPE_BOOL: @@ -367,9 +368,9 @@ bool Ascii::DoUpdate() { // check if the file has changed struct stat sb; - if ( stat(Info().source.c_str(), &sb) == -1 ) + if ( stat(Info().source, &sb) == -1 ) { - Error(Fmt("Could not get stat for %s", Info().source.c_str())); + Error(Fmt("Could not get stat for %s", Info().source)); return false; } @@ -403,10 +404,10 @@ bool Ascii::DoUpdate() file = 0; } - file = new ifstream(Info().source.c_str()); + file = new ifstream(Info().source); if ( ! file->is_open() ) { - Error(Fmt("cannot open %s", Info().source.c_str())); + Error(Fmt("cannot open %s", Info().source)); return false; } diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/Benchmark.cc index 28afdc1c89..b8cec0f14d 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/Benchmark.cc @@ -38,7 +38,7 @@ void Benchmark::DoClose() bool Benchmark::DoInit(const ReaderInfo& info, int num_fields, const Field* const* fields) { - num_lines = atoi(info.source.c_str()); + num_lines = atoi(info.source); if ( autospread != 0.0 ) autospread_time = (int) ( (double) 1000000 / (autospread * (double) num_lines) ); @@ -126,8 +126,12 @@ threading::Value* Benchmark::EntryToVal(TypeTag type, TypeTag subtype) assert(false); // no enums, please. case TYPE_STRING: - val->val.string_val = new string(RandomString(10)); + { + string rnd = RandomString(10); + val->val.string_val.data = copy_string(rnd.c_str()); + val->val.string_val.length = rnd.size(); break; + } case TYPE_BOOL: val->val.int_val = 1; // we never lie. diff --git a/src/input/readers/Raw.cc b/src/input/readers/Raw.cc index f62e966883..ac96e5c0f5 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/Raw.cc @@ -108,7 +108,7 @@ bool Raw::DoInit(const ReaderInfo& info, int num_fields, const Field* const* fie firstrun = true; bool result; - if ( info.source.length() == 0 ) + if ( ! 
info.source || strlen(info.source) == 0 ) { Error("No source path provided"); return false; @@ -129,11 +129,12 @@ bool Raw::DoInit(const ReaderInfo& info, int num_fields, const Field* const* fie } // do Initialization - char last = info.source[info.source.length()-1]; + string source = string(info.source); + char last = info.source[source.length() - 1]; if ( last == '|' ) { execute = true; - fname = info.source.substr(0, fname.length() - 1); + fname = source.substr(0, fname.length() - 1); if ( (info.mode != MODE_MANUAL) ) { @@ -237,7 +238,8 @@ bool Raw::DoUpdate() // filter has exactly one text field. convert to it. Value* val = new Value(TYPE_STRING, true); - val->val.string_val = new string(line); + val->val.string_val.data = copy_string(line.c_str()); + val->val.string_val.length = line.size(); fields[0] = val; Put(fields); diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 1808b83738..fd970c48b2 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -6,6 +6,7 @@ #include "../EventHandler.h" #include "../NetVar.h" #include "../Net.h" +#include "../Type.h" #include "threading/Manager.h" #include "threading/SerialTypes.h" @@ -75,7 +76,7 @@ struct Manager::WriterInfo { double interval; Func* postprocessor; WriterFrontend* writer; - WriterBackend::WriterInfo info; + WriterBackend::WriterInfo* info; }; struct Manager::Stream { @@ -118,6 +119,7 @@ Manager::Stream::~Stream() Unref(winfo->type); delete winfo->writer; + delete winfo->info; delete winfo; } @@ -193,7 +195,6 @@ WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) assert(ld->factory); - frontend->ty_name = ld->name; WriterBackend* backend = (*ld->factory)(frontend); assert(backend); @@ -476,18 +477,17 @@ bool Manager::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, return false; } - threading::Field* field = new threading::Field(); - field->name = new_path; - field->type = t->Tag(); - field->optional = rt->FieldDecl(i)->FindAttr(ATTR_OPTIONAL); + TypeTag st = TYPE_VOID; - if ( field->type == TYPE_TABLE ) - field->subtype = t->AsSetType()->Indices()->PureType()->Tag(); + if ( t->Tag() == TYPE_TABLE ) + st = t->AsSetType()->Indices()->PureType()->Tag(); - else if ( field->type == TYPE_VECTOR ) - field->subtype = t->AsVectorType()->YieldType()->Tag(); + else if ( t->Tag() == TYPE_VECTOR ) + st = t->AsVectorType()->YieldType()->Tag(); - filter->fields[filter->num_fields - 1] = field; + bool optional = rt->FieldDecl(i)->FindAttr(ATTR_OPTIONAL); + + filter->fields[filter->num_fields - 1] = new threading::Field(new_path.c_str(), 0, t->Tag(), st, optional); } return true; @@ -594,7 +594,7 @@ bool Manager::AddFilter(EnumVal* id, RecordVal* fval) { threading::Field* field = filter->fields[i]; DBG_LOG(DBG_LOGGING, " field %10s: %s", - field->name.c_str(), type_name(field->type)); + field->name, type_name(field->type)); } #endif @@ -769,9 +769,9 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) for ( int j = 0; j < filter->num_fields; ++j ) arg_fields[j] = new threading::Field(*filter->fields[j]); - WriterBackend::WriterInfo info; - info.path = path; - info.network_time = network_time; + WriterBackend::WriterInfo* info = new WriterBackend::WriterInfo; + info->path = copy_string(path.c_str()); + info->network_time = network_time; HashKey* k; IterCookie* c = filter->config->AsTable()->InitForIteration(); @@ -782,7 +782,7 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) ListVal* index = filter->config->RecoverIndex(k); string key = 
index->Index(0)->AsString()->CheckString(); string value = v->Value()->AsString()->CheckString(); - info.config.insert(std::make_pair(key, value)); + info->config.insert(std::make_pair(copy_string(key.c_str()), copy_string(value.c_str()))); Unref(index); delete k; } @@ -844,11 +844,16 @@ threading::Value* Manager::ValToLogVal(Val* val, BroType* ty) val->Type()->AsEnumType()->Lookup(val->InternalInt()); if ( s ) - lval->val.string_val = new string(s); + { + lval->val.string_val.data = copy_string(s); + lval->val.string_val.length = strlen(s); + } + else { val->Type()->Error("enum type does not contain value", val); - lval->val.string_val = new string(); + lval->val.string_val.data = copy_string(""); + lval->val.string_val.length = 0; } break; } @@ -880,15 +885,20 @@ threading::Value* Manager::ValToLogVal(Val* val, BroType* ty) case TYPE_STRING: { const BroString* s = val->AsString(); - lval->val.string_val = - new string((const char*) s->Bytes(), s->Len()); + char* buf = new char[s->Len()]; + memcpy(buf, s->Bytes(), s->Len()); + + lval->val.string_val.data = buf; + lval->val.string_val.length = s->Len(); break; } case TYPE_FILE: { const BroFile* f = val->AsFile(); - lval->val.string_val = new string(f->Name()); + string s = f->Name(); + lval->val.string_val.data = copy_string(s.c_str()); + lval->val.string_val.length = s.size(); break; } @@ -897,7 +907,9 @@ threading::Value* Manager::ValToLogVal(Val* val, BroType* ty) ODesc d; const Func* f = val->AsFunc(); f->Describe(&d); - lval->val.string_val = new string(d.Description()); + const char* s = d.Description(); + lval->val.string_val.data = copy_string(s); + lval->val.string_val.length = strlen(s); break; } @@ -977,7 +989,7 @@ threading::Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, return vals; } -WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const WriterBackend::WriterInfo& info, +WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, WriterBackend::WriterInfo* info, int num_fields, const threading::Field* const* fields, bool local, bool remote) { Stream* stream = FindStream(id); @@ -987,7 +999,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer return 0; Stream::WriterMap::iterator w = - stream->writers.find(Stream::WriterPathPair(writer->AsEnum(), info.path)); + stream->writers.find(Stream::WriterPathPair(writer->AsEnum(), info->path)); if ( w != stream->writers.end() ) // If we already have a writer for this. That's fine, we just @@ -1013,7 +1025,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer { Filter* f = *it; if ( f->writer->AsEnum() == writer->AsEnum() && - f->path == info.path ) + f->path == info->path ) { found_filter_match = true; winfo->interval = f->interval; @@ -1030,7 +1042,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer } stream->writers.insert( - Stream::WriterMap::value_type(Stream::WriterPathPair(writer->AsEnum(), info.path), + Stream::WriterMap::value_type(Stream::WriterPathPair(writer->AsEnum(), info->path), winfo)); // Still need to set the WriterInfo's rotation parameters, which we @@ -1038,11 +1050,11 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, const Writer const char* base_time = log_rotate_base_time ? 
log_rotate_base_time->AsString()->CheckString() : 0; - winfo->info.rotation_interval = winfo->interval; - winfo->info.rotation_base = parse_rotate_base_time(base_time); + winfo->info->rotation_interval = winfo->interval; + winfo->info->rotation_base = parse_rotate_base_time(base_time); - winfo->writer = new WriterFrontend(id, writer, local, remote); - winfo->writer->Init(winfo->info, num_fields, fields); + winfo->writer = new WriterFrontend(*winfo->info, id, writer, local, remote); + winfo->writer->Init(num_fields, fields); InstallRotationTimer(winfo); @@ -1124,7 +1136,7 @@ void Manager::SendAllWritersTo(RemoteSerializer::PeerID peer) EnumVal writer_val(i->first.first, BifType::Enum::Log::Writer); remote_serializer->SendLogCreateWriter(peer, (*s)->id, &writer_val, - i->second->info, + *i->second->info, writer->NumFields(), writer->Fields()); } @@ -1260,14 +1272,14 @@ void Manager::InstallRotationTimer(WriterInfo* winfo) timer_mgr->Add(winfo->rotation_timer); DBG_LOG(DBG_LOGGING, "Scheduled rotation timer for %s to %.6f", - winfo->writer->Name().c_str(), winfo->rotation_timer->Time()); + winfo->writer->Name(), winfo->rotation_timer->Time()); } } void Manager::Rotate(WriterInfo* winfo) { DBG_LOG(DBG_LOGGING, "Rotating %s at %.6f", - winfo->writer->Name().c_str(), network_time); + winfo->writer->Name(), network_time); // Build a temporary path for the writer to move the file to. struct tm tm; @@ -1278,15 +1290,14 @@ void Manager::Rotate(WriterInfo* winfo) localtime_r(&teatime, &tm); strftime(buf, sizeof(buf), date_fmt, &tm); - string tmp = string(fmt("%s-%s", winfo->writer->Info().path.c_str(), buf)); - // Trigger the rotation. + const char* tmp = fmt("%s-%s", winfo->writer->Info().path, buf); winfo->writer->Rotate(tmp, winfo->open_time, network_time, terminating); ++rotations_pending; } -bool Manager::FinishedRotation(WriterFrontend* writer, string new_name, string old_name, +bool Manager::FinishedRotation(WriterFrontend* writer, const char* new_name, const char* old_name, double open, double close, bool terminating) { --rotations_pending; @@ -1296,7 +1307,7 @@ bool Manager::FinishedRotation(WriterFrontend* writer, string new_name, string o return true; DBG_LOG(DBG_LOGGING, "Finished rotating %s at %.6f, new name %s", - writer->Name().c_str(), network_time, new_name.c_str()); + writer->Name(), network_time, new_name); WriterInfo* winfo = FindWriter(writer); if ( ! winfo ) @@ -1305,8 +1316,8 @@ bool Manager::FinishedRotation(WriterFrontend* writer, string new_name, string o // Create the RotationInfo record. RecordVal* info = new RecordVal(BifType::Record::Log::RotationInfo); info->Assign(0, winfo->type->Ref()); - info->Assign(1, new StringVal(new_name.c_str())); - info->Assign(2, new StringVal(winfo->writer->Info().path.c_str())); + info->Assign(1, new StringVal(new_name)); + info->Assign(2, new StringVal(winfo->writer->Info().path)); info->Assign(3, new Val(open, TYPE_TIME)); info->Assign(4, new Val(close, TYPE_TIME)); info->Assign(5, new Val(terminating, TYPE_BOOL)); diff --git a/src/logging/Manager.h b/src/logging/Manager.h index 38dd9258b3..ae7a1796ba 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -162,8 +162,8 @@ protected: //// Function also used by the RemoteSerializer. - // Takes ownership of fields. - WriterFrontend* CreateWriter(EnumVal* id, EnumVal* writer, const WriterBackend::WriterInfo& info, + // Takes ownership of fields and info. 
+ WriterFrontend* CreateWriter(EnumVal* id, EnumVal* writer, WriterBackend::WriterInfo* info, int num_fields, const threading::Field* const* fields, bool local, bool remote); @@ -175,7 +175,7 @@ protected: void SendAllWritersTo(RemoteSerializer::PeerID peer); // Signals that a file has been rotated. - bool FinishedRotation(WriterFrontend* writer, string new_name, string old_name, + bool FinishedRotation(WriterFrontend* writer, const char* new_name, const char* old_name, double open, double close, bool terminating); // Deletes the values as passed into Write(). diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index a284c56201..8f119d6f8f 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -18,20 +18,26 @@ namespace logging { class RotationFinishedMessage : public threading::OutputMessage { public: - RotationFinishedMessage(WriterFrontend* writer, string new_name, string old_name, + RotationFinishedMessage(WriterFrontend* writer, const char* new_name, const char* old_name, double open, double close, bool terminating) : threading::OutputMessage("RotationFinished", writer), - new_name(new_name), old_name(old_name), open(open), + new_name(copy_string(new_name)), old_name(copy_string(old_name)), open(open), close(close), terminating(terminating) { } + virtual ~RotationFinishedMessage() + { + delete [] new_name; + delete [] old_name; + } + virtual bool Process() { return log_mgr->FinishedRotation(Object(), new_name, old_name, open, close, terminating); } private: - string new_name; - string old_name; + const char* new_name; + const char* old_name; double open; double close; bool terminating; @@ -65,12 +71,16 @@ bool WriterBackend::WriterInfo::Read(SerializationFormat* fmt) { int size; - if ( ! (fmt->Read(&path, "path") && + string tmp_path; + + if ( ! (fmt->Read(&tmp_path, "path") && fmt->Read(&rotation_base, "rotation_base") && fmt->Read(&rotation_interval, "rotation_interval") && fmt->Read(&size, "config_size")) ) return false; + path = copy_string(tmp_path.c_str()); + config.clear(); while ( size ) @@ -81,7 +91,7 @@ bool WriterBackend::WriterInfo::Read(SerializationFormat* fmt) if ( ! (fmt->Read(&value, "config-value") && fmt->Read(&value, "config-key")) ) return false; - config.insert(std::make_pair(value, key)); + config.insert(std::make_pair(copy_string(value.c_str()), copy_string(key.c_str()))); } return true; @@ -98,7 +108,7 @@ bool WriterBackend::WriterInfo::Write(SerializationFormat* fmt) const fmt->Write(size, "config_size")) ) return false; - for ( config_map::const_iterator i = config.begin(); i != config.end(); ++i ) + for ( config_map::const_iterator i = config.begin(); i != config.end(); ++i ) { if ( ! 
(fmt->Write(i->first, "config-value") && fmt->Write(i->second, "config-key")) ) return false; @@ -113,8 +123,7 @@ WriterBackend::WriterBackend(WriterFrontend* arg_frontend) : MsgThread() fields = 0; buffering = true; frontend = arg_frontend; - - info.path = ""; + info = new WriterInfo(frontend->Info()); SetName(frontend->Name()); } @@ -128,6 +137,8 @@ WriterBackend::~WriterBackend() delete [] fields; } + + delete info; } void WriterBackend::DeleteVals(int num_writes, Value*** vals) @@ -144,7 +155,7 @@ void WriterBackend::DeleteVals(int num_writes, Value*** vals) delete [] vals; } -bool WriterBackend::FinishedRotation(string new_name, string old_name, +bool WriterBackend::FinishedRotation(const char* new_name, const char* old_name, double open, double close, bool terminating) { SendOut(new RotationFinishedMessage(frontend, new_name, old_name, open, close, terminating)); @@ -156,15 +167,12 @@ void WriterBackend::DisableFrontend() SendOut(new DisableMessage(frontend)); } -bool WriterBackend::Init(const WriterInfo& arg_info, int arg_num_fields, const Field* const* arg_fields, const string& frontend_name) +bool WriterBackend::Init(int arg_num_fields, const Field* const* arg_fields) { - info = arg_info; num_fields = arg_num_fields; fields = arg_fields; - SetName(frontend->Name()); - - if ( ! DoInit(arg_info, arg_num_fields, arg_fields) ) + if ( ! DoInit(*info, arg_num_fields, arg_fields) ) { DisableFrontend(); return false; @@ -246,7 +254,7 @@ bool WriterBackend::SetBuf(bool enabled) return true; } -bool WriterBackend::Rotate(string rotated_path, double open, +bool WriterBackend::Rotate(const char* rotated_path, double open, double close, bool terminating) { if ( ! DoRotate(rotated_path, open, close, terminating) ) diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 33cde8679e..a59cd1893e 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -48,14 +48,17 @@ public: */ struct WriterInfo { - typedef std::map config_map; + // Structure takes ownership of these strings. + typedef std::map config_map; /** * A string left to the interpretation of the writer * implementation; it corresponds to the 'path' value configured * on the script-level for the logging filter. + * + * Structure takes ownership of string. */ - string path; + const char* path; /** * The rotation interval as configured for this writer. @@ -76,9 +79,38 @@ public: * A map of key/value pairs corresponding to the relevant * filter's "config" table. */ - std::map config; + config_map config; + + WriterInfo() + { + path = 0; + } + + WriterInfo(const WriterInfo& other) + { + path = other.path ? copy_string(other.path) : 0; + rotation_interval = other.rotation_interval; + rotation_base = other.rotation_base; + network_time = other.network_time; + + for ( config_map::const_iterator i = other.config.begin(); i != other.config.end(); i++ ) + config.insert(std::make_pair(copy_string(i->first), copy_string(i->second))); + } + + ~WriterInfo() + { + delete [] path; + + for ( config_map::iterator i = config.begin(); i != config.end(); i++ ) + { + delete [] i->first; + delete [] i->second; + } + } private: + const WriterInfo& operator=(const WriterInfo& other); // Disable. + friend class ::RemoteSerializer; // Note, these need to be adapted when changing the struct's @@ -90,7 +122,6 @@ public: /** * One-time initialization of the writer to define the logged fields. * - * @param info Meta information for the writer. 
* @param num_fields * * @param fields An array of size \a num_fields with the log fields. @@ -100,7 +131,7 @@ public: * * @return False if an error occured. */ - bool Init(const WriterInfo& info, int num_fields, const threading::Field* const* fields, const string& frontend_name); + bool Init(int num_fields, const threading::Field* const* fields); /** * Writes one log entry. @@ -146,7 +177,7 @@ public: * * @return False if an error occured. */ - bool Rotate(string rotated_path, double open, double close, bool terminating); + bool Rotate(const char* rotated_path, double open, double close, bool terminating); /** * Disables the frontend that has instantiated this backend. Once @@ -157,7 +188,7 @@ public: /** * Returns the additional writer information passed into the constructor. */ - const WriterInfo& Info() const { return info; } + const WriterInfo& Info() const { return *info; } /** * Returns the number of log fields as passed into the constructor. @@ -193,7 +224,7 @@ public: * @param terminating: True if the original rotation request occured * due to the main Bro process shutting down. */ - bool FinishedRotation(string new_name, string old_name, + bool FinishedRotation(const char* new_name, const char* old_name, double open, double close, bool terminating); /** Helper method to render an IP address as a string. @@ -322,7 +353,7 @@ protected: * due the main Bro prcoess terminating (and not because we've * reached a regularly scheduled time for rotation). */ - virtual bool DoRotate(string rotated_path, double open, double close, + virtual bool DoRotate(const char* rotated_path, double open, double close, bool terminating) = 0; /** @@ -351,7 +382,7 @@ private: // this class, it's running in a different thread! WriterFrontend* frontend; - WriterInfo info; // Meta information as passed to Init(). + const WriterInfo* info; // Meta information. int num_fields; // Number of log fields. const threading::Field* const* fields; // Log fields. bool buffering; // True if buffering is enabled. 
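The WriterInfo struct above (and its twin, ReaderInfo, in src/input/ReaderBackend.h) carries the ownership convention this patch's commit message describes: anything handed to another thread holds plain const char* members that are deep-copied on construction and released with delete [] in the destructor, rather than sharing std::string buffers across threads. The standalone C++ sketch below only illustrates that pattern; it is not Bro code, the names Info and copy_cstr() are invented for the example, and copy_cstr() merely stands in for Bro's copy_string() helper, which is assumed here to return a new[]-allocated duplicate.

#include <cstring>

// Stand-in for Bro's copy_string(): assumed to hand back a new[]-allocated
// duplicate that the owner later releases with delete [].
inline const char* copy_cstr(const char* s)
	{
	if ( ! s )
		return 0;

	char* c = new char[strlen(s) + 1];
	strcpy(c, s);
	return c;
	}

// Minimal analogue of the WriterInfo/ReaderInfo ownership scheme.
struct Info
	{
	const char* path;	// owned; deep-copied in, delete[]'d on destruction

	Info() : path(0)	{ }
	explicit Info(const char* p) : path(copy_cstr(p))	{ }

	// The frontend passes a reference; the backend keeps its own copy,
	// so the two threads never touch the same heap buffer.
	Info(const Info& other) : path(copy_cstr(other.path))	{ }

	~Info()	{ delete [] path; }

private:
	const Info& operator=(const Info&);	// disabled, as in the patch
	};

int main()
	{
	Info original("conn");
	Info backend_copy(original);	// what a backend would do with its frontend's Info()
	return 0;
	}

Copying on hand-off keeps each thread's strings private, so these fields need no locking; the price is one extra allocation per struct or message, which the patch accepts in exchange for a simpler threading model.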
diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index b816327e9c..fc237d6f6e 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -16,35 +16,36 @@ namespace logging { class InitMessage : public threading::InputMessage { public: - InitMessage(WriterBackend* backend, const WriterBackend::WriterInfo& info, const int num_fields, const Field* const* fields, const string& frontend_name) + InitMessage(WriterBackend* backend, const int num_fields, const Field* const* fields) : threading::InputMessage("Init", backend), - info(info), num_fields(num_fields), fields(fields), - frontend_name(frontend_name) { } + num_fields(num_fields), fields(fields) + {} - virtual bool Process() { return Object()->Init(info, num_fields, fields, frontend_name); } + + virtual bool Process() { return Object()->Init(num_fields, fields); } private: - WriterBackend::WriterInfo info; const int num_fields; const Field * const* fields; - const string frontend_name; }; class RotateMessage : public threading::InputMessage { public: - RotateMessage(WriterBackend* backend, WriterFrontend* frontend, const string rotated_path, const double open, + RotateMessage(WriterBackend* backend, WriterFrontend* frontend, const char* rotated_path, const double open, const double close, const bool terminating) : threading::InputMessage("Rotate", backend), frontend(frontend), - rotated_path(rotated_path), open(open), + rotated_path(copy_string(rotated_path)), open(open), close(close), terminating(terminating) { } + virtual ~RotateMessage() { delete [] rotated_path; } + virtual bool Process() { return Object()->Rotate(rotated_path, open, close, terminating); } private: WriterFrontend* frontend; - const string rotated_path; + const char* rotated_path; const double open; const double close; const bool terminating; @@ -96,7 +97,7 @@ private: using namespace logging; -WriterFrontend::WriterFrontend(EnumVal* arg_stream, EnumVal* arg_writer, bool arg_local, bool arg_remote) +WriterFrontend::WriterFrontend(const WriterBackend::WriterInfo& arg_info, EnumVal* arg_stream, EnumVal* arg_writer, bool arg_local, bool arg_remote) { stream = arg_stream; writer = arg_writer; @@ -109,7 +110,10 @@ WriterFrontend::WriterFrontend(EnumVal* arg_stream, EnumVal* arg_writer, bool ar remote = arg_remote; write_buffer = 0; write_buffer_pos = 0; - ty_name = ""; + info = new WriterBackend::WriterInfo(arg_info); + + const char* w = arg_writer->Type()->AsEnumType()->Lookup(arg_stream->InternalInt()); + name = copy_string(fmt("%s/%s", arg_info.path, w)); if ( local ) { @@ -127,14 +131,7 @@ WriterFrontend::~WriterFrontend() { Unref(stream); Unref(writer); - } - -string WriterFrontend::Name() const - { - if ( ! 
info.path.size() ) - return ty_name; - - return ty_name + "/" + info.path; + delete info; } void WriterFrontend::Stop() @@ -143,7 +140,7 @@ void WriterFrontend::Stop() SetDisable(); } -void WriterFrontend::Init(const WriterBackend::WriterInfo& arg_info, int arg_num_fields, const Field* const * arg_fields) +void WriterFrontend::Init(int arg_num_fields, const Field* const * arg_fields) { if ( disabled ) return; @@ -151,19 +148,18 @@ void WriterFrontend::Init(const WriterBackend::WriterInfo& arg_info, int arg_num if ( initialized ) reporter->InternalError("writer initialize twice"); - info = arg_info; num_fields = arg_num_fields; fields = arg_fields; initialized = true; if ( backend ) - backend->SendIn(new InitMessage(backend, arg_info, arg_num_fields, arg_fields, Name())); + backend->SendIn(new InitMessage(backend, arg_num_fields, arg_fields)); if ( remote ) remote_serializer->SendLogCreateWriter(stream, writer, - arg_info, + *info, arg_num_fields, arg_fields); @@ -177,7 +173,7 @@ void WriterFrontend::Write(int num_fields, Value** vals) if ( remote ) remote_serializer->SendLogWrite(stream, writer, - info.path, + info->path, num_fields, vals); @@ -242,7 +238,7 @@ void WriterFrontend::Flush(double network_time) backend->SendIn(new FlushMessage(backend, network_time)); } -void WriterFrontend::Rotate(string rotated_path, double open, double close, bool terminating) +void WriterFrontend::Rotate(const char* rotated_path, double open, double close, bool terminating) { if ( disabled ) return; diff --git a/src/logging/WriterFrontend.h b/src/logging/WriterFrontend.h index e8f3d06d6c..549d602bd5 100644 --- a/src/logging/WriterFrontend.h +++ b/src/logging/WriterFrontend.h @@ -31,6 +31,10 @@ public: * script-level \c Log::Writer enum (e.g., \a WRITER_ASCII). The * frontend will internally instantiate a WriterBackend of the * corresponding type. + * + * info: The meta information struct for the writer. + * + * writer_name: A descriptive name for the writer's type. * * local: If true, the writer will instantiate a local backend. * @@ -39,7 +43,7 @@ public: * * Frontends must only be instantiated by the main thread. */ - WriterFrontend(EnumVal* stream, EnumVal* writer, bool local, bool remote); + WriterFrontend(const WriterBackend::WriterInfo& info, EnumVal* stream, EnumVal* writer, bool local, bool remote); /** * Destructor. @@ -68,7 +72,7 @@ public: * * This method must only be called from the main thread. */ - void Init(const WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const* fields); + void Init(int num_fields, const threading::Field* const* fields); /** * Write out a record. @@ -130,7 +134,7 @@ public: * * This method must only be called from the main thread. */ - void Rotate(string rotated_path, double open, double close, bool terminating); + void Rotate(const char* rotated_path, double open, double close, bool terminating); /** * Finalizes writing to this tream. @@ -175,7 +179,7 @@ public: /** * Returns the additional writer information as passed into the constructor. */ - const WriterBackend::WriterInfo& Info() const { return info; } + const WriterBackend::WriterInfo& Info() const { return *info; } /** * Returns the number of log fields as passed into the constructor. @@ -188,7 +192,7 @@ public: * * This method is safe to call from any thread. */ - string Name() const; + const char* Name() const { return name; } /** * Returns the log fields as passed into the constructor. @@ -210,8 +214,8 @@ protected: bool local; // True if logging locally. 
bool remote; // True if loggin remotely. - string ty_name; // Name of the backend type. Set by the manager. - WriterBackend::WriterInfo info; // The writer information. + const char* name; // Descriptive name of the + WriterBackend::WriterInfo* info; // The writer information. int num_fields; // The number of log fields. const threading::Field* const* fields; // The log fields. diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 20963d1535..99fd3f3c6e 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -52,6 +52,8 @@ Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) Ascii::~Ascii() { + //fprintf(stderr, "DTOR %p\n", this); + // Normally, the file will be closed here already via the Finish() // message. But when we terminate abnormally, we may still have it open. if ( fd ) @@ -78,7 +80,10 @@ void Ascii::CloseFile(double t) return; if ( include_meta ) - WriteHeaderField("end", t ? Timestamp(t) : ""); + { + string ts = t ? Timestamp(t) : string(""); + WriteHeaderField("end", ts); + } close(fd); fd = 0; @@ -118,6 +123,8 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * if ( ! safe_write(fd, str.c_str(), str.length()) ) goto write_error; + string ts = Timestamp(info.network_time); + if ( ! (WriteHeaderField("set_separator", get_escaped_string( string(set_separator, set_separator_len), false)) && WriteHeaderField("empty_field", get_escaped_string( @@ -125,8 +132,8 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * WriteHeaderField("unset_field", get_escaped_string( string(unset_field, unset_field_len), false)) && WriteHeaderField("path", get_escaped_string(path, false)) && - WriteHeaderField("start", Timestamp(info.network_time))) ) - goto write_error; + WriteHeaderField("start", ts)) ) + goto write_error; for ( int i = 0; i < num_fields; ++i ) { @@ -136,8 +143,8 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * types += string(separator, separator_len); } - names += fields[i]->name; - types += fields[i]->TypeName(); + names += string(fields[i]->name); + types += fields[i]->TypeName().c_str(); } if ( ! (WriteHeaderField("fields", names) @@ -229,8 +236,8 @@ bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) case TYPE_FILE: case TYPE_FUNC: { - int size = val->val.string_val->size(); - const char* data = val->val.string_val->data(); + int size = val->val.string_val.length; + const char* data = val->val.string_val.data; if ( ! size ) { @@ -311,8 +318,7 @@ bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) } default: - Error(Fmt("unsupported field format %d for %s", val->type, - field->name.c_str())); + Error(Fmt("unsupported field format %d for %s", val->type, field->name)); return false; } @@ -366,7 +372,7 @@ write_error: return false; } -bool Ascii::DoRotate(string rotated_path, double open, double close, bool terminating) +bool Ascii::DoRotate(const char* rotated_path, double open, double close, bool terminating) { // Don't rotate special files or if there's not one currently open. if ( ! fd || IsSpecial(Info().path) ) @@ -374,10 +380,10 @@ bool Ascii::DoRotate(string rotated_path, double open, double close, bool termin CloseFile(close); - string nname = rotated_path + "." + LogExt(); + string nname = string(rotated_path) + "." + LogExt(); rename(fname.c_str(), nname.c_str()); - if ( ! FinishedRotation(nname, fname, open, close, terminating) ) + if ( ! 
FinishedRotation(nname.c_str(), fname.c_str(), open, close, terminating) ) { Error(Fmt("error rotating %s to %s", fname.c_str(), nname.c_str())); return false; @@ -401,19 +407,22 @@ bool Ascii::DoHeartbeat(double network_time, double current_time) string Ascii::LogExt() { const char* ext = getenv("BRO_LOG_SUFFIX"); - if ( ! ext ) ext = "log"; + if ( ! ext ) + ext = "log"; + return ext; } string Ascii::Timestamp(double t) { - struct tm tm; - char buf[128]; - const char* const date_fmt = "%Y-%m-%d-%H-%M-%S"; time_t teatime = time_t(t); - localtime_r(&teatime, &tm); - strftime(buf, sizeof(buf), date_fmt, &tm); + struct tm tmbuf; + struct tm* tm = localtime_r(&teatime, &tmbuf); + + char buf[128]; + const char* const date_fmt = "%Y-%m-%d-%H-%M-%S"; + strftime(buf, sizeof(buf), date_fmt, tm); return buf; } diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/Ascii.h index 371ded4344..cb82860cb7 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/Ascii.h @@ -24,7 +24,7 @@ protected: virtual bool DoWrite(int num_fields, const threading::Field* const* fields, threading::Value** vals); virtual bool DoSetBuf(bool enabled); - virtual bool DoRotate(string rotated_path, double open, + virtual bool DoRotate(const char* rotated_path, double open, double close, bool terminating); virtual bool DoFlush(double network_time); virtual bool DoFinish(double network_time); diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index 1978a8b781..7d3053e341 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -78,10 +78,10 @@ std::string DataSeries::LogValueToString(threading::Value *val) case TYPE_STRING: case TYPE_FILE: case TYPE_FUNC: - if ( ! val->val.string_val->size() ) + if ( ! val->val.string_val.length ) return ""; - return string(val->val.string_val->data(), val->val.string_val->size()); + return string(val->val.string_val.data, val->val.string_val.length); case TYPE_TABLE: { @@ -302,7 +302,8 @@ bool DataSeries::DoInit(const WriterInfo& info, int num_fields, const threading: if( ds_dump_schema ) { - FILE* pFile = fopen ( string(info.path + ".ds.xml").c_str() , "wb" ); + string name = string(info.path) + ".ds.xml"; + FILE* pFile = fopen(name.c_str(), "wb" ); if( pFile ) { @@ -394,17 +395,17 @@ bool DataSeries::DoWrite(int num_fields, const threading::Field* const * fields, return true; } -bool DataSeries::DoRotate(string rotated_path, double open, double close, bool terminating) +bool DataSeries::DoRotate(const char* rotated_path, double open, double close, bool terminating) { // Note that if DS files are rotated too often, the aggregate log // size will be (much) larger. CloseLog(); - string dsname = Info().path + ".ds"; - string nname = rotated_path + ".ds"; + string dsname = string(Info().path) + ".ds"; + string nname = string(rotated_path) + ".ds"; rename(dsname.c_str(), nname.c_str()); - if ( ! FinishedRotation(nname, dsname, open, close, terminating) ) + if ( ! 
FinishedRotation(nname.c_str(), dsname.c_str(), open, close, terminating) ) { Error(Fmt("error rotating %s to %s", dsname.c_str(), nname.c_str())); return false; diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h index 31d17a1a7b..9773c7ce1b 100644 --- a/src/logging/writers/DataSeries.h +++ b/src/logging/writers/DataSeries.h @@ -32,7 +32,7 @@ protected: virtual bool DoWrite(int num_fields, const threading::Field* const* fields, threading::Value** vals); virtual bool DoSetBuf(bool enabled); - virtual bool DoRotate(string rotated_path, double open, + virtual bool DoRotate(const char* rotated_path, double open, double close, bool terminating); virtual bool DoFlush(double network_time); virtual bool DoFinish(double network_time); diff --git a/src/logging/writers/None.cc b/src/logging/writers/None.cc index acf9355cf7..9b91b82199 100644 --- a/src/logging/writers/None.cc +++ b/src/logging/writers/None.cc @@ -1,4 +1,6 @@ +#include + #include "None.h" #include "NetVar.h" @@ -15,8 +17,17 @@ bool None::DoInit(const WriterInfo& info, int num_fields, std::cout << " rotation_interval=" << info.rotation_interval << std::endl; std::cout << " rotation_base=" << info.rotation_base << std::endl; - for ( std::map::const_iterator i = info.config.begin(); i != info.config.end(); i++ ) - std::cout << " config[" << i->first << "] = " << i->second << std::endl; + // Output the config sorted by keys. + + std::vector > keys; + + for ( WriterInfo::config_map::const_iterator i = info.config.begin(); i != info.config.end(); i++ ) + keys.push_back(std::make_pair(i->first, i->second)); + + std::sort(keys.begin(), keys.end()); + + for ( std::vector >::const_iterator i = keys.begin(); i != keys.end(); i++ ) + std::cout << " config[" << (*i).first << "] = " << (*i).second << std::endl; for ( int i = 0; i < num_fields; i++ ) { @@ -31,11 +42,11 @@ bool None::DoInit(const WriterInfo& info, int num_fields, return true; } -bool None::DoRotate(string rotated_path, double open, double close, bool terminating) +bool None::DoRotate(const char* rotated_path, double open, double close, bool terminating) { - if ( ! FinishedRotation(string("/dev/null"), Info().path, open, close, terminating)) + if ( ! 
FinishedRotation("/dev/null", Info().path, open, close, terminating)) { - Error(Fmt("error rotating %s", Info().path.c_str())); + Error(Fmt("error rotating %s", Info().path)); return false; } diff --git a/src/logging/writers/None.h b/src/logging/writers/None.h index c6d7cba56a..2a6f71a06a 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/None.h @@ -24,7 +24,7 @@ protected: virtual bool DoWrite(int num_fields, const threading::Field* const* fields, threading::Value** vals) { return true; } virtual bool DoSetBuf(bool enabled) { return true; } - virtual bool DoRotate(string rotated_path, double open, + virtual bool DoRotate(const char* rotated_path, double open, double close, bool terminating); virtual bool DoFlush(double network_time) { return true; } virtual bool DoFinish(double network_time) { return true; } diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index e7fb3f4c84..af57c26939 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -12,18 +12,23 @@ using namespace threading; +static const int STD_FMT_BUF_LEN = 2048; + uint64_t BasicThread::thread_counter = 0; BasicThread::BasicThread() { started = false; terminating = false; + killed = false; pthread = 0; - buf_len = 2048; + buf_len = STD_FMT_BUF_LEN; buf = (char*) malloc(buf_len); - name = Fmt("thread-%d", ++thread_counter); + strerr_buffer = 0; + + name = copy_string(fmt("thread-%" PRIu64, ++thread_counter)); thread_mgr->AddThread(this); } @@ -32,31 +37,41 @@ BasicThread::~BasicThread() { if ( buf ) free(buf); + + delete [] name; + delete [] strerr_buffer; } -void BasicThread::SetName(const string& arg_name) +void BasicThread::SetName(const char* name) { - // Slight race condition here with reader threads, but shouldn't matter. - name = arg_name; + delete [] name; + name = copy_string(name); } -void BasicThread::SetOSName(const string& name) +void BasicThread::SetOSName(const char* name) { #ifdef HAVE_LINUX - prctl(PR_SET_NAME, name.c_str(), 0, 0, 0); + prctl(PR_SET_NAME, name, 0, 0, 0); #endif #ifdef __APPLE__ - pthread_setname_np(name.c_str()); + pthread_setname_np(name); #endif #ifdef FREEBSD - pthread_set_name_np(pthread_self(), name, name.c_str()); + pthread_set_name_np(pthread_self(), name, name); #endif } const char* BasicThread::Fmt(const char* format, ...) { + if ( buf_len > 10 * STD_FMT_BUF_LEN ) + { + // Shrink back to normal. + buf = (char*) safe_realloc(buf, STD_FMT_BUF_LEN); + buf_len = STD_FMT_BUF_LEN; + } + va_list al; va_start(al, format); int n = safe_vsnprintf(buf, buf_len, format, al); @@ -64,15 +79,13 @@ const char* BasicThread::Fmt(const char* format, ...) if ( (unsigned int) n >= buf_len ) { // Not enough room, grow the buffer. - int tmp_len = n + 32; - char* tmp = (char*) malloc(tmp_len); + buf_len = n + 32; + buf = (char*) safe_realloc(buf, buf_len); // Is it portable to restart? va_start(al, format); - n = safe_vsnprintf(tmp, tmp_len, format, al); + n = safe_vsnprintf(buf, buf_len, format, al); va_end(al); - - free(tmp); } return buf; @@ -94,14 +107,14 @@ void BasicThread::Start() int err = pthread_create(&pthread, 0, BasicThread::launcher, this); if ( err != 0 ) - reporter->FatalError("Cannot create thread %s:%s", name.c_str(), Strerror(err)); + reporter->FatalError("Cannot create thread %s: %s", name, Strerror(err)); - DBG_LOG(DBG_THREADING, "Started thread %s", name.c_str()); + DBG_LOG(DBG_THREADING, "Started thread %s", name); OnStart(); } -void BasicThread::Stop() +void BasicThread::PrepareStop() { if ( ! 
started ) return; @@ -109,11 +122,28 @@ void BasicThread::Stop() if ( terminating ) return; - DBG_LOG(DBG_THREADING, "Signaling thread %s to terminate ...", name.c_str()); + DBG_LOG(DBG_THREADING, "Preparing thread %s to terminate ...", name); - terminating = true; + OnPrepareStop(); + } + +void BasicThread::Stop() + { + // XX fprintf(stderr, "stop1 %s %d %d\n", name, started, terminating); + + if ( ! started ) + return; + + if ( terminating ) + return; + + // XX fprintf(stderr, "stop2 %s\n", name); + + DBG_LOG(DBG_THREADING, "Signaling thread %s to terminate ...", name); OnStop(); + + terminating = true; } void BasicThread::Join() @@ -123,25 +153,33 @@ void BasicThread::Join() assert(terminating); - DBG_LOG(DBG_THREADING, "Joining thread %s ...", name.c_str()); + DBG_LOG(DBG_THREADING, "Joining thread %s ...", name); if ( pthread && pthread_join(pthread, 0) != 0 ) - reporter->FatalError("Failure joining thread %s", name.c_str()); + reporter->FatalError("Failure joining thread %s", name); - DBG_LOG(DBG_THREADING, "Done with thread %s", name.c_str()); + DBG_LOG(DBG_THREADING, "Joined with thread %s", name); pthread = 0; } void BasicThread::Kill() { + // We don't *really* kill the thread here because that leads to race + // conditions. Instead we set a flag that parts of the the code need + // to check and get out of any loops they might be in. terminating = true; + killed = true; + OnKill(); + } - if ( ! (started && pthread) ) - return; +void BasicThread::Done() + { + // XX fprintf(stderr, "DONE from thread %s\n", name); + DBG_LOG(DBG_THREADING, "Thread %s has finished", name); - pthread = 0; - pthread_kill(pthread, SIGTERM); + terminating = true; + killed = true; } void* BasicThread::launcher(void *arg) @@ -161,11 +199,12 @@ void* BasicThread::launcher(void *arg) sigdelset(&mask_set, SIGSEGV); sigdelset(&mask_set, SIGBUS); int res = pthread_sigmask(SIG_BLOCK, &mask_set, 0); - assert(res == 0); // + assert(res == 0); // Run thread's main function. thread->Run(); + thread->Done(); + return 0; } - diff --git a/src/threading/BasicThread.h b/src/threading/BasicThread.h index d47eb5c3c3..037420b077 100644 --- a/src/threading/BasicThread.h +++ b/src/threading/BasicThread.h @@ -5,7 +5,6 @@ #include #include -#include "Queue.h" #include "util.h" using namespace std; @@ -42,22 +41,25 @@ public: * * This method is safe to call from any thread. */ - const string& Name() const { return name; } + const char* Name() const { return name; } /** * Sets a descriptive name for the thread. This should be a string * that's useful in output presented to the user and uniquely * identifies the thread. * - * This method must be called only from the thread itself. + * This method must be called only from main thread at initialization + * time. */ - void SetName(const string& name); + void SetName(const char* name); /** * Set the name shown by the OS as the thread's description. Not * supported on all OSs. + * + * Must be called only from the child thread. */ - void SetOSName(const string& name); + void SetOSName(const char* name); /** * Starts the thread. Calling this methods will spawn a new OS thread @@ -68,6 +70,18 @@ public: */ void Start(); + /** + * Signals the thread to prepare for stopping. This must be called + * before Stop() and allows the thread to trigger shutting down + * without yet blocking for doing so. + * + * Calling this method has no effect if Start() hasn't been executed + * yet. + * + * Only Bro's main thread must call this method. + */ + void PrepareStop(); + /** * Signals the thread to stop. 
The method lets Terminating() now * return true. It does however not force the thread to terminate. @@ -88,6 +102,13 @@ public: */ bool Terminating() const { return terminating; } + /** + * Returns true if Kill() has been called. + * + * This method is safe to call from any thread. + */ + bool Killed() const { return killed; } + /** * A version of fmt() that the thread can safely use. * @@ -124,12 +145,24 @@ protected: virtual void OnStart() {} /** - * Executed with Stop(). This is a hook into stopping the thread. It - * will be called from Bro's main thread after the thread has been - * signaled to stop. + * Executed with PrepareStop() (and before OnStop()). This is a hook + * into preparing the thread for stopping. It will be called from + * Bro's main thread before the thread has been signaled to stop. + */ + virtual void OnPrepareStop() {} + + /** + * Executed with Stop() (and after OnPrepareStop()). This is a hook + * into stopping the thread. It will be called from Bro's main thread + * after the thread has been signaled to stop. */ virtual void OnStop() {} + /** + * Executed with Kill(). This is a hook into killing the thread. + */ + virtual void OnKill() {} + /** * Destructor. This will be called by the manager. * @@ -153,14 +186,18 @@ protected: */ void Kill(); + /** Called by child thread's launcher when it's done processing. */ + void Done(); + private: // pthread entry function. static void* launcher(void *arg); - string name; + const char* name; pthread_t pthread; bool started; // Set to to true once running. bool terminating; // Set to to true to signal termination. + bool killed; // Set to true once forcefully killed. // Used as a semaphore to tell the pthread thread when it may // terminate. diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index f1f9307b03..b997aeec47 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -30,6 +30,10 @@ void Manager::Terminate() do Process(); while ( did_process ); // Signal all to stop. + + for ( all_thread_list::iterator i = all_threads.begin(); i != all_threads.end(); i++ ) + (*i)->PrepareStop(); + for ( all_thread_list::iterator i = all_threads.begin(); i != all_threads.end(); i++ ) (*i)->Stop(); @@ -50,14 +54,14 @@ void Manager::Terminate() void Manager::AddThread(BasicThread* thread) { - DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name().c_str()); + DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name()); all_threads.push_back(thread); idle = false; } void Manager::AddMsgThread(MsgThread* thread) { - DBG_LOG(DBG_THREADING, "%s is a MsgThread ...", thread->Name().c_str()); + DBG_LOG(DBG_THREADING, "%s is a MsgThread ...", thread->Name()); msg_threads.push_back(thread); } @@ -114,6 +118,12 @@ void Manager::Process() { Message* msg = t->RetrieveOut(); + if ( ! msg ) + { + assert(t->Killed()); + break; + } + if ( msg->Process() ) { if ( network_time ) @@ -122,10 +132,9 @@ void Manager::Process() else { - string s = msg->Name() + " failed, terminating thread"; - reporter->Error("%s", s.c_str()); + reporter->Error("%s failed, terminating thread", msg->Name()); t->Stop(); - } + } delete msg; } diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 3913624654..3e06a3fe1e 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -29,16 +29,6 @@ private: double network_time; }; -// A dummy message that's only purpose is unblock the current read operation -// so that the child's Run() methods can check the termination status. 
-class UnblockMessage : public InputMessage -{ -public: - UnblockMessage(MsgThread* thread) : InputMessage("Unblock", thread) { } - - virtual bool Process() { return true; } -}; - /// Sends a heartbeat to the child thread. class HeartbeatMessage : public InputMessage { @@ -66,14 +56,16 @@ public: INTERNAL_WARNING, INTERNAL_ERROR }; - ReporterMessage(Type arg_type, MsgThread* thread, const string& arg_msg) + ReporterMessage(Type arg_type, MsgThread* thread, const char* arg_msg) : OutputMessage("ReporterMessage", thread) - { type = arg_type; msg = arg_msg; } + { type = arg_type; msg = copy_string(arg_msg); } + + ~ReporterMessage() { delete [] msg; } virtual bool Process(); private: - string msg; + const char* msg; Type type; }; @@ -82,18 +74,19 @@ private: class DebugMessage : public OutputMessage { public: - DebugMessage(DebugStream arg_stream, MsgThread* thread, const string& arg_msg) + DebugMessage(DebugStream arg_stream, MsgThread* thread, const char* arg_msg) : OutputMessage("DebugMessage", thread) - { stream = arg_stream; msg = arg_msg; } + { stream = arg_stream; msg = copy_string(arg_msg); } + + virtual ~DebugMessage() { delete [] msg; } virtual bool Process() { - string s = Object()->Name() + ": " + msg; - debug_logger.Log(stream, "%s", s.c_str()); + debug_logger.Log(stream, "%s: %s", Object()->Name(), msg); return true; } private: - string msg; + const char* msg; DebugStream stream; }; #endif @@ -104,41 +97,39 @@ private: Message::~Message() { + delete [] name; } bool ReporterMessage::Process() { - string s = Object()->Name() + ": " + msg; - const char* cmsg = s.c_str(); - switch ( type ) { case INFO: - reporter->Info("%s", cmsg); + reporter->Info("%s: %s", Object()->Name(), msg); break; case WARNING: - reporter->Warning("%s", cmsg); + reporter->Warning("%s: %s", Object()->Name(), msg); break; case ERROR: - reporter->Error("%s", cmsg); + reporter->Error("%s: %s", Object()->Name(), msg); break; case FATAL_ERROR: - reporter->FatalError("%s", cmsg); + reporter->FatalError("%s: %s", Object()->Name(), msg); break; case FATAL_ERROR_WITH_CORE: - reporter->FatalErrorWithCore("%s", cmsg); + reporter->FatalErrorWithCore("%s: %s", Object()->Name(), msg); break; case INTERNAL_WARNING: - reporter->InternalWarning("%s", cmsg); + reporter->InternalWarning("%s: %s", Object()->Name(), msg); break; case INTERNAL_ERROR : - reporter->InternalError("%s", cmsg); + reporter->InternalError("%s: %s", Object()->Name(), msg); break; default: @@ -148,62 +139,78 @@ bool ReporterMessage::Process() return true; } -MsgThread::MsgThread() : BasicThread() +MsgThread::MsgThread() : BasicThread(), queue_in(this, 0), queue_out(0, this) { cnt_sent_in = cnt_sent_out = 0; finished = false; - stopped = false; thread_mgr->AddMsgThread(this); } // Set by Bro's main signal handler. extern int signal_val; -void MsgThread::OnStop() +void MsgThread::OnPrepareStop() { - if ( stopped ) + if ( finished || Killed() ) return; + // XX fprintf(stderr, "Sending FINISH to thread %s ...\n", Name()); + // Signal thread to terminate and wait until it has acknowledged. SendIn(new FinishMessage(this, network_time), true); + } +void MsgThread::OnStop() + { + int signal_count = 0; int old_signal_val = signal_val; signal_val = 0; int cnt = 0; - bool aborted = 0; + uint64_t last_size = 0; + uint64_t cur_size = 0; - while ( ! finished ) + // XX fprintf(stderr, "WAITING for thread %s to stop ...\n", Name()); + + while ( ! (finished || Killed() ) ) { // Terminate if we get another kill signal. 
if ( signal_val == SIGTERM || signal_val == SIGINT ) { - // Abort all threads here so that we won't hang next - // on another one. - fprintf(stderr, "received signal while waiting for thread %s, aborting all ...\n", Name().c_str()); - thread_mgr->KillThreads(); - aborted = true; - break; + ++signal_count; + + if ( signal_count == 1 ) + { + // Abort all threads here so that we won't hang next + // on another one. + fprintf(stderr, "received signal while waiting for thread %s, aborting all ...\n", Name()); + thread_mgr->KillThreads(); + } + else + { + // More than one signal. Abort processing + // right away. + fprintf(stderr, "received another signal while waiting for thread %s, aborting processing\n", Name()); + exit(1); + } + + signal_val = 0; } - if ( ++cnt % 10000 == 0 ) // Insurance against broken threads ... - { - fprintf(stderr, "killing thread %s ...\n", Name().c_str()); - Kill(); - aborted = true; - break; - } + queue_in.WakeUp(); usleep(1000); } - Finished(); - signal_val = old_signal_val; + } - // One more message to make sure the current queue read operation unblocks. - if ( ! aborted ) - SendIn(new UnblockMessage(this), true); +void MsgThread::OnKill() + { + // Wake up the reader if it's currently waiting for input. This is + // just an optimization to make it terminate more quickly; even + // without it, the read will eventually time out. + queue_in.WakeUp(); } void MsgThread::Heartbeat() @@ -213,9 +220,7 @@ void MsgThread::Heartbeat() void MsgThread::HeartbeatInChild() { - string n = Name(); - - n = Fmt("bro: %s (%" PRIu64 "/%" PRIu64 ")", n.c_str(), + string n = Fmt("bro: %s (%" PRIu64 "/%" PRIu64 ")", Name(), cnt_sent_in - queue_in.Size(), cnt_sent_out - queue_out.Size()); @@ -283,7 +288,7 @@ void MsgThread::SendIn(BasicInputMessage* msg, bool force) return; } - DBG_LOG(DBG_THREADING, "Sending '%s' to %s ...", msg->Name().c_str(), Name().c_str()); + DBG_LOG(DBG_THREADING, "Sending '%s' to %s ...", msg->Name(), Name()); queue_in.Put(msg); ++cnt_sent_in; @@ -306,9 +311,10 @@ void MsgThread::SendOut(BasicOutputMessage* msg, bool force) BasicOutputMessage* MsgThread::RetrieveOut() { BasicOutputMessage* msg = queue_out.Get(); - assert(msg); + if ( ! msg ) + return 0; - DBG_LOG(DBG_THREADING, "Retrieved '%s' from %s", msg->Name().c_str(), Name().c_str()); + DBG_LOG(DBG_THREADING, "Retrieved '%s' from %s", msg->Name(), Name()); return msg; } @@ -316,10 +322,12 @@ BasicOutputMessage* MsgThread::RetrieveOut() BasicInputMessage* MsgThread::RetrieveIn() { BasicInputMessage* msg = queue_in.Get(); - assert(msg); + + if ( ! msg ) + return 0; #ifdef DEBUG - string s = Fmt("Retrieved '%s' in %s", msg->Name().c_str(), Name().c_str()); + string s = Fmt("Retrieved '%s' in %s", msg->Name(), Name()); Debug(DBG_THREADING, s.c_str()); #endif @@ -328,15 +336,18 @@ BasicInputMessage* MsgThread::RetrieveIn() void MsgThread::Run() { - while ( ! finished ) + while ( ! (finished || Killed() ) ) { BasicInputMessage* msg = RetrieveIn(); + if ( ! msg ) + continue; + bool result = msg->Process(); if ( !
result ) { - string s = msg->Name() + " failed, terminating thread (MsgThread)"; + string s = Fmt("%s failed, terminating thread (MsgThread)", Name()); Error(s.c_str()); break; } @@ -344,7 +355,7 @@ void MsgThread::Run() delete msg; } - Finished(); + Finished(); } void MsgThread::GetStats(Stats* stats) diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index d929c1f806..1d9b17c7d9 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -228,6 +228,8 @@ protected: */ virtual void Run(); virtual void OnStop(); + virtual void OnPrepareStop(); + virtual void OnKill(); private: /** @@ -293,7 +295,6 @@ private: uint64_t cnt_sent_out; // Counts message sent by child. bool finished; // Set to true by Finished message. - bool stopped; // Set to true by OnStop(). }; /** @@ -312,7 +313,7 @@ public: * what's passed into the constructor and used mainly for debugging * purposes. */ - const string& Name() const { return name; } + const char* Name() const { return name; } /** * Callback that must be overridden for processing a message. @@ -326,10 +327,11 @@ protected: * @param arg_name A descriptive name for the type of message. Used * mainly for debugging purposes. */ - Message(const string& arg_name) { name = arg_name; } + Message(const char* arg_name) + { name = copy_string(arg_name); } private: - string name; + const char* name; }; /** @@ -344,7 +346,7 @@ protected: * @param name A descriptive name for the type of message. Used * mainly for debugging purposes. */ - BasicInputMessage(const string& name) : Message(name) {} + BasicInputMessage(const char* name) : Message(name) {} }; /** @@ -359,7 +361,7 @@ protected: * @param name A descriptive name for the type of message. Used * mainly for debugging purposes. */ - BasicOutputMessage(const string& name) : Message(name) {} + BasicOutputMessage(const char* name) : Message(name) {} }; /** @@ -384,7 +386,7 @@ protected: * * @param arg_object: An object to store with the message. */ - InputMessage(const string& name, O* arg_object) : BasicInputMessage(name) + InputMessage(const char* name, O* arg_object) : BasicInputMessage(name) { object = arg_object; } private: @@ -413,7 +415,7 @@ protected: * * @param arg_object An object to store with the message. */ - OutputMessage(const string& name, O* arg_object) : BasicOutputMessage(name) + OutputMessage(const char* name, O* arg_object) : BasicOutputMessage(name) { object = arg_object; } private: diff --git a/src/threading/Queue.h b/src/threading/Queue.h index b2ccd2a0ce..29a8084352 100644 --- a/src/threading/Queue.h +++ b/src/threading/Queue.h @@ -1,4 +1,3 @@ - #ifndef THREADING_QUEUE_H #define THREADING_QUEUE_H @@ -6,11 +5,28 @@ #include #include #include +#include #include "Reporter.h" +#include "BasicThread.h" #undef Queue // Defined elsewhere unfortunately. +#if 1 +// We don't have pthread spinlocks on DARWIN. +# define PTHREAD_MUTEX_T pthread_mutex_t +# define PTHREAD_MUTEX_LOCK(x) pthread_mutex_lock(x) +# define PTHREAD_MUTEX_UNLOCK(x) pthread_mutex_unlock(x) +# define PTHREAD_MUTEX_INIT(x) pthread_mutex_init(x, 0) +# define PTHREAD_MUTEX_DESTROY(x) pthread_mutex_destroy(x) +#else +# define PTHREAD_MUTEX_T pthread_spinlock_t +# define PTHREAD_MUTEX_LOCK(x) pthread_spin_lock(x) +# define PTHREAD_MUTEX_UNLOCK(x) pthread_spin_unlock(x) +# define PTHREAD_MUTEX_INIT(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE) +# define PTHREAD_MUTEX_DESTROY(x) pthread_spin_destroy(x) +#endif + namespace threading { /** @@ -30,8 +46,12 @@ class Queue public: /** * Constructor.
+ * + * reader, writer: The corresponding threads. This is for checking + * whether they have terminated so that we can abort I/O operations. + * Can be left null for the main thread. */ - Queue(); + Queue(BasicThread* arg_reader, BasicThread* arg_writer); /** * Destructor. @@ -39,7 +59,9 @@ public: ~Queue(); /** - * Retrieves one elment. + * Retrieves one element. This may block for a little while if no + * input is available and will eventually return a null element if + * nothing shows up. */ T Get(); @@ -60,6 +82,11 @@ public: */ bool MaybeReady() { return ( ( read_ptr - write_ptr) != 0 ); } + /** Wake up the reader if it's currently blocked for input. This is + primarily to give it a chance to check termination quickly. + */ + void WakeUp(); + /** * Returns the number of queued items not yet retrieved. */ @@ -82,45 +109,50 @@ public: void GetStats(Stats* stats); private: - static const int NUM_QUEUES = 8; + static const int NUM_QUEUES = 15; - pthread_mutex_t mutex[NUM_QUEUES]; // Mutex protected shared accesses. + PTHREAD_MUTEX_T mutex[NUM_QUEUES]; // Mutex protected shared accesses. pthread_cond_t has_data[NUM_QUEUES]; // Signals when data becomes available std::queue messages[NUM_QUEUES]; // Actually holds the queued messages int read_ptr; // Where the next operation will read from int write_ptr; // Where the next operation will write to + BasicThread* reader; + BasicThread* writer; + // Statistics. uint64_t num_reads; uint64_t num_writes; }; -inline static void safe_lock(pthread_mutex_t* mutex) +inline static void safe_lock(PTHREAD_MUTEX_T* mutex) { - if ( pthread_mutex_lock(mutex) != 0 ) + if ( PTHREAD_MUTEX_LOCK(mutex) != 0 ) reporter->FatalErrorWithCore("cannot lock mutex"); } -inline static void safe_unlock(pthread_mutex_t* mutex) +inline static void safe_unlock(PTHREAD_MUTEX_T* mutex) { - if ( pthread_mutex_unlock(mutex) != 0 ) + if ( PTHREAD_MUTEX_UNLOCK(mutex) != 0 ) reporter->FatalErrorWithCore("cannot unlock mutex"); } template -inline Queue::Queue() +inline Queue::Queue(BasicThread* arg_reader, BasicThread* arg_writer) { read_ptr = 0; write_ptr = 0; num_reads = num_writes = 0; + reader = arg_reader; + writer = arg_writer; for( int i = 0; i < NUM_QUEUES; ++i ) { - if ( pthread_cond_init(&has_data[i], NULL) != 0 ) + if ( pthread_cond_init(&has_data[i], 0) != 0 ) reporter->FatalError("cannot init queue condition variable"); - if ( pthread_mutex_init(&mutex[i], NULL) != 0 ) + if ( PTHREAD_MUTEX_INIT(&mutex[i]) != 0 ) reporter->FatalError("cannot init queue mutex"); } } @@ -131,19 +163,30 @@ inline Queue::~Queue() for( int i = 0; i < NUM_QUEUES; ++i ) { pthread_cond_destroy(&has_data[i]); - pthread_mutex_destroy(&mutex[i]); + PTHREAD_MUTEX_DESTROY(&mutex[i]); } } template inline T Queue::Get() { + if ( (reader && reader->Killed()) || (writer && writer->Killed()) ) + return 0; + safe_lock(&mutex[read_ptr]); int old_read_ptr = read_ptr; if ( messages[read_ptr].empty() ) - pthread_cond_wait(&has_data[read_ptr], &mutex[read_ptr]); + { + struct timespec ts; + ts.tv_sec = time(0) + 5; + ts.tv_nsec = 0; + + pthread_cond_timedwait(&has_data[read_ptr], &mutex[read_ptr], &ts); + safe_unlock(&mutex[read_ptr]); + return 0; + } T data = messages[read_ptr].front(); messages[read_ptr].pop(); @@ -222,6 +265,17 @@ inline void Queue::GetStats(Stats* stats) safe_unlock(&mutex[i]); } +template +inline void Queue::WakeUp() + { + for ( int i = 0; i < NUM_QUEUES; i++ ) + { + safe_lock(&mutex[i]); + pthread_cond_signal(&has_data[i]); + safe_unlock(&mutex[i]); + } + } + } diff --git 
a/src/threading/SerialTypes.cc b/src/threading/SerialTypes.cc index 4494e1b245..c0e26ccb32 100644 --- a/src/threading/SerialTypes.cc +++ b/src/threading/SerialTypes.cc @@ -11,23 +11,54 @@ bool Field::Read(SerializationFormat* fmt) { int t; int st; + string tmp_name; + bool have_2nd; - bool success = (fmt->Read(&name, "name") - && fmt->Read(&secondary_name, "secondary_name") + if ( ! fmt->Read(&have_2nd, "have_2nd") ) + return false; + + if ( have_2nd ) + { + string tmp_secondary_name; + if ( ! fmt->Read(&tmp_secondary_name, "secondary_name") ) + return false; + + secondary_name = copy_string(tmp_secondary_name.c_str()); + } + else + secondary_name = 0; + + bool success = (fmt->Read(&tmp_name, "name") && fmt->Read(&t, "type") && fmt->Read(&st, "subtype") && fmt->Read(&optional, "optional")); + if ( ! success ) + return false; + + name = copy_string(tmp_name.c_str()); + type = (TypeTag) t; subtype = (TypeTag) st; - return success; + return true; } bool Field::Write(SerializationFormat* fmt) const { + assert(name); + + if ( secondary_name ) + { + if ( ! (fmt->Write(true, "have_2nd") + && fmt->Write(secondary_name, "secondary_name")) ) + return false; + } + else + if ( ! fmt->Write(false, "have_2nd") ) + return false; + return (fmt->Write(name, "name") - && fmt->Write(secondary_name, "secondary_name") && fmt->Write((int)type, "type") && fmt->Write((int)subtype, "subtype"), fmt->Write(optional, "optional")); @@ -51,7 +82,7 @@ Value::~Value() { if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE || type == TYPE_FUNC) && present ) - delete val.string_val; + delete [] val.string_val.data; if ( type == TYPE_TABLE && present ) { @@ -224,10 +255,7 @@ bool Value::Read(SerializationFormat* fmt) case TYPE_STRING: case TYPE_FILE: case TYPE_FUNC: - { - val.string_val = new string; - return fmt->Read(val.string_val, "string"); - } + return fmt->Read(&val.string_val.data, &val.string_val.length, "string"); case TYPE_TABLE: { @@ -339,7 +367,7 @@ bool Value::Write(SerializationFormat* fmt) const case TYPE_STRING: case TYPE_FILE: case TYPE_FUNC: - return fmt->Write(*val.string_val, "string"); + return fmt->Write(val.string_val.data, val.string_val.length, "string"); case TYPE_TABLE: { diff --git a/src/threading/SerialTypes.h b/src/threading/SerialTypes.h index 283d88bf4c..60aee2411e 100644 --- a/src/threading/SerialTypes.h +++ b/src/threading/SerialTypes.h @@ -12,6 +12,7 @@ using namespace std; class SerializationFormat; +class RemoteSerializer; namespace threading { @@ -19,10 +20,10 @@ namespace threading { * Definition of a log file, i.e., one column of a log stream. */ struct Field { - string name; //! Name of the field. + const char* name; //! Name of the field. //! Needed by input framework. Port fields have two names (one for the //! port, one for the type), and this specifies the secondary name. - string secondary_name; + const char* secondary_name; TypeTag type; //! Type of the field. TypeTag subtype; //! Inner type for sets. bool optional; //! True if field is optional. @@ -30,13 +31,24 @@ struct Field { /** * Constructor. */ - Field() { subtype = TYPE_VOID; optional = false; } + Field(const char* name, const char* secondary_name, TypeTag type, TypeTag subtype, bool optional) + : name(name ? copy_string(name) : 0), + secondary_name(secondary_name ? copy_string(secondary_name) : 0), + type(type), subtype(subtype), optional(optional) { } /** * Copy constructor. 
*/ Field(const Field& other) - : name(other.name), type(other.type), subtype(other.subtype), optional(other.optional) { } + : name(other.name ? copy_string(other.name) : 0), + secondary_name(other.secondary_name ? copy_string(other.secondary_name) : 0), + type(other.type), subtype(other.subtype), optional(other.optional) { } + + ~Field() + { + delete [] name; + delete [] secondary_name; + } /** * Unserializes a field. @@ -63,6 +75,12 @@ struct Field { * thread-safe. */ string TypeName() const; + +private: + friend class ::RemoteSerializer; + + // Force usage of constructor above. + Field() {}; }; /** @@ -102,7 +120,11 @@ struct Value { vec_t vector_val; addr_t addr_val; subnet_t subnet_val; - string* string_val; + + struct { + char* data; + int length; + } string_val; } val; /** @@ -147,7 +169,7 @@ struct Value { static bool IsCompatibleType(BroType* t, bool atomic_only=false); private: -friend class ::IPAddr; + friend class ::IPAddr; Value(const Value& other) { } // Disabled. }; diff --git a/testing/btest/istate/events.bro b/testing/btest/istate/events.bro index 1f05dfc729..9298ac1c01 100644 --- a/testing/btest/istate/events.bro +++ b/testing/btest/istate/events.bro @@ -11,8 +11,8 @@ # @TEST-EXEC: cat receiver/http.log $SCRIPTS/diff-remove-timestamps >receiver.http.log # @TEST-EXEC: cmp sender.http.log receiver.http.log # -# @TEST-EXEC: bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.snd.log -# @TEST-EXEC: bro -x receiver/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.rec.log +# @TEST-EXEC: bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' | $SCRIPTS/diff-remove-timestamps >events.snd.log +# @TEST-EXEC: bro -x receiver/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' | $SCRIPTS/diff-remove-timestamps >events.rec.log # @TEST-EXEC: btest-diff events.rec.log # @TEST-EXEC: btest-diff events.snd.log # @TEST-EXEC: cmp events.rec.log events.snd.log diff --git a/testing/scripts/diff-remove-timestamps b/testing/scripts/diff-remove-timestamps index 2b029789de..9398c1cb4b 100755 --- a/testing/scripts/diff-remove-timestamps +++ b/testing/scripts/diff-remove-timestamps @@ -3,6 +3,4 @@ # Replace anything which looks like timestamps with XXXs (including the #start/end markers in logs). sed 's/[0-9]\{10\}\.[0-9]\{2,8\}/XXXXXXXXXX.XXXXXX/g' | \ -sed 's/^#\(start\|end\).20..-..-..-..-..-..$/#\1 XXXX-XX-XX-XX-XX-XX/g' | \ -grep -v '#start' | grep -v '#end' - +sed 's/^#\(start\|end\).20..-..-..-..-..-..$/#\1 XXXX-XX-XX-XX-XX-XX/g' From 5cfb8d65c3a205a3a8c03dccc041a8b24d070a49 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 19 Jul 2012 18:57:15 -0700 Subject: [PATCH 494/651] Updating tests for the #start/#end change. 
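The #start/#end marker lines that these baselines gain record when each log was opened and closed, rendered as YYYY-MM-DD-HH-MM-SS. In the baselines below that rendering matches the UTC form of the epoch timestamps in the same logs (for example, 1332784981.078396 appears as 2012-03-26-18-03-01), and the previous patch's update to testing/scripts/diff-remove-timestamps keeps these markers (only canonicalizing their dates) instead of grepping them out, which is why every affected baseline picks up a #start and a #end line. As a minimal sketch of the format only (print_marker is a hypothetical helper, not the actual Ascii writer code):

#include <cstdio>
#include <ctime>

// Render a "#start"/"#end" marker in the YYYY-MM-DD-HH-MM-SS form used by
// the log headers; the timestamp is treated as UTC here.
static void print_marker(const char* tag, double t)
	{
	char buf[32];
	time_t secs = (time_t) t;
	struct tm tm;
	gmtime_r(&secs, &tm);
	strftime(buf, sizeof(buf), "%Y-%m-%d-%H-%M-%S", &tm);
	printf("#%s %s\n", tag, buf);
	}

int main()
	{
	print_marker("start", 1332784981.078396); // prints "#start 2012-03-26-18-03-01"
	print_marker("end", 1332784981.078396);   // prints "#end 2012-03-26-18-03-01"
	return 0;
	}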
--- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- cmake | 2 +- src/logging/writers/Ascii.cc | 1 + src/threading/BasicThread.cc | 8 +++-- src/threading/BasicThread.h | 7 ++-- src/threading/MsgThread.cc | 1 + testing/btest/Baseline/bifs.to_double/out | 2 +- testing/btest/Baseline/core.checksums/bad.out | 20 ++++++++++++ .../btest/Baseline/core.checksums/good.out | 14 ++++++++ .../core.disable-mobile-ipv6/weird.log | 2 ++ .../Baseline/core.expr-exception/reporter.log | 20 ++++++------ testing/btest/Baseline/core.ipv6-frag/dns.log | 2 ++ .../Baseline/core.print-bpf-filters/conn.log | 2 ++ .../Baseline/core.print-bpf-filters/output | 16 +++++++--- .../core.reporter-error-in-handler/output | 2 +- .../Baseline/core.reporter-fmt-strings/output | 2 +- .../Baseline/core.reporter-parse-error/output | 2 +- .../core.reporter-runtime-error/output | 2 +- .../core.reporter-type-mismatch/output | 6 ++-- .../Baseline/core.reporter/logger-test.log | 12 +++---- testing/btest/Baseline/core.reporter/output | 6 ++-- testing/btest/Baseline/core.truncation/output | 8 +++++ .../Baseline/core.tunnels.ayiya/conn.log | 2 ++ .../Baseline/core.tunnels.ayiya/http.log | 2 ++ .../Baseline/core.tunnels.ayiya/tunnel.log | 2 ++ .../core.tunnels.false-teredo/dpd.log | 2 ++ .../core.tunnels.false-teredo/weird.log | 2 ++ .../Baseline/core.tunnels.teredo/conn.log | 2 ++ .../Baseline/core.tunnels.teredo/http.log | 2 ++ .../Baseline/core.tunnels.teredo/tunnel.log | 2 ++ .../conn.log | 2 ++ .../http.log | 2 ++ .../tunnel.log | 2 ++ .../weird.log | 2 ++ .../btest/Baseline/core.vlan-mpls/conn.log | 2 ++ .../canonified_loaded_scripts.log | 2 ++ .../coverage.coverage-blacklist/output | 10 +++--- .../canonified_loaded_scripts.log | 2 ++ .../btest/Baseline/istate.broccoli/bro.log | 6 ++-- .../istate.events-ssl/receiver.http.log | 4 ++- .../istate.events-ssl/sender.http.log | 4 ++- .../Baseline/istate.events/receiver.http.log | 4 ++- .../Baseline/istate.events/sender.http.log | 4 ++- .../Baseline/istate.pybroccoli/bro..stdout | 2 +- .../istate.pybroccoli/python..stdout.filtered | 6 ++-- .../language.wrong-delete-field/output | 2 +- .../send.log | 32 ++++++++++--------- .../ssh-new-default.log | 6 ++-- .../ssh.log | 2 ++ .../ssh-filtered.log | 10 +++--- .../test.log | 2 ++ .../http.log | 2 ++ .../test.log | 2 ++ .../ssh.log | 10 +++--- .../test.log | 4 +-- .../ssh.log | 10 +++--- .../test.log | 2 ++ .../ssh.log | 2 ++ .../ssh.log | 2 ++ .../ssh.ds.txt | 10 +++--- .../ssh.log | 12 ++++--- .../output | 4 +-- .../ssh.log | 2 ++ .../ssh.log | 4 ++- .../ssh.log | 12 ++++--- .../local.log | 2 ++ .../remote.log | 2 ++ .../output | 28 ++++++++++++---- .../test.failure.log | 4 ++- .../test.success.log | 4 ++- .../receiver.test.log | 4 ++- .../sender.test.failure.log | 8 +++-- .../sender.test.log | 12 ++++--- .../sender.test.success.log | 6 ++-- .../ssh.failure.log | 6 ++-- .../ssh.log | 8 +++-- .../out | 20 ++++++++++++ .../out | 20 ++++++++++++ .../output | 12 ++++--- .../ssh.log | 12 ++++--- .../ssh.log | 4 ++- .../testing.log | 2 ++ .../ssh.log | 2 ++ .../manager-1.metrics.log | 8 +++-- .../metrics.log | 8 +++-- .../manager-1.notice.log | 4 ++- .../notice.log | 6 ++-- .../manager-1.notice.log | 4 ++- .../manager-1.notice.log | 4 ++- .../notice.log | 4 ++- .../conn.log | 2 ++ .../ftp.log | 2 ++ .../conn.log | 2 ++ .../ftp.log | 2 ++ .../http.log | 2 ++ .../http.log | 2 ++ .../http.log | 2 ++ .../http.log | 2 ++ .../scripts.base.protocols.irc.basic/irc.log | 2 ++ .../irc.log | 2 ++ .../smtp.log | 2 ++ 
.../smtp_entities.log | 2 ++ .../smtp_entities.log | 2 ++ .../socks.log | 2 ++ .../tunnel.log | 2 ++ .../socks.log | 2 ++ .../tunnel.log | 2 ++ .../tunnel.log | 2 ++ .../scripts.base.protocols.ssl.basic/ssl.log | 2 ++ .../knownhosts-all.log | 2 ++ .../knownhosts-local.log | 2 ++ .../knownhosts-remote.log | 2 ++ .../knownservices-all.log | 2 ++ .../knownservices-local.log | 2 ++ .../knownservices-remote.log | 2 ++ .../dns.log | 2 ++ testing/btest/istate/events-ssl.bro | 4 +-- testing/btest/istate/events.bro | 4 +-- .../base/frameworks/logging/ascii-escape.bro | 1 + testing/scripts/diff-remove-timestamps | 2 +- 123 files changed, 442 insertions(+), 162 deletions(-) diff --git a/aux/binpac b/aux/binpac index b4094cb75e..4ad8d15b63 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit b4094cb75e0a7769123f7db1f5d73f3f9f1c3977 +Subproject commit 4ad8d15b6395925c9875c9d2912a6cc3b4918e0a diff --git a/aux/bro-aux b/aux/bro-aux index 2038e3de04..c691c01e9c 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 2038e3de042115c3caa706426e16c830c1fd1e9e +Subproject commit c691c01e9cefae5a79bcd4b0f84ca387c8c587a7 diff --git a/aux/broccoli b/aux/broccoli index 07866915a1..8234b8903c 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 07866915a1450ddd25b888917f494b4824b0cc3f +Subproject commit 8234b8903cbc775f341bdb6a1c0159981d88d27b diff --git a/aux/broctl b/aux/broctl index 892b60edb9..d5ecd1a42c 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 892b60edb967bb456872638f22ba994e84530137 +Subproject commit d5ecd1a42c04b0dca332edc31811e5a6d0f7f2fb diff --git a/cmake b/cmake index 96f3d92aca..2a72c5e08e 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 96f3d92acadbe1ae64f410e974c5ff503903394b +Subproject commit 2a72c5e08e018cf632033af3920432d5f684e130 diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 99fd3f3c6e..d3c210ce47 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -2,6 +2,7 @@ #include #include +#include #include "NetVar.h" #include "threading/SerialTypes.h" diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index af57c26939..d4a82316e8 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -93,9 +93,11 @@ const char* BasicThread::Fmt(const char* format, ...) const char* BasicThread::Strerror(int err) { - static char buf[128] = ""; - strerror_r(err, buf, sizeof(buf)); - return buf; + if ( ! strerr_buffer ) + strerr_buffer = new char[256]; + + strerror_r(err, strerr_buffer, 256); + return strerr_buffer; } void BasicThread::Start() diff --git a/src/threading/BasicThread.h b/src/threading/BasicThread.h index 037420b077..e17324e948 100644 --- a/src/threading/BasicThread.h +++ b/src/threading/BasicThread.h @@ -120,8 +120,8 @@ public: /** * A version of strerror() that the thread can safely use. This is * essentially a wrapper around strerror_r(). Note that it keeps a - * single static buffer internally so the result remains valid only - * until the next call. + * single buffer per thread internally so the result remains valid + * only until the next call. */ const char* Strerror(int err); @@ -207,6 +207,9 @@ private: char* buf; unsigned int buf_len; + // For implementating Strerror(). 
+ char* strerr_buffer; + static uint64_t thread_counter; }; diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 3e06a3fe1e..0e55b99ba1 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -5,6 +5,7 @@ #include "Manager.h" #include +#include using namespace threading; diff --git a/testing/btest/Baseline/bifs.to_double/out b/testing/btest/Baseline/bifs.to_double/out index 8c2fef496a..8e172dcaa6 100644 --- a/testing/btest/Baseline/bifs.to_double/out +++ b/testing/btest/Baseline/bifs.to_double/out @@ -3,4 +3,4 @@ -60.0 3600.0 86400.0 -1337982322.762159 +1342748947.655087 diff --git a/testing/btest/Baseline/core.checksums/bad.out b/testing/btest/Baseline/core.checksums/bad.out index 44a27f7f0f..de4538e32b 100644 --- a/testing/btest/Baseline/core.checksums/bad.out +++ b/testing/btest/Baseline/core.checksums/bad.out @@ -3,81 +3,101 @@ #empty_field (empty) #unset_field - #path weird +#start 2012-03-26-18-03-01 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332784981.078396 - - - - - bad_IP_checksum - F bro +#end 2012-03-26-18-03-01 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-03-26-18-01-25 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332784885.686428 UWkUyAuUGXf 127.0.0.1 30000 127.0.0.1 80 bad_TCP_checksum - F bro +#end 2012-03-26-18-01-25 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-03-26-18-02-13 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332784933.501023 UWkUyAuUGXf 127.0.0.1 30000 127.0.0.1 13000 bad_UDP_checksum - F bro +#end 2012-03-26-18-02-13 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-04-10-16-29-23 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075363.536871 UWkUyAuUGXf 192.168.1.100 8 192.168.1.101 0 bad_ICMP_checksum - F bro +#end 2012-04-10-16-29-23 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-03-26-18-06-50 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332785210.013051 - - - - - routing0_hdr - F bro 1332785210.013051 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 80 bad_TCP_checksum - F bro +#end 2012-03-26-18-06-50 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-03-26-17-23-00 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332782580.798420 - - - - - routing0_hdr - F bro 1332782580.798420 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 13000 bad_UDP_checksum - F bro +#end 2012-03-26-17-23-00 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-04-10-16-25-11 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075111.800086 - - - - - routing0_hdr - F bro 1334075111.800086 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:78:1:32::1 129 bad_ICMP_checksum 
- F bro +#end 2012-04-10-16-25-11 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-03-26-18-07-30 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332785250.469132 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 80 bad_TCP_checksum - F bro +#end 2012-03-26-18-07-30 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-03-26-17-02-22 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332781342.923813 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 13000 bad_UDP_checksum - F bro +#end 2012-03-26-17-02-22 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-04-10-16-22-19 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334074939.467194 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F bro +#end 2012-04-10-16-22-19 diff --git a/testing/btest/Baseline/core.checksums/good.out b/testing/btest/Baseline/core.checksums/good.out index 0010974b7f..ed6c071ffc 100644 --- a/testing/btest/Baseline/core.checksums/good.out +++ b/testing/btest/Baseline/core.checksums/good.out @@ -3,54 +3,68 @@ #empty_field (empty) #unset_field - #path weird +#start 2012-04-10-16-22-19 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334074939.467194 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F bro +#end 2012-04-10-16-22-19 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-03-26-18-05-25 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332785125.596793 - - - - - routing0_hdr - F bro +#end 2012-03-26-18-05-25 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-03-26-17-21-48 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332782508.592037 - - - - - routing0_hdr - F bro +#end 2012-03-26-17-21-48 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-04-10-16-23-47 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075027.053380 - - - - - routing0_hdr - F bro +#end 2012-04-10-16-23-47 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-04-10-16-23-47 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075027.053380 - - - - - routing0_hdr - F bro +#end 2012-04-10-16-23-47 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-04-10-16-23-47 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075027.053380 - - - - - routing0_hdr - F bro +#end 2012-04-10-16-23-47 #separator \x09 #set_separator 
, #empty_field (empty) #unset_field - #path weird +#start 2012-04-10-16-23-47 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075027.053380 - - - - - routing0_hdr - F bro +#end 2012-04-10-16-23-47 diff --git a/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log b/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log index 478cfe8667..d29456f75f 100644 --- a/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log +++ b/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path weird +#start 2012-04-05-21-56-51 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1333663011.602839 - - - - - unknown_protocol_135 - F bro +#end 2012-04-05-21-56-51 diff --git a/testing/btest/Baseline/core.expr-exception/reporter.log b/testing/btest/Baseline/core.expr-exception/reporter.log index 2d0441f48a..f9e33d9718 100644 --- a/testing/btest/Baseline/core.expr-exception/reporter.log +++ b/testing/btest/Baseline/core.expr-exception/reporter.log @@ -3,14 +3,16 @@ #empty_field (empty) #unset_field - #path reporter +#start 2011-03-18-19-06-08 #fields ts level message location #types time enum string string -1300475168.783842 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.915940 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.916118 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.918295 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.952193 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.952228 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.954761 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.962628 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475169.780331 Reporter::ERROR field value missing [c$ftp] /home/jsiwek/bro/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.783842 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.915940 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.916118 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.918295 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.952193 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.952228 Reporter::ERROR field value missing [c$ftp] 
/da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.954761 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.962628 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475169.780331 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +#end 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/core.ipv6-frag/dns.log b/testing/btest/Baseline/core.ipv6-frag/dns.log index 251f35d789..2003d1f253 100644 --- a/testing/btest/Baseline/core.ipv6-frag/dns.log +++ b/testing/btest/Baseline/core.ipv6-frag/dns.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path dns +#start 2012-03-07-01-37-58 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs #types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] 1331084278.438444 UWkUyAuUGXf 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51850 2607:f740:b::f93 53 udp 3903 txtpadding_323.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 1331084293.592245 arKYeMETxOg 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51851 2607:f740:b::f93 53 udp 40849 txtpadding_3230.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 +#end 2012-03-07-01-38-18 diff --git a/testing/btest/Baseline/core.print-bpf-filters/conn.log b/testing/btest/Baseline/core.print-bpf-filters/conn.log index b563c4a3ed..4033b64e2a 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/conn.log +++ b/testing/btest/Baseline/core.print-bpf-filters/conn.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path conn +#start 2005-10-07-23-23-57 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1128727435.450898 UWkUyAuUGXf 141.42.64.125 56730 125.190.109.199 80 tcp http 1.733303 98 9417 SF - 0 ShADdFaf 12 730 10 9945 (empty) +#end 2005-10-07-23-23-57 diff --git a/testing/btest/Baseline/core.print-bpf-filters/output b/testing/btest/Baseline/core.print-bpf-filters/output index d1c2d47893..e4bc04192a 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/output +++ b/testing/btest/Baseline/core.print-bpf-filters/output @@ -3,30 +3,38 @@ #empty_field (empty) #unset_field - #path packet_filter +#start 1970-01-01-00-00-00 #fields ts node filter init success #types time string string bool bool -1340229717.179155 - ip or not ip T T +1342748953.570646 - ip or not ip T T +#end #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path packet_filter +#start 1970-01-01-00-00-00 #fields ts node filter init success #types time string string bool bool -1340229717.462355 - (((((((((((((((((((((((((port 53) or (tcp port 989)) or (tcp port 443)) or (port 6669)) or (udp and port 5353)) or (port 6668)) or (tcp port 1080)) or (udp and port 5355)) or (tcp port 22)) or (tcp port 995)) or 
(port 21)) or (tcp port 25 or tcp port 587)) or (port 6667)) or (tcp port 614)) or (tcp port 990)) or (udp port 137)) or (tcp port 993)) or (tcp port 5223)) or (port 514)) or (tcp port 585)) or (tcp port 992)) or (tcp port 563)) or (tcp port 994)) or (tcp port 636)) or (tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888))) or (port 6666) T T +1342748953.898675 - (((((((((((((((((((((((((port 53) or (tcp port 989)) or (tcp port 443)) or (port 6669)) or (udp and port 5353)) or (port 6668)) or (tcp port 1080)) or (udp and port 5355)) or (tcp port 22)) or (tcp port 995)) or (port 21)) or (tcp port 25 or tcp port 587)) or (port 6667)) or (tcp port 614)) or (tcp port 990)) or (udp port 137)) or (tcp port 993)) or (tcp port 5223)) or (port 514)) or (tcp port 585)) or (tcp port 992)) or (tcp port 563)) or (tcp port 994)) or (tcp port 636)) or (tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888))) or (port 6666) T T +#end #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path packet_filter +#start 1970-01-01-00-00-00 #fields ts node filter init success #types time string string bool bool -1340229717.733007 - port 42 T T +1342748954.278211 - port 42 T T +#end #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path packet_filter +#start 1970-01-01-00-00-00 #fields ts node filter init success #types time string string bool bool -1340229718.001009 - port 56730 T T +1342748954.883780 - port 56730 T T +#end 2005-10-07-23-23-57 diff --git a/testing/btest/Baseline/core.reporter-error-in-handler/output b/testing/btest/Baseline/core.reporter-error-in-handler/output index 3d8aa6ff54..83b310ab61 100644 --- a/testing/btest/Baseline/core.reporter-error-in-handler/output +++ b/testing/btest/Baseline/core.reporter-error-in-handler/output @@ -1,2 +1,2 @@ -error in /Users/robin/bro/master/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.bro, line 22: no such index (a[2]) +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.bro, line 22: no such index (a[2]) 1st error printed on script level diff --git a/testing/btest/Baseline/core.reporter-fmt-strings/output b/testing/btest/Baseline/core.reporter-fmt-strings/output index 4842dd9fc5..bbd76f3447 100644 --- a/testing/btest/Baseline/core.reporter-fmt-strings/output +++ b/testing/btest/Baseline/core.reporter-fmt-strings/output @@ -1 +1 @@ -error in /Users/robin/bro/master/testing/btest/.tmp/core.reporter-fmt-strings/reporter-fmt-strings.bro, line 9: not an event (dont_interpret_this(%s)) +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-fmt-strings/reporter-fmt-strings.bro, line 9: not an event (dont_interpret_this(%s)) diff --git a/testing/btest/Baseline/core.reporter-parse-error/output b/testing/btest/Baseline/core.reporter-parse-error/output index 7606fe5667..76535f75d1 100644 --- a/testing/btest/Baseline/core.reporter-parse-error/output +++ b/testing/btest/Baseline/core.reporter-parse-error/output @@ -1 +1 @@ -error in /Users/robin/bro/master/testing/btest/.tmp/core.reporter-parse-error/reporter-parse-error.bro, line 7: unknown identifier TESTFAILURE, at or near "TESTFAILURE" +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-parse-error/reporter-parse-error.bro, line 7: unknown identifier TESTFAILURE, at or near "TESTFAILURE" diff --git a/testing/btest/Baseline/core.reporter-runtime-error/output b/testing/btest/Baseline/core.reporter-runtime-error/output index 
3a96954101..59bcc3ac9b 100644 --- a/testing/btest/Baseline/core.reporter-runtime-error/output +++ b/testing/btest/Baseline/core.reporter-runtime-error/output @@ -1 +1 @@ -error in /Users/robin/bro/master/testing/btest/.tmp/core.reporter-runtime-error/reporter-runtime-error.bro, line 12: no such index (a[1]) +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-runtime-error/reporter-runtime-error.bro, line 12: no such index (a[1]) diff --git a/testing/btest/Baseline/core.reporter-type-mismatch/output b/testing/btest/Baseline/core.reporter-type-mismatch/output index 4c038ea8c5..23eefd13e8 100644 --- a/testing/btest/Baseline/core.reporter-type-mismatch/output +++ b/testing/btest/Baseline/core.reporter-type-mismatch/output @@ -1,3 +1,3 @@ -error in string and /Users/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.bro, line 11: arithmetic mixed with non-arithmetic (string and 42) -error in /Users/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.bro, line 11 and string: type mismatch (42 and string) -error in /Users/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.bro, line 11: argument type mismatch in event invocation (foo(42)) +error in string and /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.bro, line 11: arithmetic mixed with non-arithmetic (string and 42) +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.bro, line 11 and string: type mismatch (42 and string) +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.bro, line 11: argument type mismatch in event invocation (foo(42)) diff --git a/testing/btest/Baseline/core.reporter/logger-test.log b/testing/btest/Baseline/core.reporter/logger-test.log index bc2abd142a..6f7ba1d8c7 100644 --- a/testing/btest/Baseline/core.reporter/logger-test.log +++ b/testing/btest/Baseline/core.reporter/logger-test.log @@ -1,6 +1,6 @@ -reporter_info|init test-info|/Users/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 8|0.000000 -reporter_warning|init test-warning|/Users/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 9|0.000000 -reporter_error|init test-error|/Users/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 10|0.000000 -reporter_info|done test-info|/Users/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 15|0.000000 -reporter_warning|done test-warning|/Users/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 16|0.000000 -reporter_error|done test-error|/Users/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 17|0.000000 +reporter_info|init test-info|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 8|0.000000 +reporter_warning|init test-warning|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 9|0.000000 +reporter_error|init test-error|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 10|0.000000 +reporter_info|done test-info|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 15|0.000000 +reporter_warning|done test-warning|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 16|0.000000 +reporter_error|done test-error|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 17|0.000000 diff --git 
a/testing/btest/Baseline/core.reporter/output b/testing/btest/Baseline/core.reporter/output index 185cabb1eb..2735adc931 100644 --- a/testing/btest/Baseline/core.reporter/output +++ b/testing/btest/Baseline/core.reporter/output @@ -1,3 +1,3 @@ -/Users/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 52: pre test-info -warning in /Users/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 53: pre test-warning -error in /Users/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 54: pre test-error +/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 52: pre test-info +warning in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 53: pre test-warning +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 54: pre test-error diff --git a/testing/btest/Baseline/core.truncation/output b/testing/btest/Baseline/core.truncation/output index 95d9073648..836f9170d4 100644 --- a/testing/btest/Baseline/core.truncation/output +++ b/testing/btest/Baseline/core.truncation/output @@ -3,30 +3,38 @@ #empty_field (empty) #unset_field - #path weird +#start 2012-04-11-16-01-35 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334160095.895421 - - - - - truncated_IP - F bro +#end 2012-04-11-16-01-35 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-04-11-14-57-21 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334156241.519125 - - - - - truncated_IP - F bro +#end 2012-04-11-14-57-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-04-10-21-50-48 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334094648.590126 - - - - - truncated_IP - F bro +#end 2012-04-10-21-50-48 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird +#start 2012-05-29-22-02-34 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1338328954.078361 - - - - - internally_truncated_header - F bro +#end 2012-05-29-22-02-34 diff --git a/testing/btest/Baseline/core.tunnels.ayiya/conn.log b/testing/btest/Baseline/core.tunnels.ayiya/conn.log index db54a8a475..82a3828f0d 100644 --- a/testing/btest/Baseline/core.tunnels.ayiya/conn.log +++ b/testing/btest/Baseline/core.tunnels.ayiya/conn.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path conn +#start 2009-11-08-04-41-57 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1257655301.595604 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 tcp http 2.101052 2981 4665 S1 - 0 ShADad 10 3605 11 5329 k6kgXLOoSKl @@ -13,3 +14,4 @@ 1257655296.585188 TEfuqmmG4bh fe80::216:cbff:fe9a:4cb9 131 ff02::1:ff00:2 130 icmp - 0.919988 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl 1257655296.585151 j4u32Pc5bif fe80::216:cbff:fe9a:4cb9 131 ff02::2:f901:d225 130 icmp - 0.719947 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl 
1257655296.585034 nQcgTWjvg4c fe80::216:cbff:fe9a:4cb9 131 ff02::1:ff9a:4cb9 130 icmp - 4.922880 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl +#end 2009-11-08-04-41-57 diff --git a/testing/btest/Baseline/core.tunnels.ayiya/http.log b/testing/btest/Baseline/core.tunnels.ayiya/http.log index 7cef1a1b8e..4fbcd508f4 100644 --- a/testing/btest/Baseline/core.tunnels.ayiya/http.log +++ b/testing/btest/Baseline/core.tunnels.ayiya/http.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path http +#start 2009-11-08-04-41-41 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1257655301.652206 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 10102 200 OK - - - (empty) - - - text/html - - 1257655302.514424 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 2 GET ipv6.google.com /csi?v=3&s=webhp&action=&tran=undefined&e=17259,19771,21517,21766,21887,22212&ei=BUz2Su7PMJTglQfz3NzCAw&rt=prt.77,xjs.565,ol.645 http://ipv6.google.com/ Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 0 204 No Content - - - (empty) - - - - - - 1257655303.603569 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 3 GET ipv6.google.com /gen_204?atyp=i&ct=fade&cad=1254&ei=BUz2Su7PMJTglQfz3NzCAw&zx=1257655303600 http://ipv6.google.com/ Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 0 204 No Content - - - (empty) - - - - - - +#end 2009-11-08-04-41-57 diff --git a/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log b/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log index b4ef2781c6..123ea8a792 100644 --- a/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log @@ -3,9 +3,11 @@ #empty_field (empty) #unset_field - #path tunnel +#start 2009-11-08-04-41-33 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::AYIYA Tunnel::DISCOVER 1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::AYIYA Tunnel::DISCOVER 1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::AYIYA Tunnel::CLOSE 1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::AYIYA Tunnel::CLOSE +#end 2009-11-08-04-41-57 diff --git a/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log b/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log index 4949f16e62..63a0437445 100644 --- a/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log +++ b/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path dpd +#start 2009-11-18-17-59-51 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto analyzer failure_reason #types time string addr port addr port enum string string 1258567191.486869 UWkUyAuUGXf 192.168.1.105 57696 192.168.1.1 53 udp TEREDO Teredo 
payload length [c\x1d\x81\x80\x00\x01\x00\x02\x00\x02\x00\x00\x04amch\x0equestionmarket\x03com\x00\x00\x01\x00...] @@ -11,3 +12,4 @@ 1258581768.898165 TEfuqmmG4bh 192.168.1.104 50798 192.168.1.1 53 udp TEREDO Teredo payload length [o\xe3\x81\x80\x00\x01\x00\x02\x00\x04\x00\x04\x03www\x0fnashuatelegraph\x03com\x00\x00\x01\x00...] 1258584478.989528 FrJExwHcSal 192.168.1.104 64963 192.168.1.1 53 udp TEREDO Teredo payload length [e\xbd\x81\x80\x00\x01\x00\x08\x00\x06\x00\x06\x08wellness\x05blogs\x04time\x03com\x00\x00\x01\x00...] 1258600683.934672 5OKnoww6xl4 192.168.1.103 59838 192.168.1.1 53 udp TEREDO Teredo payload length [h\xf0\x81\x80\x00\x01\x00\x01\x00\x02\x00\x00\x06update\x0csanasecurity\x03com\x00\x00\x01\x00...] +#end 2009-11-19-03-18-03 diff --git a/testing/btest/Baseline/core.tunnels.false-teredo/weird.log b/testing/btest/Baseline/core.tunnels.false-teredo/weird.log index 0ec1d0a7cf..eb4319c7eb 100644 --- a/testing/btest/Baseline/core.tunnels.false-teredo/weird.log +++ b/testing/btest/Baseline/core.tunnels.false-teredo/weird.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path weird +#start 2009-11-18-17-59-51 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1258567191.405770 - - - - - truncated_header_in_tunnel - F bro @@ -11,3 +12,4 @@ 1258581768.568451 - - - - - truncated_header_in_tunnel - F bro 1258584478.859853 - - - - - truncated_header_in_tunnel - F bro 1258600683.934458 - - - - - truncated_header_in_tunnel - F bro +#end 2009-11-19-03-18-03 diff --git a/testing/btest/Baseline/core.tunnels.teredo/conn.log b/testing/btest/Baseline/core.tunnels.teredo/conn.log index cefc8f3e84..2342953339 100644 --- a/testing/btest/Baseline/core.tunnels.teredo/conn.log +++ b/testing/btest/Baseline/core.tunnels.teredo/conn.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path conn +#start 2008-05-16-15-50-57 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1210953047.736921 arKYeMETxOg 192.168.2.16 1576 75.126.130.163 80 tcp - 0.000357 0 0 SHR - 0 fA 1 40 1 40 (empty) @@ -26,3 +27,4 @@ 1210953052.324629 FrJExwHcSal fe80::8000:f227:bec8:61af 134 fe80::8000:ffff:ffff:fffd 133 icmp - - - - OTH - 0 - 1 88 0 0 TEfuqmmG4bh 1210953060.829303 qCaWGmzFtM5 2001:0:4137:9e50:8000:f12a:b9c8:2815 128 2001:4860:0:2001::68 129 icmp - 0.463615 4 4 OTH - 0 - 1 52 1 52 GSxOnSLghOa,nQcgTWjvg4c 1210953052.202579 j4u32Pc5bif fe80::8000:ffff:ffff:fffd 133 ff02::2 134 icmp - - - - OTH - 0 - 1 64 0 0 nQcgTWjvg4c +#end 2008-05-16-15-51-16 diff --git a/testing/btest/Baseline/core.tunnels.teredo/http.log b/testing/btest/Baseline/core.tunnels.teredo/http.log index b3cf832083..c0db5fc146 100644 --- a/testing/btest/Baseline/core.tunnels.teredo/http.log +++ b/testing/btest/Baseline/core.tunnels.teredo/http.log @@ -3,9 +3,11 @@ #empty_field (empty) #unset_field - #path http +#start 2008-05-16-15-50-58 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count 
count count string count string string table[enum] string string table[string] string string file 1210953057.917183 3PKsZ2Uye21 192.168.2.16 1578 75.126.203.78 80 1 POST download913.avast.com /cgi-bin/iavs4stats.cgi - Syncer/4.80 (av_pro-1169;f) 589 0 204 - - - (empty) - - - text/plain - - 1210953061.585996 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 6640 200 OK - - - (empty) - - - text/html - - 1210953073.381474 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 2 GET ipv6.google.com /search?hl=en&q=Wireshark+!&btnG=Google+Search http://ipv6.google.com/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 25119 200 OK - - - (empty) - - - text/html - - 1210953074.674817 c4Zw9TmAE05 192.168.2.16 1580 67.228.110.120 80 1 GET www.wireshark.org / http://ipv6.google.com/search?hl=en&q=Wireshark+%21&btnG=Google+Search Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 11845 200 OK - - - (empty) - - - text/xml - - +#end 2008-05-16-15-51-16 diff --git a/testing/btest/Baseline/core.tunnels.teredo/tunnel.log b/testing/btest/Baseline/core.tunnels.teredo/tunnel.log index 9cead25be1..ab14bf68bc 100644 --- a/testing/btest/Baseline/core.tunnels.teredo/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.teredo/tunnel.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path tunnel +#start 2008-05-16-15-50-52 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::DISCOVER @@ -11,3 +12,4 @@ 1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::CLOSE 1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::TEREDO Tunnel::CLOSE 1210953076.058333 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::TEREDO Tunnel::CLOSE +#end 2008-05-16-15-51-16 diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log index 6ceb4efcb3..7b9ff58624 100644 --- a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path conn +#start 2012-06-19-17-39-37 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1340127577.354166 FrJExwHcSal 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 tcp http 0.052829 1675 10467 S1 - 0 ShADad 10 2279 12 11191 j4u32Pc5bif @@ -12,3 +13,4 @@ 1340127577.339015 nQcgTWjvg4c fe80::8000:f227:bec8:61af 134 fe80::8000:ffff:ffff:fffd 133 icmp - - - - OTH - 0 - 1 88 0 0 k6kgXLOoSKl 1340127577.343969 TEfuqmmG4bh 2001:0:4137:9e50:8000:f12a:b9c8:2815 128 2001:4860:0:2001::68 129 icmp - 0.007778 4 4 OTH - 0 - 1 52 1 52 UWkUyAuUGXf,j4u32Pc5bif 1340127577.336558 arKYeMETxOg fe80::8000:ffff:ffff:fffd 133 ff02::2 134 icmp - - - - OTH - 0 - 1 64 0 0 UWkUyAuUGXf +#end 2012-06-19-17-39-37 diff --git 
a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log index 869476d7db..12f0d7be7a 100644 --- a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path http +#start 2012-06-19-17-39-37 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1340127577.361683 FrJExwHcSal 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 6640 200 OK - - - (empty) - - - text/html - - 1340127577.379360 FrJExwHcSal 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 2 GET ipv6.google.com /search?hl=en&q=Wireshark+!&btnG=Google+Search http://ipv6.google.com/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 25119 200 OK - - - (empty) - - - text/html - - +#end 2012-06-19-17-39-37 diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log index 30f88ed251..1a14b3edb7 100644 --- a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path tunnel +#start 2012-06-19-17-39-37 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1340127577.336558 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::DISCOVER @@ -11,3 +12,4 @@ 1340127577.406995 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::CLOSE 1340127577.406995 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 Tunnel::TEREDO Tunnel::CLOSE 1340127577.406995 k6kgXLOoSKl 192.168.2.16 3797 65.55.158.81 3544 Tunnel::TEREDO Tunnel::CLOSE +#end 2012-06-19-17-39-37 diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log index e01fa49d45..8b252a5819 100644 --- a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path weird +#start 2012-06-19-17-39-37 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1340127577.346849 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Teredo_bubble_with_payload - F bro 1340127577.349292 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 Teredo_bubble_with_payload - F bro +#end 2012-06-19-17-39-37 diff --git a/testing/btest/Baseline/core.vlan-mpls/conn.log b/testing/btest/Baseline/core.vlan-mpls/conn.log index e165df621a..72e13ee9b4 100644 --- a/testing/btest/Baseline/core.vlan-mpls/conn.log +++ b/testing/btest/Baseline/core.vlan-mpls/conn.log @@ -3,8 
+3,10 @@ #empty_field (empty) #unset_field - #path conn +#start 2005-10-07-23-23-55 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 952109346.874907 UWkUyAuUGXf 10.1.2.1 11001 10.34.0.1 23 tcp - 2.102560 26 0 SH - 0 SADF 11 470 0 0 (empty) 1128727435.450898 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 tcp http 1.733303 98 9417 SF - 0 ShADdFaf 12 730 10 9945 (empty) 1278600802.069419 k6kgXLOoSKl 10.20.80.1 50343 10.0.0.15 80 tcp - 0.004152 9 3429 SF - 0 ShADadfF 7 381 7 3801 (empty) +#end 2010-07-08-14-53-22 diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index 0f12ce4ead..8f90296b63 100644 --- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts +#start 2012-07-20-01-49-31 #fields name #types string scripts/base/init-bare.bro @@ -28,3 +29,4 @@ scripts/base/init-bare.bro scripts/base/frameworks/input/./readers/raw.bro scripts/base/frameworks/input/./readers/benchmark.bro scripts/policy/misc/loaded-scripts.bro +#end 2012-07-20-01-49-31 diff --git a/testing/btest/Baseline/coverage.coverage-blacklist/output b/testing/btest/Baseline/coverage.coverage-blacklist/output index 6d3d243220..c54e4283b2 100644 --- a/testing/btest/Baseline/coverage.coverage-blacklist/output +++ b/testing/btest/Baseline/coverage.coverage-blacklist/output @@ -1,5 +1,5 @@ -1 /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 13 print cover me; -1 /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 17 print always executed; -0 /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 26 print also impossible, but included in code coverage analysis; -1 /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 29 print success; -1 /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 5 print first; +1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 13 print cover me; +1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 17 print always executed; +0 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 26 print also impossible, but included in code coverage analysis; +1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 29 print success; +1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 5 print first; diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index f1f9791fc3..6bc461ed65 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ 
b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts +#start 2012-07-20-01-49-33 #fields name #types string scripts/base/init-bare.bro @@ -108,3 +109,4 @@ scripts/base/init-default.bro scripts/base/protocols/syslog/./consts.bro scripts/base/protocols/syslog/./main.bro scripts/policy/misc/loaded-scripts.bro +#end 2012-07-20-01-49-33 diff --git a/testing/btest/Baseline/istate.broccoli/bro.log b/testing/btest/Baseline/istate.broccoli/bro.log index 4fbbfc81ae..70bf23f95a 100644 --- a/testing/btest/Baseline/istate.broccoli/bro.log +++ b/testing/btest/Baseline/istate.broccoli/bro.log @@ -1,3 +1,3 @@ -ping received, seq 0, 1324314397.698781 at src, 1324314397.699240 at dest, -ping received, seq 1, 1324314398.698905 at src, 1324314398.699094 at dest, -ping received, seq 2, 1324314399.699012 at src, 1324314399.699231 at dest, +ping received, seq 0, 1342749173.594568 at src, 1342749173.637317 at dest, +ping received, seq 1, 1342749174.594948 at src, 1342749174.596551 at dest, +ping received, seq 2, 1342749175.595486 at src, 1342749175.596581 at dest, diff --git a/testing/btest/Baseline/istate.events-ssl/receiver.http.log b/testing/btest/Baseline/istate.events-ssl/receiver.http.log index 5a7912d23d..c9a996ef5b 100644 --- a/testing/btest/Baseline/istate.events-ssl/receiver.http.log +++ b/testing/btest/Baseline/istate.events-ssl/receiver.http.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path http +#start 2012-07-20-01-53-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1336588614.060989 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +1342749182.906082 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +#end 2012-07-20-01-53-04 diff --git a/testing/btest/Baseline/istate.events-ssl/sender.http.log b/testing/btest/Baseline/istate.events-ssl/sender.http.log index 5a7912d23d..c9a996ef5b 100644 --- a/testing/btest/Baseline/istate.events-ssl/sender.http.log +++ b/testing/btest/Baseline/istate.events-ssl/sender.http.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path http +#start 2012-07-20-01-53-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1336588614.060989 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +1342749182.906082 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +#end 2012-07-20-01-53-04 diff --git a/testing/btest/Baseline/istate.events/receiver.http.log 
b/testing/btest/Baseline/istate.events/receiver.http.log index 55a0189cec..566457b996 100644 --- a/testing/btest/Baseline/istate.events/receiver.http.log +++ b/testing/btest/Baseline/istate.events/receiver.http.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path http +#start 2012-07-20-01-53-12 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1336587178.164598 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +1342749191.765740 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +#end 2012-07-20-01-53-13 diff --git a/testing/btest/Baseline/istate.events/sender.http.log b/testing/btest/Baseline/istate.events/sender.http.log index 55a0189cec..566457b996 100644 --- a/testing/btest/Baseline/istate.events/sender.http.log +++ b/testing/btest/Baseline/istate.events/sender.http.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path http +#start 2012-07-20-01-53-12 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1336587178.164598 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +1342749191.765740 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - +#end 2012-07-20-01-53-13 diff --git a/testing/btest/Baseline/istate.pybroccoli/bro..stdout b/testing/btest/Baseline/istate.pybroccoli/bro..stdout index a5d20b1f2a..b73d342967 100644 --- a/testing/btest/Baseline/istate.pybroccoli/bro..stdout +++ b/testing/btest/Baseline/istate.pybroccoli/bro..stdout @@ -1,7 +1,7 @@ ==== atomic -10 2 -1336411585.166009 +1342749196.619505 2.0 mins F 1.5 diff --git a/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered b/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered index a44a95bd69..2f2a5978d8 100644 --- a/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered +++ b/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered @@ -1,7 +1,7 @@ ==== atomic a 1 ==== -4L -4 42 42 -1336411585.1711 +1342749196.6624 60.0 True True 3.14 @@ -14,7 +14,7 @@ True True ==== atomic a 2 ==== -10L -10 2 2 -1336411585.1660 +1342749196.6195 120.0 False False 1.5 @@ -27,7 +27,7 @@ False False ==== atomic b 2 ==== -10L -10 2 - 1336411585.1660 + 1342749196.6195 120.0 False False 1.5 diff --git a/testing/btest/Baseline/language.wrong-delete-field/output b/testing/btest/Baseline/language.wrong-delete-field/output index c2aae8aae3..1eefa1d2fe 100644 --- a/testing/btest/Baseline/language.wrong-delete-field/output +++ b/testing/btest/Baseline/language.wrong-delete-field/output @@ -1 +1 @@ -error in 
/Users/robin/bro/master/testing/btest/.tmp/language.wrong-delete-field/wrong-delete-field.bro, line 10: illegal delete statement (delete x$a) +error in /da/home/robin/bro/master/testing/btest/.tmp/language.wrong-delete-field/wrong-delete-field.bro, line 10: illegal delete statement (delete x$a) diff --git a/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log b/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log index 94e0403238..7e21ff86b7 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log +++ b/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log @@ -3,20 +3,22 @@ #empty_field (empty) #unset_field - #path communication +#start 2012-07-20-01-49-40 #fields ts peer src_name connected_peer_desc connected_peer_addr connected_peer_port level message #types time string string string addr port string string -1340904724.781527 bro parent - - - info [#1/127.0.0.1:47757] added peer -1340904724.784954 bro child - - - info [#1/127.0.0.1:47757] connected -1340904724.786168 bro parent - - - info [#1/127.0.0.1:47757] peer connected -1340904724.786168 bro parent - - - info [#1/127.0.0.1:47757] phase: version -1340904724.786168 bro script - - - info connection established -1340904724.786168 bro script - - - info requesting events matching /^?(NOTHING)$?/ -1340904724.786168 bro script - - - info accepting state -1340904724.787645 bro parent - - - info [#1/127.0.0.1:47757] phase: handshake -1340904724.787645 bro parent - - - info warning: no events to request -1340904724.788857 bro parent - - - info [#1/127.0.0.1:47757] peer_description is bro -1340904724.829480 bro parent - - - info [#1/127.0.0.1:47757] peer supports keep-in-cache; using that -1340904724.829480 bro parent - - - info [#1/127.0.0.1:47757] phase: running -1340904724.829480 bro parent - - - info terminating... -1340904724.832952 bro child - - - info terminating -1340904724.834082 bro parent - - - info [#1/127.0.0.1:47757] closing connection +1342748980.737451 bro parent - - - info [#1/127.0.0.1:47757] added peer +1342748980.747149 bro child - - - info [#1/127.0.0.1:47757] connected +1342748980.748489 bro parent - - - info [#1/127.0.0.1:47757] peer connected +1342748980.748489 bro parent - - - info [#1/127.0.0.1:47757] phase: version +1342748980.750749 bro script - - - info connection established +1342748980.750749 bro script - - - info requesting events matching /^?(NOTHING)$?/ +1342748980.750749 bro script - - - info accepting state +1342748980.752225 bro parent - - - info [#1/127.0.0.1:47757] phase: handshake +1342748980.752225 bro parent - - - info warning: no events to request +1342748980.753384 bro parent - - - info [#1/127.0.0.1:47757] peer_description is bro +1342748980.793108 bro parent - - - info [#1/127.0.0.1:47757] peer supports keep-in-cache; using that +1342748980.793108 bro parent - - - info [#1/127.0.0.1:47757] phase: running +1342748980.793108 bro parent - - - info terminating... 
+1342748980.796454 bro child - - - info terminating +1342748980.797536 bro parent - - - info [#1/127.0.0.1:47757] closing connection +#end 2012-07-20-01-49-40 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.adapt-filter/ssh-new-default.log b/testing/btest/Baseline/scripts.base.frameworks.logging.adapt-filter/ssh-new-default.log index 485bfe3eba..a0359c2d70 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.adapt-filter/ssh-new-default.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.adapt-filter/ssh-new-default.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path ssh-new-default +#start 2012-07-20-01-49-19 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314313.140603 1.2.3.4 1234 2.3.4.5 80 success unknown -1324314313.140603 1.2.3.4 1234 2.3.4.5 80 failure US +1342748959.430282 1.2.3.4 1234 2.3.4.5 80 success unknown +1342748959.430282 1.2.3.4 1234 2.3.4.5 80 failure US +#end 2012-07-20-01-49-19 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-binary/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-binary/ssh.log index 144a7a6426..0c826f9694 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-binary/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-binary/ssh.log @@ -3,8 +3,10 @@ #empty_field|(empty) #unset_field|- #path|ssh +#start|2012-07-20-01-49-19 #fields|data|data2 #types|string|string abc\x0a\xffdef|DATA2 abc\x7c\xffdef|DATA2 abc\xff\x7cdef|DATA2 +#end|2012-07-20-01-49-19 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh-filtered.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh-filtered.log index a2610bb522..b6e4889a21 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh-filtered.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-empty/ssh-filtered.log @@ -5,8 +5,8 @@ PREFIX<>unset_field|NOT-SET PREFIX<>path|ssh PREFIX<>fields|t|id.orig_h|id.orig_p|id.resp_h|id.resp_p|status|country|b PREFIX<>types|time|addr|port|addr|port|string|string|bool -1342126762.852986|1.2.3.4|1234|2.3.4.5|80|success|unknown|NOT-SET -1342126762.852986|1.2.3.4|1234|2.3.4.5|80|NOT-SET|US|NOT-SET -1342126762.852986|1.2.3.4|1234|2.3.4.5|80|failure|UK|NOT-SET -1342126762.852986|1.2.3.4|1234|2.3.4.5|80|NOT-SET|BR|NOT-SET -1342126762.852986|1.2.3.4|1234|2.3.4.5|80|failure|EMPTY|T +1342748959.659721|1.2.3.4|1234|2.3.4.5|80|success|unknown|NOT-SET +1342748959.659721|1.2.3.4|1234|2.3.4.5|80|NOT-SET|US|NOT-SET +1342748959.659721|1.2.3.4|1234|2.3.4.5|80|failure|UK|NOT-SET +1342748959.659721|1.2.3.4|1234|2.3.4.5|80|NOT-SET|BR|NOT-SET +1342748959.659721|1.2.3.4|1234|2.3.4.5|80|failure|EMPTY|T diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-notset-str/test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-notset-str/test.log index c9e69994fc..b1a4ba52e2 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-notset-str/test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-notset-str/test.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path test +#start 2012-07-20-01-49-19 #fields x y z #types string string string \x2d - (empty) +#end 2012-07-20-01-49-19 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-odd-url/http.log 
b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-odd-url/http.log index 97744b7df8..683f149317 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-odd-url/http.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-odd-url/http.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path http +#start 2011-09-12-03-57-36 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1315799856.264750 UWkUyAuUGXf 10.0.1.104 64216 193.40.5.162 80 1 GET lepo.it.da.ut.ee /~cect/teoreetilised seminarid_2010/arheoloogia_uurimisr\xfchma_seminar/Joyce et al - The Languages of Archaeology ~ Dialogue, Narrative and Writing.pdf - Wget/1.12 (darwin10.8.0) 0 346 404 Not Found - - - (empty) - - - text/html - - +#end 2011-09-12-03-57-37 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-set-separator/test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-set-separator/test.log index b88627c806..a03c6f954b 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-set-separator/test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-set-separator/test.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path test +#start 2012-07-20-01-49-19 #fields ss #types table[string] CC,AA,\x2c,\x2c\x2c +#end 2012-07-20-01-49-19 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log index 0ef81128d3..0c6a266de0 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log @@ -5,8 +5,8 @@ #path||ssh #fields||t||id.orig_h||id.orig_p||id.resp_h||id.resp_p||status||country #types||time||addr||port||addr||port||string||string -1324314313.899736||1.2.3.4||1234||2.3.4.5||80||success||unknown -1324314313.899736||1.2.3.4||1234||2.3.4.5||80||failure||US -1324314313.899736||1.2.3.4||1234||2.3.4.5||80||fa\x7c\x7cure||UK -1324314313.899736||1.2.3.4||1234||2.3.4.5||80||su\x7c\x7cess||BR -1324314313.899736||1.2.3.4||1234||2.3.4.5||80||failure||MX +1342759749.586006||1.2.3.4||1234||2.3.4.5||80||success||unknown +1342759749.586006||1.2.3.4||1234||2.3.4.5||80||failure||US +1342759749.586006||1.2.3.4||1234||2.3.4.5||80||fa\x7c\x7cure||UK +1342759749.586006||1.2.3.4||1234||2.3.4.5||80||su\x7c\x7cess||BR +1342759749.586006||1.2.3.4||1234||2.3.4.5||80||failure||MX diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log index 72df0d73d4..21b81abf95 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path test -#start 2012-07-12-21-00-27 +#start 2012-07-20-01-49-22 #fields data c #types string count Test1 42 \x23Kaputt 42 Test2 42 -#end 2012-07-12-21-00-27 
+#end 2012-07-20-01-49-22 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-options/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-options/ssh.log index f66dec7160..6e3263673a 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-options/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-options/ssh.log @@ -1,5 +1,5 @@ -1324314313.990741|1.2.3.4|1234|2.3.4.5|80|success|unknown -1324314313.990741|1.2.3.4|1234|2.3.4.5|80|failure|US -1324314313.990741|1.2.3.4|1234|2.3.4.5|80|failure|UK -1324314313.990741|1.2.3.4|1234|2.3.4.5|80|success|BR -1324314313.990741|1.2.3.4|1234|2.3.4.5|80|failure|MX +1342748960.098729|1.2.3.4|1234|2.3.4.5|80|success|unknown +1342748960.098729|1.2.3.4|1234|2.3.4.5|80|failure|US +1342748960.098729|1.2.3.4|1234|2.3.4.5|80|failure|UK +1342748960.098729|1.2.3.4|1234|2.3.4.5|80|success|BR +1342748960.098729|1.2.3.4|1234|2.3.4.5|80|failure|MX diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-timestamps/test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-timestamps/test.log index 00ab6c8ca0..5fba268afa 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-timestamps/test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-timestamps/test.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path test +#start 2012-07-20-01-49-20 #fields data #types time 1234567890.000000 @@ -13,3 +14,4 @@ 1234567890.000010 1234567890.000001 1234567890.000000 +#end 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.attr-extend/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.attr-extend/ssh.log index 5acaa7b2fc..7d3bbc0774 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.attr-extend/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.attr-extend/ssh.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path ssh +#start 2012-07-20-01-49-20 #fields status country a1 b1 b2 #types string string count count count success unknown 1 3 4 +#end 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.attr/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.attr/ssh.log index 086a4836fe..c3163dba6f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.attr/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.attr/ssh.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path ssh +#start 2012-07-20-01-49-20 #fields status country #types string string success unknown @@ -10,3 +11,4 @@ failure US failure UK success BR failure MX +#end 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt index e9640dfd9d..e6abc3f1f6 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt @@ -27,8 +27,8 @@ # Extent, type='ssh' t id.orig_h id.orig_p id.resp_h id.resp_p status country -1337216256.956476 1.2.3.4 1234 2.3.4.5 80 success unknown -1337216256.956476 1.2.3.4 1234 2.3.4.5 80 failure US -1337216256.956476 1.2.3.4 1234 2.3.4.5 80 failure UK -1337216256.956476 1.2.3.4 1234 2.3.4.5 80 success BR -1337216256.956476 1.2.3.4 1234 2.3.4.5 80 failure MX +1342748962.493341 1.2.3.4 1234 
2.3.4.5 80 success unknown +1342748962.493341 1.2.3.4 1234 2.3.4.5 80 failure US +1342748962.493341 1.2.3.4 1234 2.3.4.5 80 failure UK +1342748962.493341 1.2.3.4 1234 2.3.4.5 80 success BR +1342748962.493341 1.2.3.4 1234 2.3.4.5 80 failure MX diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.empty-event/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.empty-event/ssh.log index 16ba17c62c..42f945bf0c 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.empty-event/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.empty-event/ssh.log @@ -3,10 +3,12 @@ #empty_field (empty) #unset_field - #path ssh +#start 2012-07-20-01-49-20 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314314.443785 1.2.3.4 1234 2.3.4.5 80 success unknown -1324314314.443785 1.2.3.4 1234 2.3.4.5 80 failure US -1324314314.443785 1.2.3.4 1234 2.3.4.5 80 failure UK -1324314314.443785 1.2.3.4 1234 2.3.4.5 80 success BR -1324314314.443785 1.2.3.4 1234 2.3.4.5 80 failure MX +1342748960.468458 1.2.3.4 1234 2.3.4.5 80 success unknown +1342748960.468458 1.2.3.4 1234 2.3.4.5 80 failure US +1342748960.468458 1.2.3.4 1234 2.3.4.5 80 failure UK +1342748960.468458 1.2.3.4 1234 2.3.4.5 80 success BR +1342748960.468458 1.2.3.4 1234 2.3.4.5 80 failure MX +#end 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.events/output b/testing/btest/Baseline/scripts.base.frameworks.logging.events/output index 5da27764a5..6bd153946e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.events/output +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.events/output @@ -1,2 +1,2 @@ -[t=1324314314.738385, id=[orig_h=1.2.3.4, orig_p=1234/tcp, resp_h=2.3.4.5, resp_p=80/tcp], status=success, country=unknown] -[t=1324314314.738385, id=[orig_h=1.2.3.4, orig_p=1234/tcp, resp_h=2.3.4.5, resp_p=80/tcp], status=failure, country=US] +[t=1342748960.593451, id=[orig_h=1.2.3.4, orig_p=1234/tcp, resp_h=2.3.4.5, resp_p=80/tcp], status=success, country=unknown] +[t=1342748960.593451, id=[orig_h=1.2.3.4, orig_p=1234/tcp, resp_h=2.3.4.5, resp_p=80/tcp], status=failure, country=US] diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.exclude/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.exclude/ssh.log index 4ccf4c836a..3fe01ff913 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.exclude/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.exclude/ssh.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path ssh +#start 2012-07-20-01-49-20 #fields id.orig_p id.resp_h id.resp_p status country #types port addr port string string 1234 2.3.4.5 80 success unknown @@ -10,3 +11,4 @@ 1234 2.3.4.5 80 failure UK 1234 2.3.4.5 80 success BR 1234 2.3.4.5 80 failure MX +#end 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.file/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.file/ssh.log index 4aa3d8f0a7..205f37243f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.file/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.file/ssh.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path ssh +#start 2012-07-20-01-49-20 #fields t f #types time file -1324314314.940195 Foo.log +1342748960.757056 Foo.log +#end 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.include/ssh.log 
b/testing/btest/Baseline/scripts.base.frameworks.logging.include/ssh.log index 00242d65c1..cafacf9c4e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.include/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.include/ssh.log @@ -3,10 +3,12 @@ #empty_field (empty) #unset_field - #path ssh +#start 2012-07-20-01-49-20 #fields t id.orig_h #types time addr -1324314315.040480 1.2.3.4 -1324314315.040480 1.2.3.4 -1324314315.040480 1.2.3.4 -1324314315.040480 1.2.3.4 -1324314315.040480 1.2.3.4 +1342748960.796093 1.2.3.4 +1342748960.796093 1.2.3.4 +1342748960.796093 1.2.3.4 +1342748960.796093 1.2.3.4 +1342748960.796093 1.2.3.4 +#end 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log index c2c69f3153..3240e9f824 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path local +#start 2011-03-18-19-06-13 #fields ts id.orig_h #types time addr 1300475168.859163 141.142.220.118 @@ -35,3 +36,4 @@ 1300475168.902195 141.142.220.118 1300475168.894787 141.142.220.118 1300475168.901749 141.142.220.118 +#end 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log index b396c3fc2d..84980836c4 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log @@ -3,9 +3,11 @@ #empty_field (empty) #unset_field - #path remote +#start 2011-03-18-19-06-13 #fields ts id.orig_h #types time addr 1300475169.780331 173.192.163.128 1300475167.097012 fe80::217:f2ff:fed7:cf65 1300475171.675372 fe80::3074:17d5:2052:c324 1300475173.116749 fe80::3074:17d5:2052:c324 +#end 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func/output b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func/output index a6b8a4e090..1c67ff52b6 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func/output +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func/output @@ -10,54 +10,68 @@ static-prefix-2-UK.log #empty_field (empty) #unset_field - #path static-prefix-0-BR +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314315.385189 1.2.3.4 1234 2.3.4.5 80 success BR +1342748961.180156 1.2.3.4 1234 2.3.4.5 80 success BR +#end 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-0-MX3 +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314315.385189 1.2.3.4 1234 2.3.4.5 80 failure MX3 +1342748961.180156 1.2.3.4 1234 2.3.4.5 80 failure MX3 +#end 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-0-unknown +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314315.385189 1.2.3.4 1234 2.3.4.5 80 success 
unknown +1342748961.180156 1.2.3.4 1234 2.3.4.5 80 success unknown +#end 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-1-MX +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314315.385189 1.2.3.4 1234 2.3.4.5 80 failure MX +1342748961.180156 1.2.3.4 1234 2.3.4.5 80 failure MX +#end 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-1-US +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314315.385189 1.2.3.4 1234 2.3.4.5 80 failure US +1342748961.180156 1.2.3.4 1234 2.3.4.5 80 failure US +#end 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-2-MX2 +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314315.385189 1.2.3.4 1234 2.3.4.5 80 failure MX2 +1342748961.180156 1.2.3.4 1234 2.3.4.5 80 failure MX2 +#end 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-2-UK +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314315.385189 1.2.3.4 1234 2.3.4.5 80 failure UK +1342748961.180156 1.2.3.4 1234 2.3.4.5 80 failure UK +#end 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.failure.log b/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.failure.log index 733bb02847..96dede8965 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.failure.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.failure.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path test.failure +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314315.498365 1.2.3.4 1234 2.3.4.5 80 failure US +1342748961.488370 1.2.3.4 1234 2.3.4.5 80 failure US +#end 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.success.log b/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.success.log index 0261caeb06..85b5ca9f45 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.success.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.success.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path test.success +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314315.498365 1.2.3.4 1234 2.3.4.5 80 success unknown +1342748961.488370 1.2.3.4 1234 2.3.4.5 80 success unknown +#end 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remote-types/receiver.test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remote-types/receiver.test.log index d9bd34309a..aa18822daf 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remote-types/receiver.test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remote-types/receiver.test.log @@ -3,6 +3,8 @@ #empty_field EMPTY #unset_field - #path test +#start 1970-01-01-00-00-00 #fields b i e c p sn a d t iv s sc ss se vc ve #types 
bool int enum count port subnet addr double time interval string table[count] table[string] table[string] vector[count] vector[string] -T -42 Test::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1324314315.880694 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY +T -42 Test::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1342749004.579242 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY +#end 2012-07-20-01-50-05 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.failure.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.failure.log index 6cb58bf4ac..36b88e496d 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.failure.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.failure.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path test.failure +#start 2012-07-20-01-50-18 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314321.061516 1.2.3.4 1234 2.3.4.5 80 failure US -1324314321.061516 1.2.3.4 1234 2.3.4.5 80 failure UK -1324314321.061516 1.2.3.4 1234 2.3.4.5 80 failure MX +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure US +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure UK +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure MX +#end 2012-07-20-01-50-18 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.log index f5b79ee2c4..22d354fce4 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.log @@ -3,10 +3,12 @@ #empty_field (empty) #unset_field - #path test +#start 2012-07-20-01-50-18 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314321.061516 1.2.3.4 1234 2.3.4.5 80 success unknown -1324314321.061516 1.2.3.4 1234 2.3.4.5 80 failure US -1324314321.061516 1.2.3.4 1234 2.3.4.5 80 failure UK -1324314321.061516 1.2.3.4 1234 2.3.4.5 80 success BR -1324314321.061516 1.2.3.4 1234 2.3.4.5 80 failure MX +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success unknown +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure US +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure UK +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success BR +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure MX +#end 2012-07-20-01-50-18 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.success.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.success.log index c40e56af93..888dc424b5 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.success.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.success.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path test.success +#start 2012-07-20-01-50-18 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314321.061516 1.2.3.4 1234 2.3.4.5 80 success unknown -1324314321.061516 1.2.3.4 1234 2.3.4.5 80 success BR +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success unknown +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success BR +#end 2012-07-20-01-50-18 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.failure.log 
b/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.failure.log index cb3d4aafb8..5a23ad2066 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.failure.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.failure.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path ssh.failure +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314328.196443 1.2.3.4 1234 2.3.4.5 80 failure US -1324314328.196443 1.2.3.4 1234 2.3.4.5 80 failure UK +1342748961.521536 1.2.3.4 1234 2.3.4.5 80 failure US +1342748961.521536 1.2.3.4 1234 2.3.4.5 80 failure UK +#end 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.log index 38a5bb660c..cea1069748 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path ssh +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314328.196443 1.2.3.4 1234 2.3.4.5 80 failure US -1324314328.196443 1.2.3.4 1234 2.3.4.5 80 failure UK -1324314328.196443 1.2.3.4 1234 2.3.4.5 80 failure BR +1342748961.521536 1.2.3.4 1234 2.3.4.5 80 failure US +1342748961.521536 1.2.3.4 1234 2.3.4.5 80 failure UK +1342748961.521536 1.2.3.4 1234 2.3.4.5 80 failure BR +#end 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out index 915915f43e..91b6f5de7a 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out @@ -19,11 +19,31 @@ custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_11.59.55.log, pat custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_12.00.05.log, path=test2, open=1299499205.0, close=1299502795.0, terminating=F] custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_12.59.55.log, path=test2, open=1299502795.0, close=1299502795.0, terminating=T] #empty_field (empty) +#end 2011-03-07-03-59-55 +#end 2011-03-07-04-00-05 +#end 2011-03-07-04-59-55 +#end 2011-03-07-05-00-05 +#end 2011-03-07-05-59-55 +#end 2011-03-07-06-00-05 +#end 2011-03-07-06-59-55 +#end 2011-03-07-07-00-05 +#end 2011-03-07-07-59-55 +#end 2011-03-07-08-00-05 +#end 2011-03-07-08-59-55 +#end 2011-03-07-09-00-05 +#end 2011-03-07-09-59-55 +#end 2011-03-07-10-00-05 +#end 2011-03-07-10-59-55 +#end 2011-03-07-11-00-05 +#end 2011-03-07-11-59-55 +#end 2011-03-07-12-00-05 +#end 2011-03-07-12-59-55 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #path test #path test2 #separator \x09 #set_separator , +#start 2011-03-07-03-00-05 #types time addr port addr port #unset_field - 1299466805.000000 10.0.0.1 20 10.0.0.2 1024 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out index c335b5eeb9..4764ff23d0 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out @@ -14,97 +14,117 @@ test.2011-03-07-12-00-05.log test 11-03-07_12.00.05 11-03-07_12.59.55 1 ascii #empty_field 
(empty) #unset_field - #path test +#start 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299466805.000000 10.0.0.1 20 10.0.0.2 1024 1299470395.000000 10.0.0.2 20 10.0.0.3 0 +#end 2011-03-07-04-00-05 > test.2011-03-07-04-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test +#start 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299470405.000000 10.0.0.1 20 10.0.0.2 1025 1299473995.000000 10.0.0.2 20 10.0.0.3 1 +#end 2011-03-07-05-00-05 > test.2011-03-07-05-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test +#start 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299474005.000000 10.0.0.1 20 10.0.0.2 1026 1299477595.000000 10.0.0.2 20 10.0.0.3 2 +#end 2011-03-07-06-00-05 > test.2011-03-07-06-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test +#start 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299477605.000000 10.0.0.1 20 10.0.0.2 1027 1299481195.000000 10.0.0.2 20 10.0.0.3 3 +#end 2011-03-07-07-00-05 > test.2011-03-07-07-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test +#start 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299481205.000000 10.0.0.1 20 10.0.0.2 1028 1299484795.000000 10.0.0.2 20 10.0.0.3 4 +#end 2011-03-07-08-00-05 > test.2011-03-07-08-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test +#start 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299484805.000000 10.0.0.1 20 10.0.0.2 1029 1299488395.000000 10.0.0.2 20 10.0.0.3 5 +#end 2011-03-07-09-00-05 > test.2011-03-07-09-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test +#start 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299488405.000000 10.0.0.1 20 10.0.0.2 1030 1299491995.000000 10.0.0.2 20 10.0.0.3 6 +#end 2011-03-07-10-00-05 > test.2011-03-07-10-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test +#start 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299492005.000000 10.0.0.1 20 10.0.0.2 1031 1299495595.000000 10.0.0.2 20 10.0.0.3 7 +#end 2011-03-07-11-00-05 > test.2011-03-07-11-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test +#start 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299495605.000000 10.0.0.1 20 10.0.0.2 1032 1299499195.000000 10.0.0.2 20 10.0.0.3 8 +#end 2011-03-07-12-00-05 > test.2011-03-07-12-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test +#start 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299499205.000000 10.0.0.1 20 10.0.0.2 1033 1299502795.000000 10.0.0.2 20 10.0.0.3 9 +#end 2011-03-07-12-59-55 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.stdout/output b/testing/btest/Baseline/scripts.base.frameworks.logging.stdout/output index 09afe2031c..110cef054a 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.stdout/output +++ 
b/testing/btest/Baseline/scripts.base.frameworks.logging.stdout/output @@ -3,10 +3,12 @@ #empty_field (empty) #unset_field - #path /dev/stdout +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314328.844271 1.2.3.4 1234 2.3.4.5 80 success unknown -1324314328.844271 1.2.3.4 1234 2.3.4.5 80 failure US -1324314328.844271 1.2.3.4 1234 2.3.4.5 80 failure UK -1324314328.844271 1.2.3.4 1234 2.3.4.5 80 success BR -1324314328.844271 1.2.3.4 1234 2.3.4.5 80 failure MX +1342748961.732599 1.2.3.4 1234 2.3.4.5 80 success unknown +1342748961.732599 1.2.3.4 1234 2.3.4.5 80 failure US +1342748961.732599 1.2.3.4 1234 2.3.4.5 80 failure UK +1342748961.732599 1.2.3.4 1234 2.3.4.5 80 success BR +1342748961.732599 1.2.3.4 1234 2.3.4.5 80 failure MX +#end 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.test-logging/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.test-logging/ssh.log index 53292324af..c9191b666e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.test-logging/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.test-logging/ssh.log @@ -3,10 +3,12 @@ #empty_field (empty) #unset_field - #path ssh +#start 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1324314328.950525 1.2.3.4 1234 2.3.4.5 80 success unknown -1324314328.950525 1.2.3.4 1234 2.3.4.5 80 failure US -1324314328.950525 1.2.3.4 1234 2.3.4.5 80 failure UK -1324314328.950525 1.2.3.4 1234 2.3.4.5 80 success BR -1324314328.950525 1.2.3.4 1234 2.3.4.5 80 failure MX +1342748961.748481 1.2.3.4 1234 2.3.4.5 80 success unknown +1342748961.748481 1.2.3.4 1234 2.3.4.5 80 failure US +1342748961.748481 1.2.3.4 1234 2.3.4.5 80 failure UK +1342748961.748481 1.2.3.4 1234 2.3.4.5 80 success BR +1342748961.748481 1.2.3.4 1234 2.3.4.5 80 failure MX +#end 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.types/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.types/ssh.log index 74aa0312a1..1fc29dbb4e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.types/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.types/ssh.log @@ -3,6 +3,8 @@ #empty_field EMPTY #unset_field - #path ssh +#start 2012-07-20-01-49-22 #fields b i e c p sn a d t iv s sc ss se vc ve f #types bool int enum count port subnet addr double time interval string table[count] table[string] table[string] vector[count] vector[string] func -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1324314329.051618 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1342748962.114672 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +#end 2012-07-20-01-49-22 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.unset-record/testing.log b/testing/btest/Baseline/scripts.base.frameworks.logging.unset-record/testing.log index 7956ad11a0..b4089aeee8 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.unset-record/testing.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.unset-record/testing.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path testing +#start 
2012-07-20-01-49-22 #fields a.val1 a.val2 b #types count count count - - 6 1 2 3 +#end 2012-07-20-01-49-22 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.vec/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.vec/ssh.log index 65ab5592bf..ae5d6d246e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.vec/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.vec/ssh.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path ssh +#start 2012-07-20-01-49-22 #fields vec #types vector[string] -,2,-,-,5 +#end 2012-07-20-01-49-22 diff --git a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log index a22deb26e4..a3f476c1fb 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log +++ b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path metrics +#start 2012-07-20-01-50-41 #fields ts metric_id filter_name index.host index.str index.network value #types time enum string addr string subnet count -1328303679.867377 TEST_METRIC foo-bar 6.5.4.3 - - 4 -1328303679.867377 TEST_METRIC foo-bar 7.2.1.5 - - 2 -1328303679.867377 TEST_METRIC foo-bar 1.2.3.4 - - 6 +1342749041.601712 TEST_METRIC foo-bar 6.5.4.3 - - 4 +1342749041.601712 TEST_METRIC foo-bar 7.2.1.5 - - 2 +1342749041.601712 TEST_METRIC foo-bar 1.2.3.4 - - 6 +#end 2012-07-20-01-50-49 diff --git a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log index 4bfb6964ea..b497da5194 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log +++ b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path metrics +#start 2012-07-20-01-49-22 #fields ts metric_id filter_name index.host index.str index.network value #types time enum string addr string subnet count -1328303763.333948 TEST_METRIC foo-bar 6.5.4.3 - - 2 -1328303763.333948 TEST_METRIC foo-bar 7.2.1.5 - - 1 -1328303763.333948 TEST_METRIC foo-bar 1.2.3.4 - - 3 +1342748962.841548 TEST_METRIC foo-bar 6.5.4.3 - - 2 +1342748962.841548 TEST_METRIC foo-bar 7.2.1.5 - - 1 +1342748962.841548 TEST_METRIC foo-bar 1.2.3.4 - - 3 +#end 2012-07-20-01-49-22 diff --git a/testing/btest/Baseline/scripts.base.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log b/testing/btest/Baseline/scripts.base.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log index 59d70896fb..8f3a9dc70c 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path notice +#start 2012-07-20-01-50-59 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network #types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double 
double addr string subnet -1325633225.777902 - - - - - - Test_Notice Threshold crossed by metric_index(host=1.2.3.4) 100/100 - 1.2.3.4 - - 100 manager-1 Notice::ACTION_LOG 6 3600.000000 F - - - - - 1.2.3.4 - - +1342749059.978651 - - - - - - Test_Notice Threshold crossed by metric_index(host=1.2.3.4) 100/100 - 1.2.3.4 - - 100 manager-1 Notice::ACTION_LOG 6 3600.000000 F - - - - - 1.2.3.4 - - +#end 2012-07-20-01-51-08 diff --git a/testing/btest/Baseline/scripts.base.frameworks.metrics.notice/notice.log b/testing/btest/Baseline/scripts.base.frameworks.metrics.notice/notice.log index 58346b79e6..5a214b26cc 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.metrics.notice/notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.metrics.notice/notice.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path notice +#start 2012-07-20-01-49-23 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network #types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double double addr string subnet -1325633274.875473 - - - - - - Test_Notice Threshold crossed by metric_index(host=1.2.3.4) 3/2 - 1.2.3.4 - - 3 bro Notice::ACTION_LOG 6 3600.000000 F - - - - - 1.2.3.4 - - -1325633274.875473 - - - - - - Test_Notice Threshold crossed by metric_index(host=6.5.4.3) 2/2 - 6.5.4.3 - - 2 bro Notice::ACTION_LOG 6 3600.000000 F - - - - - 6.5.4.3 - - +1342748963.085888 - - - - - - Test_Notice Threshold crossed by metric_index(host=1.2.3.4) 3/2 - 1.2.3.4 - - 3 bro Notice::ACTION_LOG 6 3600.000000 F - - - - - 1.2.3.4 - - +1342748963.085888 - - - - - - Test_Notice Threshold crossed by metric_index(host=6.5.4.3) 2/2 - 6.5.4.3 - - 2 bro Notice::ACTION_LOG 6 3600.000000 F - - - - - 6.5.4.3 - - +#end 2012-07-20-01-49-23 diff --git a/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log b/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log index 10888b21ec..4903ec0c01 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path notice +#start 2012-07-20-01-51-18 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network #types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double double addr string subnet -1325633122.490990 - - - - - - Test_Notice test notice! - - - - - worker-1 Notice::ACTION_LOG 6 3600.000000 F - - - - - - - - +1342749078.270791 - - - - - - Test_Notice test notice! 
- - - - - worker-1 Notice::ACTION_LOG 6 3600.000000 F - - - - - - - - +#end 2012-07-20-01-51-27 diff --git a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log index 5deac88071..bd77a90c86 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path notice +#start 2012-07-20-01-51-36 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network #types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double double addr string subnet -1325633150.723248 - - - - - - Test_Notice test notice! - - - - - worker-2 Notice::ACTION_LOG 6 3600.000000 F - - - - - - - - +1342749096.545663 - - - - - - Test_Notice test notice! - - - - - worker-2 Notice::ACTION_LOG 6 3600.000000 F - - - - - - - - +#end 2012-07-20-01-51-45 diff --git a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log index 1d168d7613..5a3cdfa69f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path notice +#start 2012-07-20-01-49-23 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude #types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double double -1325633207.922993 - - - - - - Test_Notice test - - - - - bro Notice::ACTION_LOG 6 3600.000000 F - - - - - +1342748963.685754 - - - - - - Test_Notice test - - - - - bro Notice::ACTION_LOG 6 3600.000000 F - - - - - +#end 2012-07-20-01-49-23 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log index 4a20ec39b4..316056fa8c 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path conn +#start 2012-02-21-16-53-13 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1329843175.736107 arKYeMETxOg 141.142.220.235 37604 199.233.217.249 56666 tcp ftp-data 0.112432 0 342 SF - 0 ShAdfFa 4 216 4 562 (empty) @@ -10,3 +11,4 @@ 1329843194.151526 nQcgTWjvg4c 199.233.217.249 61920 141.142.220.235 33582 tcp ftp-data 
0.056211 342 0 SF - 0 ShADaFf 5 614 3 164 (empty) 1329843197.783443 j4u32Pc5bif 199.233.217.249 61918 141.142.220.235 37835 tcp ftp-data 0.056005 77 0 SF - 0 ShADaFf 5 349 3 164 (empty) 1329843161.968492 UWkUyAuUGXf 141.142.220.235 50003 199.233.217.249 21 tcp ftp 38.055625 180 3146 SF - 0 ShAdDfFa 38 2164 25 4458 (empty) +#end 2012-02-21-16-53-20 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/ftp.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/ftp.log index debc093771..cee57182ed 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/ftp.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/ftp.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path ftp +#start 2012-02-21-16-53-13 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p user password command arg mime_type mime_desc file_size reply_code reply_msg tags extraction_file #types time string addr port addr port string string string string string string count count string table[string] file 1329843179.926563 UWkUyAuUGXf 141.142.220.235 50003 199.233.217.249 21 anonymous test RETR ftp://199.233.217.249/./robots.txt text/plain ASCII text 77 226 Transfer complete. - - 1329843197.727769 UWkUyAuUGXf 141.142.220.235 50003 199.233.217.249 21 anonymous test RETR ftp://199.233.217.249/./robots.txt text/plain ASCII text, with CRLF line terminators 77 226 Transfer complete. - - +#end 2012-02-21-16-53-20 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log index 9d19ffaf85..299bdbc4ba 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path conn +#start 2012-02-15-17-43-15 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1329327783.316897 arKYeMETxOg 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49186 2001:470:4867:99::21 57086 tcp ftp-data 0.219721 0 342 SF - 0 ShAdfFa 5 372 4 642 (empty) @@ -11,3 +12,4 @@ 1329327795.571921 j4u32Pc5bif 2001:470:4867:99::21 55785 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49189 tcp ftp-data 0.109813 77 0 SF - 0 ShADFaf 5 449 4 300 (empty) 1329327777.822004 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 tcp ftp 26.658219 310 3448 SF - 0 ShAdDfFa 57 4426 34 5908 (empty) 1329327800.017649 TEfuqmmG4bh 2001:470:4867:99::21 55647 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49190 tcp ftp-data 0.109181 342 0 SF - 0 ShADFaf 5 714 4 300 (empty) +#end 2012-02-15-17-43-24 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log index 8bc2ef2cb7..096b91df65 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path ftp +#start 2012-02-15-17-43-07 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p user password command arg mime_type mime_desc file_size reply_code reply_msg tags extraction_file #types time string addr port addr port string string string 
string string string count count string table[string] file 1329327787.396984 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. - - 1329327795.463946 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. - - +#end 2012-02-15-17-43-24 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.100-continue/http.log b/testing/btest/Baseline/scripts.base.protocols.http.100-continue/http.log index ddcea2e9c7..c457f9b64b 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.100-continue/http.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.100-continue/http.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path http +#start 2009-03-19-05-21-36 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1237440095.634312 UWkUyAuUGXf 192.168.3.103 54102 128.146.216.51 80 1 POST www.osu.edu / - curl/7.17.1 (i386-apple-darwin8.11.1) libcurl/7.17.1 zlib/1.2.3 2001 60731 200 OK 100 Continue - (empty) - - - text/html - - +#end 2009-03-19-05-21-36 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.http-extract-files/http.log b/testing/btest/Baseline/scripts.base.protocols.http.http-extract-files/http.log index cec098a50b..46ae431fc2 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.http-extract-files/http.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.http-extract-files/http.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path http +#start 2005-10-07-23-23-56 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1128727435.634189 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - http-item_141.42.64.125:56730-125.190.109.199:80_resp_1.dat +#end 2005-10-07-23-23-57 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.http-mime-and-md5/http.log b/testing/btest/Baseline/scripts.base.protocols.http.http-mime-and-md5/http.log index d4e5679da1..69e6613a3c 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.http-mime-and-md5/http.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.http-mime-and-md5/http.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path http +#start 2009-11-18-20-58-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count 
string string table[enum] string string table[string] string string file 1258577884.844956 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 1 GET www.mozilla.org /style/enhanced.css http://www.mozilla.org/projects/calendar/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 2675 200 OK - - - (empty) - - - FAKE_MIME - - @@ -10,3 +11,4 @@ 1258577885.317160 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 3 GET www.mozilla.org /images/template/screen/bullet_utility.png http://www.mozilla.org/style/screen.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 94 200 OK - - - (empty) - - - FAKE_MIME - - 1258577885.349639 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 4 GET www.mozilla.org /images/template/screen/key-point-top.png http://www.mozilla.org/style/screen.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 2349 200 OK - - - (empty) - - - image/png e0029eea80812e9a8e57b8d05d52938a - 1258577885.394612 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 5 GET www.mozilla.org /projects/calendar/images/header-sunbird.png http://www.mozilla.org/projects/calendar/calendar.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 27579 200 OK - - - (empty) - - - image/png 30aa926344f58019d047e85ba049ca1e - +#end 2009-11-18-20-58-32 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.http-pipelining/http.log b/testing/btest/Baseline/scripts.base.protocols.http.http-pipelining/http.log index dfaf34acbf..6e7eb96454 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.http-pipelining/http.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.http-pipelining/http.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path http +#start 2009-11-18-20-58-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string file 1258577884.844956 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 1 GET www.mozilla.org /style/enhanced.css http://www.mozilla.org/projects/calendar/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 2675 200 OK - - - (empty) - - - - - @@ -10,3 +11,4 @@ 1258577885.317160 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 3 GET www.mozilla.org /images/template/screen/bullet_utility.png http://www.mozilla.org/style/screen.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 94 200 OK - - - (empty) - - - - - 1258577885.349639 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 4 GET www.mozilla.org /images/template/screen/key-point-top.png http://www.mozilla.org/style/screen.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 2349 200 OK - - - (empty) - - - - - 1258577885.394612 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 5 GET www.mozilla.org /projects/calendar/images/header-sunbird.png http://www.mozilla.org/projects/calendar/calendar.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 27579 200 OK - - - (empty) - - - - - +#end 2009-11-18-20-58-32 diff --git 
a/testing/btest/Baseline/scripts.base.protocols.irc.basic/irc.log b/testing/btest/Baseline/scripts.base.protocols.irc.basic/irc.log index b5c137bcf8..fe18751420 100644 --- a/testing/btest/Baseline/scripts.base.protocols.irc.basic/irc.log +++ b/testing/btest/Baseline/scripts.base.protocols.irc.basic/irc.log @@ -3,9 +3,11 @@ #empty_field (empty) #unset_field - #path irc +#start 2011-07-20-19-12-44 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p nick user command value addl dcc_file_name dcc_file_size extraction_file #types time string addr port addr port string string string string string string count file 1311189164.119437 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 - - NICK bloed - - - - 1311189164.119437 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed - USER sdkfje sdkfje Montreal.QC.CA.Undernet.org dkdkrwq - - - 1311189174.474127 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed sdkfje JOIN #easymovies (empty) - - - 1311189316.326025 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed sdkfje DCC #easymovies (empty) ladyvampress-default(2011-07-07)-OS.zip 42208 - +#end 2011-07-20-19-15-42 diff --git a/testing/btest/Baseline/scripts.base.protocols.irc.dcc-extract/irc.log b/testing/btest/Baseline/scripts.base.protocols.irc.dcc-extract/irc.log index 7513bfb9b8..8bd6bd8394 100644 --- a/testing/btest/Baseline/scripts.base.protocols.irc.dcc-extract/irc.log +++ b/testing/btest/Baseline/scripts.base.protocols.irc.dcc-extract/irc.log @@ -3,9 +3,11 @@ #empty_field (empty) #unset_field - #path irc +#start 2011-07-20-19-12-44 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p nick user command value addl dcc_file_name dcc_file_size dcc_mime_type extraction_file #types time string addr port addr port string string string string string string count string file 1311189164.119437 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 - - NICK bloed - - - - - 1311189164.119437 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed - USER sdkfje sdkfje Montreal.QC.CA.Undernet.org dkdkrwq - - - - 1311189174.474127 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed sdkfje JOIN #easymovies (empty) - - - - 1311189316.326025 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed sdkfje DCC #easymovies (empty) ladyvampress-default(2011-07-07)-OS.zip 42208 FAKE_MIME irc-dcc-item_192.168.1.77:57655-209.197.168.151:1024_1.dat +#end 2011-07-20-19-15-42 diff --git a/testing/btest/Baseline/scripts.base.protocols.smtp.basic/smtp.log b/testing/btest/Baseline/scripts.base.protocols.smtp.basic/smtp.log index 2c1380cb44..eca41f7d09 100644 --- a/testing/btest/Baseline/scripts.base.protocols.smtp.basic/smtp.log +++ b/testing/btest/Baseline/scripts.base.protocols.smtp.basic/smtp.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path smtp +#start 2009-10-05-06-06-12 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth helo mailfrom rcptto date from to reply_to msg_id in_reply_to subject x_originating_ip first_received second_received last_reply path user_agent #types time string addr port addr port count string string table[string] string string table[string] string string string string addr string string string vector[addr] string 1254722768.219663 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 GP Mon, 5 Oct 2009 11:36:07 +0530 "Gurpartap Singh" - <000301ca4581$ef9e57f0$cedb07d0$@in> - SMTP - - - 250 OK id=1Mugho-0003Dg-Un 74.53.140.153,10.10.1.4 Microsoft Office Outlook 12.0 +#end 2009-10-05-06-06-16 diff --git 
a/testing/btest/Baseline/scripts.base.protocols.smtp.mime-extract/smtp_entities.log b/testing/btest/Baseline/scripts.base.protocols.smtp.mime-extract/smtp_entities.log index 453b55932e..9bae222897 100644 --- a/testing/btest/Baseline/scripts.base.protocols.smtp.mime-extract/smtp_entities.log +++ b/testing/btest/Baseline/scripts.base.protocols.smtp.mime-extract/smtp_entities.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path smtp_entities +#start 2009-10-05-06-06-10 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth filename content_len mime_type md5 extraction_file excerpt #types time string addr port addr port count string count string string file string 1254722770.692743 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 - 79 FAKE_MIME - smtp-entity_10.10.1.4:1470-74.53.140.153:25_1.dat (empty) 1254722770.692743 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 - 1918 FAKE_MIME - - (empty) 1254722770.692804 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 NEWS.txt 10823 FAKE_MIME - smtp-entity_10.10.1.4:1470-74.53.140.153:25_2.dat (empty) +#end 2009-10-05-06-06-16 diff --git a/testing/btest/Baseline/scripts.base.protocols.smtp.mime/smtp_entities.log b/testing/btest/Baseline/scripts.base.protocols.smtp.mime/smtp_entities.log index 2b471782d5..5cb4bb15ef 100644 --- a/testing/btest/Baseline/scripts.base.protocols.smtp.mime/smtp_entities.log +++ b/testing/btest/Baseline/scripts.base.protocols.smtp.mime/smtp_entities.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path smtp_entities +#start 2009-10-05-06-06-10 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth filename content_len mime_type md5 extraction_file excerpt #types time string addr port addr port count string count string string file string 1254722770.692743 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 - 79 FAKE_MIME 92bca2e6cdcde73647125da7dccbdd07 - (empty) 1254722770.692743 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 - 1918 FAKE_MIME - - (empty) 1254722770.692804 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 NEWS.txt 10823 FAKE_MIME a968bb0f9f9d95835b2e74c845877e87 - (empty) +#end 2009-10-05-06-06-16 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log index 08d31fdb69..960ea71720 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path socks +#start 2012-06-20-17-23-38 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status request.host request.name request_p bound.host bound.name bound_p #types time string addr port addr port count string string addr string port addr string port 1340213015.276495 UWkUyAuUGXf 10.0.0.55 53994 60.190.189.214 8124 5 - succeeded - www.osnews.com 80 192.168.0.31 - 2688 +#end 2012-06-20-17-28-10 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log index a7068cd0da..d914b3074e 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path tunnel +#start 2012-06-20-17-23-35 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1340213015.276495 - 10.0.0.55 0 
60.190.189.214 8124 Tunnel::SOCKS Tunnel::DISCOVER +#end 2012-06-20-17-28-10 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log index 8fd109f3a4..ef07cc31a5 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path socks +#start 2012-06-19-13-41-02 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status request.host request.name request_p bound.host bound.name bound_p #types time string addr port addr port count string string addr string port addr string port 1340113261.914619 UWkUyAuUGXf 10.0.0.50 59580 85.194.84.197 1080 5 - succeeded - www.google.com 443 0.0.0.0 - 443 +#end 2012-06-19-13-41-05 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log index 5eac3ae7ad..10f079b888 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path tunnel +#start 2012-06-19-13-41-01 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1340113261.914619 - 10.0.0.50 0 85.194.84.197 1080 Tunnel::SOCKS Tunnel::DISCOVER +#end 2012-06-19-13-41-05 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log index 4723cb99c4..4299e302ce 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path tunnel +#start 2008-04-15-22-43-49 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1208299429.265774 - 127.0.0.1 0 127.0.0.1 1080 Tunnel::SOCKS Tunnel::DISCOVER +#end 2008-04-15-22-43-49 diff --git a/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log b/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log index 74156362e5..b77925e498 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log +++ b/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path ssl +#start 2012-04-27-14-53-12 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher server_name session_id subject issuer_subject not_valid_before not_valid_after last_alert #types time string addr port addr port string string string string string string time time string 1335538392.319381 UWkUyAuUGXf 192.168.1.105 62045 74.125.224.79 443 TLSv10 TLS_ECDHE_RSA_WITH_RC4_128_SHA ssl.gstatic.com - CN=*.gstatic.com,O=Google Inc,L=Mountain View,ST=California,C=US CN=Google Internet Authority,O=Google Inc,C=US 1334102677.000000 1365639277.000000 - +#end 2012-04-27-14-53-16 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-all.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-all.log index 0799292857..6951e4d51f 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-all.log +++ 
b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-all.log @@ -3,9 +3,11 @@ #empty_field (empty) #unset_field - #path known_hosts +#start 2011-03-18-19-06-08 #fields ts host #types time addr 1300475168.783842 141.142.220.118 1300475168.783842 208.80.152.118 1300475168.915940 208.80.152.3 1300475168.962628 208.80.152.2 +#end 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-local.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-local.log index 6fdba24d39..b70a701448 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-local.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-local.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path known_hosts +#start 2011-03-18-19-06-08 #fields ts host #types time addr 1300475168.783842 141.142.220.118 +#end 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-remote.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-remote.log index 9ef6ee47b7..8e9d8c6c79 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-remote.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-remote.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path known_hosts +#start 2011-03-18-19-06-08 #fields ts host #types time addr 1300475168.783842 208.80.152.118 1300475168.915940 208.80.152.3 1300475168.962628 208.80.152.2 +#end 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log index d53da6f693..25198e92d5 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log @@ -3,6 +3,7 @@ #empty_field (empty) #unset_field - #path known_services +#start 2011-06-24-15-51-31 #fields ts host port_num port_proto service #types time addr port enum table[string] 1308930691.049431 172.16.238.131 22 tcp SSH @@ -10,3 +11,4 @@ 1308930716.462556 74.125.225.81 80 tcp HTTP 1308930718.361665 172.16.238.131 21 tcp FTP 1308930726.872485 141.142.192.39 22 tcp SSH +#end 2011-06-24-15-52-08 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-local.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-local.log index ef1722d6a1..598f49fa65 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-local.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-local.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path known_services +#start 2011-06-24-15-51-31 #fields ts host port_num port_proto service #types time addr port enum table[string] 1308930691.049431 172.16.238.131 22 tcp SSH 1308930694.550308 172.16.238.131 80 tcp HTTP 1308930718.361665 172.16.238.131 21 tcp FTP +#end 2011-06-24-15-52-08 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log index 3fc68cdb91..c248b18146 100644 --- 
a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path known_services +#start 2011-06-24-15-51-56 #fields ts host port_num port_proto service #types time addr port enum table[string] 1308930716.462556 74.125.225.81 80 tcp HTTP 1308930726.872485 141.142.192.39 22 tcp SSH +#end 2011-06-24-15-52-08 diff --git a/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log b/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log index f636093677..fb024db6d2 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log +++ b/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path dns +#start 1999-06-28-23-40-27 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs auth addl #types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] table[string] table[string] 930613226.529070 UWkUyAuUGXf 212.180.42.100 25000 131.243.64.3 53 tcp 34798 - - - - - 0 NOERROR F F F T 0 4.3.2.1 31337.000000 - - +#end 1999-06-28-23-40-27 diff --git a/testing/btest/istate/events-ssl.bro b/testing/btest/istate/events-ssl.bro index afbee3f6d9..e09bf112fd 100644 --- a/testing/btest/istate/events-ssl.bro +++ b/testing/btest/istate/events-ssl.bro @@ -7,8 +7,8 @@ # @TEST-EXEC: btest-diff sender/http.log # @TEST-EXEC: btest-diff receiver/http.log # -# @TEST-EXEC: cat sender/http.log $SCRIPTS/diff-remove-timestamps >sender.http.log -# @TEST-EXEC: cat receiver/http.log $SCRIPTS/diff-remove-timestamps >receiver.http.log +# @TEST-EXEC: cat sender/http.log | $SCRIPTS/diff-remove-timestamps >sender.http.log +# @TEST-EXEC: cat receiver/http.log | $SCRIPTS/diff-remove-timestamps >receiver.http.log # @TEST-EXEC: cmp sender.http.log receiver.http.log # # @TEST-EXEC: bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' | $SCRIPTS/diff-remove-timestamps >events.snd.log diff --git a/testing/btest/istate/events.bro b/testing/btest/istate/events.bro index 9298ac1c01..70726a9f20 100644 --- a/testing/btest/istate/events.bro +++ b/testing/btest/istate/events.bro @@ -7,8 +7,8 @@ # @TEST-EXEC: btest-diff sender/http.log # @TEST-EXEC: btest-diff receiver/http.log # -# @TEST-EXEC: cat sender/http.log $SCRIPTS/diff-remove-timestamps >sender.http.log -# @TEST-EXEC: cat receiver/http.log $SCRIPTS/diff-remove-timestamps >receiver.http.log +# @TEST-EXEC: cat sender/http.log | $SCRIPTS/diff-remove-timestamps >sender.http.log +# @TEST-EXEC: cat receiver/http.log | $SCRIPTS/diff-remove-timestamps >receiver.http.log # @TEST-EXEC: cmp sender.http.log receiver.http.log # # @TEST-EXEC: bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' | $SCRIPTS/diff-remove-timestamps >events.snd.log diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro b/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro index f2c370a27a..1d0742216d 100644 --- a/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro +++ b/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro @@ -1,5 +1,6 @@ # # 
@TEST-EXEC: bro -b %INPUT +# @TEST-EXEC: cat ssh.log | egrep -v '#start|#end' >ssh.log.tmp && mv ssh.log.tmp ssh.log # @TEST-EXEC: btest-diff ssh.log redef LogAscii::separator = "||"; diff --git a/testing/scripts/diff-remove-timestamps b/testing/scripts/diff-remove-timestamps index 9398c1cb4b..cbb5aa5c0e 100755 --- a/testing/scripts/diff-remove-timestamps +++ b/testing/scripts/diff-remove-timestamps @@ -3,4 +3,4 @@ # Replace anything which looks like timestamps with XXXs (including the #start/end markers in logs). sed 's/[0-9]\{10\}\.[0-9]\{2,8\}/XXXXXXXXXX.XXXXXX/g' | \ -sed 's/^#\(start\|end\).20..-..-..-..-..-..$/#\1 XXXX-XX-XX-XX-XX-XX/g' +sed 's/^#\(start\|end\).\(19\|20\)..-..-..-..-..-..$/#\1 XXXX-XX-XX-XX-XX-XX/g' From 7fde1165e931ae5007b3d2071fcd1a2e4a8f9b60 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 19 Jul 2012 09:41:44 -0700 Subject: [PATCH 495/651] Give configure a --disable-perftools option. This disables Perftools support even if found. Linking in tcmalloc can make debugging memory problems quite a bit hard (including confusing valgrind). --- CMakeLists.txt | 4 +++- configure | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 28b702ab01..bea83b0de6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -91,7 +91,9 @@ endif () set(USE_PERFTOOLS false) set(USE_PERFTOOLS_DEBUG false) -find_package(GooglePerftools) +if (NOT DISABLE_PERFTOOLS) + find_package(GooglePerftools) +endif () if (GOOGLEPERFTOOLS_FOUND) include_directories(BEFORE ${GooglePerftools_INCLUDE_DIR}) diff --git a/configure b/configure index 3258d4abfc..2de4be62c4 100755 --- a/configure +++ b/configure @@ -33,6 +33,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --disable-broccoli don't build or install the Broccoli library --disable-broctl don't install Broctl --disable-auxtools don't build or install auxiliary tools + --disable-perftools don't try to build python with Google Perftools --disable-python don't try to build python bindings for broccoli --disable-ruby don't try to build ruby bindings for broccoli @@ -105,6 +106,7 @@ append_cache_entry INSTALL_BROCCOLI BOOL true append_cache_entry INSTALL_BROCTL BOOL true append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING append_cache_entry ENABLE_MOBILE_IPV6 BOOL false +append_cache_entry DISABLE_PERFTOOLS BOOL false # parse arguments while [ $# -ne 0 ]; do @@ -156,6 +158,9 @@ while [ $# -ne 0 ]; do --disable-auxtools) append_cache_entry INSTALL_AUX_TOOLS BOOL false ;; + --disable-perftools) + append_cache_entry DISABLE_PERFTOOLS BOOL true + ;; --disable-python) append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true ;; From db3d89d290203a0adb7ba23885198c48bb8ea026 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 20 Jul 2012 08:51:39 -0400 Subject: [PATCH 496/651] Some documentation updates for elasticsearch plugin. --- .../frameworks/logging/writers/elasticsearch.bro | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro index adc675e487..2a58f95ae9 100644 --- a/scripts/base/frameworks/logging/writers/elasticsearch.bro +++ b/scripts/base/frameworks/logging/writers/elasticsearch.bro @@ -1,7 +1,16 @@ +##! Log writer for sending logs to an ElasticSearch server. +##! +##! Note: This module is in testing and is not yet considered stable! +##! +##! There is one known memory issue. If your elasticsearch server is +##! 
running slowly and taking too long to return from bulk insert +##! requests, the message queue to the writer thread will continue +##! growing larger and larger giving the appearance of a memory leak. + module LogElasticSearch; export { - ## Name of the ES cluster + ## Name of the ES cluster const cluster_name = "elasticsearch" &redef; ## ES Server @@ -18,16 +27,16 @@ export { const type_prefix = "" &redef; ## The time before an ElasticSearch transfer will timeout. + ## This is not working! const transfer_timeout = 2secs; ## The batch size is the number of messages that will be queued up before ## they are sent to be bulk indexed. - ## Note: this is mainly a memory usage parameter. const max_batch_size = 1000 &redef; ## The maximum amount of wall-clock time that is allowed to pass without ## finishing a bulk log send. This represents the maximum delay you - ## would like to have with your logs before they show up in ElasticSearch. + ## would like to have with your logs before they are sent to ElasticSearch. const max_batch_interval = 1min &redef; ## The maximum byte size for a buffered JSON string to send to the bulk From 0a681367b70e03fbb938146ec497546aa01d4ec8 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 20 Jul 2012 06:58:39 -0700 Subject: [PATCH 497/651] Revert "Fixing calc_next_rotate to use UTC based time functions." This reverts commit 6335dbb5e1cf694afea3c306012a258614d13880. --- src/util.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util.cc b/src/util.cc index abbea3e906..3cfa5fca1c 100644 --- a/src/util.cc +++ b/src/util.cc @@ -1112,9 +1112,9 @@ double calc_next_rotate(double current, double interval, double base) time_t teatime = time_t(current); struct tm t; - t = *gmtime_r(&teatime, &t); + t = *localtime_r(&teatime, &t); t.tm_hour = t.tm_min = t.tm_sec = 0; - double startofday = timegm(&t); + double startofday = mktime(&t); if ( base < 0 ) // No base time given. To get nice timestamps, we round From 2efebcd8bea8dbbc446de02054814b0f0f9da39b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 20 Jul 2012 07:04:37 -0700 Subject: [PATCH 498/651] Updating submodule(s). [nomail] --- aux/broctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broctl b/aux/broctl index d5ecd1a42c..231358f166 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit d5ecd1a42c04b0dca332edc31811e5a6d0f7f2fb +Subproject commit 231358f166f61cc32201a8ac3671ea0c0f5c324e From 7bd8367076eeba1e3ef4a8c7d4d29f22355d518f Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 20 Jul 2012 11:02:09 -0400 Subject: [PATCH 499/651] More documentation updates. --- doc/logging-elasticsearch.rst | 80 ++++++++++++++++------------------- 1 file changed, 37 insertions(+), 43 deletions(-) diff --git a/doc/logging-elasticsearch.rst b/doc/logging-elasticsearch.rst index b6d22cf5fa..7571c68219 100644 --- a/doc/logging-elasticsearch.rst +++ b/doc/logging-elasticsearch.rst @@ -1,28 +1,38 @@ -======================================== +========================================= Indexed Logging Output with ElasticSearch -======================================== +========================================= .. rst-class:: opening Bro's default ASCII log format is not exactly the most efficient - way for storing and searching large volumes of data. ElasticSearch - is a new and exciting technology for dealing with tons of data. - ElasticSearch is a search engine built on top of Apache's Lucene + way for searching large volumes of data. 
ElasticSearch + is a new data storage technology for dealing with tons of data. + It's also a search engine built on top of Apache's Lucene project. It scales very well, both for distributed indexing and distributed searching. .. contents:: +Warning +------- + +This writer plugin is still in testing and is not yet recommended for +production use! The approach to how logs are handled in the plugin is "fire +and forget" at this time, there is no error handling if the server fails to +respond successfully to the insertion request. + Installing ElasticSearch ------------------------ -ElasticSearch requires a JRE to run. Please download the latest version -from: . Once extracted, start -ElasticSearch with:: +Download the latest version from: . +Once extracted, start ElasticSearch with:: # ./bin/elasticsearch +For more detailed information, refer to the ElasticSearch installation +documentation: http://www.elasticsearch.org/guide/reference/setup/installation.html + Compiling Bro with ElasticSearch Support ---------------------------------------- @@ -41,49 +51,32 @@ First, ensure that you have libcurl installed the run configure.:: Activating ElasticSearch ------------------------ -The direct way to use ElasticSearch is to switch *all* log files over to -ElasticSearch. To do that, just add ``redef -Log::default_writer=Log::WRITER_ELASTICSEARCH;`` to your ``local.bro``. -For testing, you can also just pass that on the command line:: +The easiest way to enable ElasticSearch output is to load the tuning/logs-to- +elasticsearch.bro script. If you are using BroControl, the following line in +local.bro will enable it. - bro -r trace.pcap Log::default_writer=Log::WRITER_ELASTICSEARCH +.. console:: -With that, Bro will now write all its output into ElasticSearch. You can -inspect these using ElasticSearch's REST-ful interface. For more -information, see: . + @load tuning/logs-to-elasticsearch -There is also a rudimentary web interface to ElasticSearch, available at: -. +With that, Bro will now write most of its logs into ElasticSearch in addition +to maintaining the Ascii logs like it would do by default. That script has +some tunable options for choosing which logs to send to ElasticSearch, refer +to the autogenerated script documentation for those options. -You can also switch only individual files over to ElasticSearch by adding -code like this to your ``local.bro``:: +There is an interface being written specifically to integrate with the data +that Bro outputs into ElasticSearch named Brownian. It can be found here:: -.. code::bro + https://github.com/grigorescu/Brownian - event bro_init() - { - local f = Log::get_filter(Conn::LOG, "default"); # Get default filter for connection log. - f$writer = Log::WRITER_ELASTICSEARCH; # Change writer type. - Log::add_filter(Conn::LOG, f); # Replace filter with adapted version. - } +Tuning +------ -Configuring ElasticSearch -------------------------- +A common problem encountered with ElasticSearch is too many files being held +open. The ElasticSearch website has some suggestions on how to increase the +open file limit. -Bro's ElasticSearch writer comes with a few configuration options:: - -- cluster_name: Currently unused. - -- server_host: Where to send the data. Default localhost. - -- server_port: What port to send the data to. Default 9200. - -- index_prefix: ElasticSearch indexes are like databases in a standard DB model. - This is the name of the index to which to send the data. Default bro. 
- -- type_prefix: ElasticSearch types are like tables in a standard DB model. This is a prefix that gets prepended to Bro log names. Example: type_prefix = "bro_" would create types "bro_dns", "bro_http", etc. Default: none. - -- batch_size: How many messages to buffer before sending to ElasticSearch. This is mainly a memory optimization - changing this doesn't seem to affect indexing performance that much. Default: 10,000. + - http://www.elasticsearch.org/tutorials/2011/04/06/too-many-open-files.html TODO ---- @@ -93,3 +86,4 @@ Lots. - Perform multicast discovery for server. - Better error detection. - Better defaults (don't index loaded-plugins, for instance). +- From c5d1aebbfe8c49ba89dd9d0c906f5ae38669497b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 20 Jul 2012 09:01:25 -0700 Subject: [PATCH 500/651] Temporarily removing tuning/logs-to-elasticsearch.bro from the test-all-policy. Loading it in there can lead to some tests not terminating. We need to fix that, it let's the coverage test fail. --- scripts/test-all-policy.bro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/test-all-policy.bro b/scripts/test-all-policy.bro index a7c43b14b3..c4acece25d 100644 --- a/scripts/test-all-policy.bro +++ b/scripts/test-all-policy.bro @@ -60,5 +60,5 @@ @load tuning/defaults/__load__.bro @load tuning/defaults/packet-fragments.bro @load tuning/defaults/warnings.bro -@load tuning/logs-to-elasticsearch.bro +# @load tuning/logs-to-elasticsearch.bro @load tuning/track-all-assets.bro From ce4b8dd4aca99c4e1013b5c843df30bfedc54cfd Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 20 Jul 2012 09:57:38 -0700 Subject: [PATCH 501/651] Changing HTTP DPD port 3138 to 3128. Addresses #857. --- scripts/base/protocols/http/main.bro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/base/protocols/http/main.bro b/scripts/base/protocols/http/main.bro index f4377e03de..21b4fb6113 100644 --- a/scripts/base/protocols/http/main.bro +++ b/scripts/base/protocols/http/main.bro @@ -114,7 +114,7 @@ event bro_init() &priority=5 # DPD configuration. const ports = { - 80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3138/tcp, + 80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3128/tcp, 8000/tcp, 8080/tcp, 8888/tcp, }; redef dpd_config += { From 5ef83900d8b30a44fe86eb97501ba8cc53c06194 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 20 Jul 2012 12:28:34 -0700 Subject: [PATCH 502/651] Sed usage in canonifier script didn't work on non-Linux systems. --- testing/scripts/diff-remove-timestamps | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/testing/scripts/diff-remove-timestamps b/testing/scripts/diff-remove-timestamps index cbb5aa5c0e..e235746f93 100755 --- a/testing/scripts/diff-remove-timestamps +++ b/testing/scripts/diff-remove-timestamps @@ -2,5 +2,13 @@ # # Replace anything which looks like timestamps with XXXs (including the #start/end markers in logs). +# Get us "modern" regexps with sed. +if [ `uname` == "Linux" ]; then + sed="sed" +else + sed="sed -E" +fi + +# The first sed uses a "basic" regexp, the 2nd a "modern:. 
sed 's/[0-9]\{10\}\.[0-9]\{2,8\}/XXXXXXXXXX.XXXXXX/g' | \ -sed 's/^#\(start\|end\).\(19\|20\)..-..-..-..-..-..$/#\1 XXXX-XX-XX-XX-XX-XX/g' +$sed 's/^#(start|end).(19|20)..-..-..-..-..-..$/#\1 XXXX-XX-XX-XX-XX-XX/g' From 58e2b70fc806621a833d13a88fbee4562f6753ba Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 20 Jul 2012 14:37:14 -0700 Subject: [PATCH 503/651] make version_ok return true for TLSv12 I think it is a bug that this was missing... --- src/ssl-analyzer.pac | 1 + src/ssl-defs.pac | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/ssl-analyzer.pac b/src/ssl-analyzer.pac index d1ac470284..3d9564eaab 100644 --- a/src/ssl-analyzer.pac +++ b/src/ssl-analyzer.pac @@ -93,6 +93,7 @@ function version_ok(vers : uint16) : bool case SSLv30: case TLSv10: case TLSv11: + case TLSv12: return true; default: diff --git a/src/ssl-defs.pac b/src/ssl-defs.pac index b13b7c4881..4f715bbddd 100644 --- a/src/ssl-defs.pac +++ b/src/ssl-defs.pac @@ -22,5 +22,6 @@ enum SSLVersions { SSLv20 = 0x0002, SSLv30 = 0x0300, TLSv10 = 0x0301, - TLSv11 = 0x0302 + TLSv11 = 0x0302, + TLSv12 = 0x0303 }; From 053b307e24ee247137dcef031caaeadf681f126d Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sun, 22 Jul 2012 13:42:31 -0700 Subject: [PATCH 504/651] Bug fix for BasicThread. --- src/threading/BasicThread.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index d4a82316e8..9c113fb7ec 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -42,24 +42,24 @@ BasicThread::~BasicThread() delete [] strerr_buffer; } -void BasicThread::SetName(const char* name) +void BasicThread::SetName(const char* arg_name) { delete [] name; - name = copy_string(name); + name = copy_string(arg_name); } -void BasicThread::SetOSName(const char* name) +void BasicThread::SetOSName(const char* arg_name) { #ifdef HAVE_LINUX - prctl(PR_SET_NAME, name, 0, 0, 0); + prctl(PR_SET_NAME, arg_name, 0, 0, 0); #endif #ifdef __APPLE__ - pthread_setname_np(name); + pthread_setname_np(arg_name); #endif #ifdef FREEBSD - pthread_set_name_np(pthread_self(), name, name); + pthread_set_name_np(pthread_self(), arg_name, arg_name); #endif } From 71fc2a1728d430b10610b324ed92379b1bad3875 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sun, 22 Jul 2012 15:50:12 -0700 Subject: [PATCH 505/651] Another small change to MsgThread API. Threads will now reliably get a call to DoFinish() no matter how the thread terminates. This will always be called from within the thread, whereas the destructor is called from the main thread after the child thread has already terminated. Also removing debugging code. However, two problems remain with the ASCII writer (seeing them only on MacOS): - the #start/#end timestamps contain only dummy values right now. The odd thing is that once I enable strftime() to print actual timestamps, I get crashes (even though strftime() is supposed to be thread-safe). - occassionally, there's still output missing in tests. In those cases, the file descriptor apparently goes bad: a write() will suddently return EBADF for reasons I don't understand yet. 
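For reference, the strftime() point above concerns the reentrant formatting pattern sketched below: localtime_r() and strftime() both write only into caller-supplied storage, so in principle no state is shared between threads. This is a simplified illustration of the intended usage (the helper name is arbitrary), not the writer's actual code:

    #include <ctime>
    #include <string>

    // Format a timestamp as YYYY-MM-DD-HH-MM-SS using only reentrant calls.
    // Both the struct tm and the output buffer are local to the caller.
    static std::string format_timestamp(double t)
        {
        time_t teatime = static_cast<time_t>(t);

        struct tm tmbuf;
        if ( ! localtime_r(&teatime, &tmbuf) )
            return "";

        char buf[128];
        if ( ! strftime(buf, sizeof(buf), "%Y-%m-%d-%H-%M-%S", &tmbuf) )
            return "";

        return buf;
        }
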
--- src/logging/writers/Ascii.cc | 31 ++++++++++++++++++++----------- src/threading/BasicThread.cc | 8 ++------ src/threading/MsgThread.cc | 11 ++++++++--- src/threading/MsgThread.h | 11 +++++------ src/util.cc | 3 +++ testing/scripts/diff-canonifier | 2 +- 6 files changed, 39 insertions(+), 27 deletions(-) diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index d3c210ce47..87fa5dfb3c 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -53,12 +53,11 @@ Ascii::Ascii(WriterFrontend* frontend) : WriterBackend(frontend) Ascii::~Ascii() { - //fprintf(stderr, "DTOR %p\n", this); - - // Normally, the file will be closed here already via the Finish() - // message. But when we terminate abnormally, we may still have it open. - if ( fd ) - CloseFile(0); + if ( ! ascii_done ) + { + fprintf(stderr, "internal error: finish missing\n"); + abort(); + } delete [] separator; delete [] set_separator; @@ -77,7 +76,7 @@ bool Ascii::WriteHeaderField(const string& key, const string& val) void Ascii::CloseFile(double t) { - if ( ! fd) + if ( ! fd ) return; if ( include_meta ) @@ -170,7 +169,7 @@ bool Ascii::DoFinish(double network_time) { if ( ascii_done ) { - fprintf(stderr, "duplicate finish message\n"); + fprintf(stderr, "internal error: duplicate finish\n"); abort(); } @@ -353,6 +352,7 @@ bool Ascii::DoWrite(int num_fields, const Field* const * fields, // It would so escape the first character. char buf[16]; snprintf(buf, sizeof(buf), "\\x%02x", bytes[0]); + if ( ! safe_write(fd, buf, strlen(buf)) ) goto write_error; @@ -416,14 +416,23 @@ string Ascii::LogExt() string Ascii::Timestamp(double t) { +#if 1 + return "2012-01-01-00-00-00"; +#else + // Using the version below leads to occasional crashes at least on Mac OS. + // Not sure why, all the function should be thread-safe ... + time_t teatime = time_t(t); struct tm tmbuf; struct tm* tm = localtime_r(&teatime, &tmbuf); - char buf[128]; + char tmp[128]; const char* const date_fmt = "%Y-%m-%d-%H-%M-%S"; - strftime(buf, sizeof(buf), date_fmt, tm); - return buf; + strftime(tmp, sizeof(tmp), date_fmt, tm); + + return tmp; +#endif } + diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index 9c113fb7ec..c708bb79ef 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -35,7 +35,7 @@ BasicThread::BasicThread() BasicThread::~BasicThread() { - if ( buf ) + if ( buf ) free(buf); delete [] name; @@ -50,6 +50,7 @@ void BasicThread::SetName(const char* arg_name) void BasicThread::SetOSName(const char* arg_name) { + #ifdef HAVE_LINUX prctl(PR_SET_NAME, arg_name, 0, 0, 0); #endif @@ -131,16 +132,12 @@ void BasicThread::PrepareStop() void BasicThread::Stop() { - // XX fprintf(stderr, "stop1 %s %d %d\n", name, started, terminating); - if ( ! 
started ) return; if ( terminating ) return; - // XX fprintf(stderr, "stop2 %s\n", name); - DBG_LOG(DBG_THREADING, "Signaling thread %s to terminate ...", name); OnStop(); @@ -177,7 +174,6 @@ void BasicThread::Kill() void BasicThread::Done() { - // XX fprintf(stderr, "DONE from thread %s\n", name); DBG_LOG(DBG_THREADING, "Thread %s has finished", name); terminating = true; diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 0e55b99ba1..121bec265c 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -155,8 +155,6 @@ void MsgThread::OnPrepareStop() if ( finished || Killed() ) return; - // XX fprintf(stderr, "Sending FINISH to thread %s ...\n", Name()); - // Signal thread to terminate and wait until it has acknowledged. SendIn(new FinishMessage(this, network_time), true); } @@ -356,7 +354,14 @@ void MsgThread::Run() delete msg; } - Finished(); + // In case we haven't send the finish method yet, do it now. Reading + // global network_time here should be fine, it isn't changing + // anymore. + if ( ! finished ) + { + OnFinish(network_time); + Finished(); + } } void MsgThread::GetStats(Stats* stats) diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 1d9b17c7d9..da505de6be 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -197,10 +197,6 @@ protected: */ virtual void Heartbeat(); - /** Flags that the child process has finished processing. Called from child. - */ - void Finished(); - /** Internal heartbeat processing. Called from child. */ void HeartbeatInChild(); @@ -217,8 +213,7 @@ protected: virtual bool OnHeartbeat(double network_time, double current_time) = 0; /** Triggered for execution in the child thread just before shutting threads down. - * The child thread should finish its operations and then *must* - * call this class' implementation. + * The child thread should finish its operations. */ virtual bool OnFinish(double network_time) = 0; @@ -288,6 +283,10 @@ private: */ bool MightHaveOut() { return queue_out.MaybeReady(); } + /** Flags that the child process has finished processing. Called from child. + */ + void Finished(); + Queue queue_in; Queue queue_out; diff --git a/src/util.cc b/src/util.cc index 553944c69c..cd367cf825 100644 --- a/src/util.cc +++ b/src/util.cc @@ -1301,6 +1301,9 @@ bool safe_write(int fd, const char* data, int len) if ( errno == EINTR ) continue; + fprintf(stderr, "safe_write error: %d\n", errno); + abort(); + return false; } diff --git a/testing/scripts/diff-canonifier b/testing/scripts/diff-canonifier index 4d04b3372c..3cb213a3f7 100755 --- a/testing/scripts/diff-canonifier +++ b/testing/scripts/diff-canonifier @@ -2,4 +2,4 @@ # # Default canonifier used with the tests in testing/btest/*. -`dirname $0`/diff-remove-timestamps | grep -v XXX +`dirname $0`/diff-remove-timestamps From 775961ee1525c9d245ec27d7a82816a3fc0c34b2 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sun, 22 Jul 2012 15:57:26 -0700 Subject: [PATCH 506/651] Updating test base line. (Due to removing the debugging helper in canonification script.) 
--- .../out | 22 ++----------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out index 91b6f5de7a..e2b8a8b377 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out @@ -19,31 +19,13 @@ custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_11.59.55.log, pat custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_12.00.05.log, path=test2, open=1299499205.0, close=1299502795.0, terminating=F] custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_12.59.55.log, path=test2, open=1299502795.0, close=1299502795.0, terminating=T] #empty_field (empty) -#end 2011-03-07-03-59-55 -#end 2011-03-07-04-00-05 -#end 2011-03-07-04-59-55 -#end 2011-03-07-05-00-05 -#end 2011-03-07-05-59-55 -#end 2011-03-07-06-00-05 -#end 2011-03-07-06-59-55 -#end 2011-03-07-07-00-05 -#end 2011-03-07-07-59-55 -#end 2011-03-07-08-00-05 -#end 2011-03-07-08-59-55 -#end 2011-03-07-09-00-05 -#end 2011-03-07-09-59-55 -#end 2011-03-07-10-00-05 -#end 2011-03-07-10-59-55 -#end 2011-03-07-11-00-05 -#end 2011-03-07-11-59-55 -#end 2011-03-07-12-00-05 -#end 2011-03-07-12-59-55 +#end 2012-01-01-00-00-00 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #path test #path test2 #separator \x09 #set_separator , -#start 2011-03-07-03-00-05 +#start 2012-01-01-00-00-00 #types time addr port addr port #unset_field - 1299466805.000000 10.0.0.1 20 10.0.0.2 1024 From f2e60a76a81360a64fec78d3693bea2a22ec389a Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sun, 22 Jul 2012 21:04:59 -0700 Subject: [PATCH 507/651] Script fix for Linux. --- testing/scripts/diff-remove-timestamps | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/scripts/diff-remove-timestamps b/testing/scripts/diff-remove-timestamps index e235746f93..84bd21aa60 100755 --- a/testing/scripts/diff-remove-timestamps +++ b/testing/scripts/diff-remove-timestamps @@ -4,7 +4,7 @@ # Get us "modern" regexps with sed. if [ `uname` == "Linux" ]; then - sed="sed" + sed="sed -r" else sed="sed -E" fi From 336990e234e2903d9e5a596fc1b53f000181cef8 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 23 Jul 2012 11:27:08 -0700 Subject: [PATCH 508/651] make reading ascii logfiles work when the input separator is different from \t. (Wrong escape character was used for reading header fields). 
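The gist of the change, as a stand-alone sketch (function and parameter names are illustrative; the real check lives in the reader's GetLine()): test the "#fields" prefix and the configured separator character separately instead of hard-coding a tab.

#include <string>

// True if `line` is a "#fields" header followed by the configured
// separator character (e.g. '\t' or '|').
static bool is_fields_header(const std::string& line, char separator)
    {
    static const std::string prefix = "#fields";
    return line.size() > prefix.size()
           && line.compare(0, prefix.size(), prefix) == 0
           && line[prefix.size()] == separator;
    }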
--- src/input/readers/Ascii.cc | 4 ++-- testing/btest/scripts/base/frameworks/input/event.bro | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 73821d7cb6..297f8a7136 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -144,7 +144,7 @@ bool Ascii::ReadHeader(bool useCached) pos++; } - //printf("Updating fields from description %s\n", line.c_str()); + // printf("Updating fields from description %s\n", line.c_str()); columnMap.clear(); for ( int i = 0; i < NumFields(); i++ ) @@ -199,7 +199,7 @@ bool Ascii::GetLine(string& str) if ( str[0] != '#' ) return true; - if ( str.compare(0,8, "#fields\t") == 0 ) + if ( ( str.compare(0,7, "#fields") == 0 ) && ( str[7] == separator[0] ) ) { str = str.substr(8); return true; diff --git a/testing/btest/scripts/base/frameworks/input/event.bro b/testing/btest/scripts/base/frameworks/input/event.bro index d275cee59c..f07ca0c43e 100644 --- a/testing/btest/scripts/base/frameworks/input/event.bro +++ b/testing/btest/scripts/base/frameworks/input/event.bro @@ -48,7 +48,7 @@ event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: b event bro_init() { try = 0; - outfile = open("../out"); + outfile = open("../out"); Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line]); Input::remove("input"); } From 8e453663dd4d9540789614582ddce84f877a8b50 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 23 Jul 2012 12:43:42 -0700 Subject: [PATCH 509/651] Input framework now accepts escaped ascii values as input. I managed to completely forget to add unescaping to the input framework - this should fix it. It now works with the exact same escaping that is used by the writers (\x##). Includes one testcase that seems to work - everything else still passes. --- src/input/readers/Ascii.cc | 2 ++ src/util.cc | 70 ++++++++++++++++++++++++++++++-------- src/util.h | 1 + 3 files changed, 58 insertions(+), 15 deletions(-) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 297f8a7136..aaa124f0c1 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -438,6 +438,8 @@ bool Ascii::DoUpdate() if ( ! getline(splitstream, s, separator[0]) ) break; + s = get_unescaped_string(s); + stringfields[pos] = s; pos++; } diff --git a/src/util.cc b/src/util.cc index cd367cf825..544ba1b573 100644 --- a/src/util.cc +++ b/src/util.cc @@ -42,6 +42,46 @@ #include "Net.h" #include "Reporter.h" +/** + * Takes a string, unescapes all characters that are escaped as hex codes + * (\x##) and turns them into the equivalent ascii-codes. Returns a string + * containing no escaped values + * + * @param str string to unescape + * @return A str::string without escaped characters. + */ +std::string get_unescaped_string(const std::string& str) + { + char* buf = new char [str.length() + 1]; // it will at most have the same length as str. + char* bufpos = buf; + size_t pos = 0; + + while ( pos < str.length() ) + { + if ( str[pos] == '\\' && str[pos+1] == 'x' && + isxdigit(str[pos+2]) && isxdigit(str[pos+3]) ) + { + *bufpos = (decode_hex(str[pos+2]) << 4) + + decode_hex(str[pos+3]); + + pos += 4; + bufpos++; + } + else + { + *bufpos = str[pos]; + bufpos++; + pos++; + } + } + + *bufpos = 0; + + string outstring (buf, bufpos - buf); + delete [] buf; + return outstring; + } + /** * Takes a string, escapes characters into equivalent hex codes (\x##), and * returns a string containing all escaped values. 
@@ -53,25 +93,25 @@ * @return A std::string containing a list of escaped hex values of the form * \x## */ std::string get_escaped_string(const std::string& str, bool escape_all) -{ - char tbuf[16]; - string esc = ""; + { + char tbuf[16]; + string esc = ""; - for ( size_t i = 0; i < str.length(); ++i ) - { - char c = str[i]; + for ( size_t i = 0; i < str.length(); ++i ) + { + char c = str[i]; - if ( escape_all || isspace(c) || ! isascii(c) || ! isprint(c) ) - { - snprintf(tbuf, sizeof(tbuf), "\\x%02x", str[i]); - esc += tbuf; + if ( escape_all || isspace(c) || ! isascii(c) || ! isprint(c) ) + { + snprintf(tbuf, sizeof(tbuf), "\\x%02x", str[i]); + esc += tbuf; + } + else + esc += c; } - else - esc += c; - } - return esc; -} + return esc; + } char* copy_string(const char* s) { diff --git a/src/util.h b/src/util.h index a695c6df6a..fc4b60792b 100644 --- a/src/util.h +++ b/src/util.h @@ -90,6 +90,7 @@ void delete_each(T* t) delete *it; } +std::string get_unescaped_string(const std::string& str); std::string get_escaped_string(const std::string& str, bool escape_all); extern char* copy_string(const char* s); From 3163e8462928a0294605d690ed176ed528a64813 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 23 Jul 2012 12:46:09 -0700 Subject: [PATCH 510/651] and like nearly always - forgot the baseline. --- .../btest/Baseline/scripts.base.frameworks.input.binary/out | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.binary/out diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.binary/out b/testing/btest/Baseline/scripts.base.frameworks.input.binary/out new file mode 100644 index 0000000000..deab902925 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.binary/out @@ -0,0 +1,6 @@ +abc^J\xffdef +DATA2 +abc|\xffdef +DATA2 +abc\xff|def +DATA2 From 90735c3164019bd124b26b14f522d4bc16e71f50 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 23 Jul 2012 12:51:07 -0700 Subject: [PATCH 511/651] and just to be a little bit careful - add check if the field description is long enough. Otherwise there might possibly be an access of uninitialized memory, when someone reads a file that contains just #fields without any following field descriptions. --- src/input/readers/Ascii.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index aaa124f0c1..fd936b07b6 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -199,7 +199,7 @@ bool Ascii::GetLine(string& str) if ( str[0] != '#' ) return true; - if ( ( str.compare(0,7, "#fields") == 0 ) && ( str[7] == separator[0] ) ) + if ( ( str.length() > 8 ) && ( str.compare(0,7, "#fields") == 0 ) && ( str[7] == separator[0] ) ) { str = str.substr(8); return true; From 9b0fe744f2805555d5ab0312b2098b1049f2ed31 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 23 Jul 2012 16:47:44 -0500 Subject: [PATCH 512/651] Fix WriterBackend::WriterInfo serialization, reenable ascii start/end tags. Instantiations of WriterInfo in RemoteSerializer::ProcessLogCreateWriter() would leave the network_time member uninitialized which could later cause localtime_r() calls in Ascii::Timestamp() to return a null pointer due to the bizarre input and giving that to strftime() causes it to segfault. 
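The shape of the fix, reduced to a sketch (this is not the actual WriterInfo declaration; the field list is trimmed for illustration): give every member a defined default in the constructor and read/write the same set of fields on both ends, so a deserialized instance never carries garbage.

struct InfoSketch
    {
    const char* path;
    double rotation_interval;
    double rotation_base;
    double network_time;   // the member that used to be left uninitialized

    InfoSketch()
        : path(0), rotation_interval(0.0), rotation_base(0.0),
          network_time(0.0)
        { }
    };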
--- src/logging/WriterBackend.cc | 2 ++ src/logging/WriterBackend.h | 4 ++-- src/logging/writers/Ascii.cc | 9 +-------- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 8f119d6f8f..87db8e4437 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -76,6 +76,7 @@ bool WriterBackend::WriterInfo::Read(SerializationFormat* fmt) if ( ! (fmt->Read(&tmp_path, "path") && fmt->Read(&rotation_base, "rotation_base") && fmt->Read(&rotation_interval, "rotation_interval") && + fmt->Read(&network_time, "network_time") && fmt->Read(&size, "config_size")) ) return false; @@ -105,6 +106,7 @@ bool WriterBackend::WriterInfo::Write(SerializationFormat* fmt) const if ( ! (fmt->Write(path, "path") && fmt->Write(rotation_base, "rotation_base") && fmt->Write(rotation_interval, "rotation_interval") && + fmt->Write(network_time, "network_time") && fmt->Write(size, "config_size")) ) return false; diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index a59cd1893e..1ca5650057 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -81,9 +81,9 @@ public: */ config_map config; - WriterInfo() + WriterInfo() : path(0), rotation_interval(0.0), rotation_base(0.0), + network_time(0.0) { - path = 0; } WriterInfo(const WriterInfo& other) diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 87fa5dfb3c..c77e680a92 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -416,23 +416,16 @@ string Ascii::LogExt() string Ascii::Timestamp(double t) { -#if 1 - return "2012-01-01-00-00-00"; -#else - // Using the version below leads to occasional crashes at least on Mac OS. - // Not sure why, all the function should be thread-safe ... - time_t teatime = time_t(t); struct tm tmbuf; struct tm* tm = localtime_r(&teatime, &tmbuf); - char tmp[128]; + char tmp[128]; const char* const date_fmt = "%Y-%m-%d-%H-%M-%S"; strftime(tmp, sizeof(tmp), date_fmt, tm); return tmp; -#endif } From 5d33e22b4d6516da6060b5b1fb12e804c3255600 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 23 Jul 2012 16:20:59 -0700 Subject: [PATCH 513/651] Updating NEWS. --- CHANGES | 29 +++++++++++++++++++++++++++++ NEWS | 28 +++++++++++++++++++++------- VERSION | 2 +- 3 files changed, 51 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index 950a2abad6..ed5e58c206 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,33 @@ +2.0-844 | 2012-07-23 16:20:59 -0700 + + * Reworking parts of the internal threading/logging/input APIs for + thread-safety. (Robin Sommer) + + * Bugfix for SSL version check. (Bernhard Amann) + + * Changing a HTTP DPD from port 3138 to 3128. Addresses #857. (Robin + Sommer) + + * ElasticSearch logging writer. See logging-elasticsearch.rst for + more information. (Vlad Grigorescu and Seth Hall). + + * Give configure a --disable-perftools option to disable Perftools + support even if found. (Robin Sommer) + + * The ASCII log writer now includes "#start " and "#end + lines in the each file. (Robin Sommer) + + * Renamed ASCII logger "header" options to "meta". (Robin Sommer) + + * ASCII logs now escape '#' at the beginning of log lines. Addresses + #763. (Robin Sommer) + + * Fix bug, where in dns.log rcode always was set to 0/NOERROR when + no reply package was seen. (Bernhard Amann) + + * Updating to Mozilla's current certificate bundle. (Seth Hall) + 2.0-769 | 2012-07-13 16:17:33 -0700 * Fix some Info:Record field documentation. 
(Vlad Grigorescu) diff --git a/NEWS b/NEWS index 0798920d8a..00aeb62132 100644 --- a/NEWS +++ b/NEWS @@ -56,13 +56,6 @@ New Functionality "reader plugins" that make it easy to interface to different data sources. We will add more in the future. -- Bro's default ASCII log format is not exactly the most efficient way - for storing and searching large volumes of data. An an alternative, - Bro now comes with experimental support for DataSeries output, an - efficient binary format for recording structured bulk data. - DataSeries is developed and maintained at HP Labs. See - doc/logging-dataseries for more information. - - BroControl now has built-in support for host-based load-balancing when using either PF_RING, Myricom cards, or individual interfaces. Instead of adding a separate worker entry in node.cfg for each Bro @@ -78,6 +71,24 @@ New Functionality "lb_method=interfaces" to specify which interfaces to load-balance on). +- Bro's default ASCII log format is not exactly the most efficient way + for storing and searching large volumes of data. An alternatives, + Bro now comes with experimental support for two alternative output + formats: + + * DataSeries: an efficient binary format for recording structured + bulk data. DataSeries is developed and maintained at HP Labs. + See doc/logging-dataseries for more information. + + * ElasticSearch: a distributed RESTful, storage engine and search + engine built on top of Apache Lucene. It scales very well, both + for distributed indexing and distributed searching. + + Note that at this point, we consider Bro's support for these two + formats as prototypes for collecting experience with alternative + outputs. We do not yet recommend them for production (but welcome + feedback!) + Changed Functionality ~~~~~~~~~~~~~~~~~~~~~ @@ -146,6 +157,9 @@ the full set. renamed to LogAscii::meta_prefix and LogAscii::include_meta, respectively. +- The ASCII writers "header_*" options have been renamed to "meta_*" + (because there's now also a footer). + Bro 2.0 ------- diff --git a/VERSION b/VERSION index 99ff0a1495..7868a9d201 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-769 +2.0-844 From c6c2d4d5d610c8df37dd1817a3fed314d95361a0 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 23 Jul 2012 16:59:51 -0700 Subject: [PATCH 514/651] Baseline update. 
--- .../out | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out index e2b8a8b377..91b6f5de7a 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out @@ -19,13 +19,31 @@ custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_11.59.55.log, pat custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_12.00.05.log, path=test2, open=1299499205.0, close=1299502795.0, terminating=F] custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_12.59.55.log, path=test2, open=1299502795.0, close=1299502795.0, terminating=T] #empty_field (empty) -#end 2012-01-01-00-00-00 +#end 2011-03-07-03-59-55 +#end 2011-03-07-04-00-05 +#end 2011-03-07-04-59-55 +#end 2011-03-07-05-00-05 +#end 2011-03-07-05-59-55 +#end 2011-03-07-06-00-05 +#end 2011-03-07-06-59-55 +#end 2011-03-07-07-00-05 +#end 2011-03-07-07-59-55 +#end 2011-03-07-08-00-05 +#end 2011-03-07-08-59-55 +#end 2011-03-07-09-00-05 +#end 2011-03-07-09-59-55 +#end 2011-03-07-10-00-05 +#end 2011-03-07-10-59-55 +#end 2011-03-07-11-00-05 +#end 2011-03-07-11-59-55 +#end 2011-03-07-12-00-05 +#end 2011-03-07-12-59-55 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #path test #path test2 #separator \x09 #set_separator , -#start 2012-01-01-00-00-00 +#start 2011-03-07-03-00-05 #types time addr port addr port #unset_field - 1299466805.000000 10.0.0.1 20 10.0.0.2 1024 From f887535f1c706a727f683c2450114d4c5e322808 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 23 Jul 2012 17:28:27 -0700 Subject: [PATCH 515/651] fix problem with possible access to unititialized memory (thanks robin :) ) --- src/util.cc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/util.cc b/src/util.cc index 544ba1b573..da046133a6 100644 --- a/src/util.cc +++ b/src/util.cc @@ -50,13 +50,14 @@ * @param str string to unescape * @return A str::string without escaped characters. */ -std::string get_unescaped_string(const std::string& str) +std::string get_unescaped_string(const std::string& arg_str) { - char* buf = new char [str.length() + 1]; // it will at most have the same length as str. + const char* str = arg_str.c_str(); + char* buf = new char [arg_str.length() + 1]; // it will at most have the same length as str. char* bufpos = buf; size_t pos = 0; - while ( pos < str.length() ) + while ( pos < arg_str.length() ) { if ( str[pos] == '\\' && str[pos+1] == 'x' && isxdigit(str[pos+2]) && isxdigit(str[pos+3]) ) From 3f21764d0029b0cae72e2613f914ea69569d8ad4 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 24 Jul 2012 09:09:08 -0700 Subject: [PATCH 516/651] Updating submodule(s). [nomail] --- aux/binpac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/binpac b/aux/binpac index 4ad8d15b63..4f01ea4081 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 4ad8d15b6395925c9875c9d2912a6cc3b4918e0a +Subproject commit 4f01ea40817ad232a96535c64fce7dc16d4e2fff From 3f4b4c88a6e4fc7f14c4620fe9093a11f9b7dd61 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 24 Jul 2012 11:18:32 -0500 Subject: [PATCH 517/651] Fix initialization of WriterFrontend names. 
The string representation of the writer looked up based on the stream's enum value instead of the writer's enum value, often causing this component of the name to be "(null)" since a null pointer was returned from the lookup. --- src/logging/WriterFrontend.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index fc237d6f6e..7c8f6861cf 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -112,7 +112,7 @@ WriterFrontend::WriterFrontend(const WriterBackend::WriterInfo& arg_info, EnumVa write_buffer_pos = 0; info = new WriterBackend::WriterInfo(arg_info); - const char* w = arg_writer->Type()->AsEnumType()->Lookup(arg_stream->InternalInt()); + const char* w = arg_writer->Type()->AsEnumType()->Lookup(arg_writer->InternalInt()); name = copy_string(fmt("%s/%s", arg_info.path, w)); if ( local ) From 13952154a109a69f665a1549b613721384f2599f Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 24 Jul 2012 09:19:20 -0700 Subject: [PATCH 518/651] add comparator functor to the info maps of readerbackend and readerwriteend. This is required, because after the recent changes the info map containst a char* as key. Without the comparator the map will compare the char addresses for all operations - which is not really what we want. --- src/input/ReaderBackend.h | 2 +- src/logging/WriterBackend.h | 2 +- src/util.h | 11 +++++++++++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index 7626cc25ed..8ee14c808a 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -74,7 +74,7 @@ public: struct ReaderInfo { // Structure takes ownership of the strings. - typedef std::map config_map; + typedef std::map config_map; /** * A string left to the interpretation of the reader diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 1ca5650057..d5f2be225e 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -49,7 +49,7 @@ public: struct WriterInfo { // Structure takes ownership of these strings. - typedef std::map config_map; + typedef std::map config_map; /** * A string left to the interpretation of the writer diff --git a/src/util.h b/src/util.h index a695c6df6a..030a704092 100644 --- a/src/util.h +++ b/src/util.h @@ -345,4 +345,15 @@ inline int safe_vsnprintf(char* str, size_t size, const char* format, va_list al extern void get_memory_usage(unsigned int* total, unsigned int* malloced); +// class to be used as a third argument for stl maps to be able to use +// char*'s as keys. Otherwise the pointer values will be compared instead +// of the actual string values. +struct CompareString + { + bool operator()(char const *a, char const *b) const + { + return std::strcmp(a, b) < 0; + } + }; + #endif From cfa8769a422fa9ec1eeb4592f8b2eea6ef5a2a58 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 24 Jul 2012 11:22:51 -0500 Subject: [PATCH 519/651] Fix memory leak when processing a thread's input message fails. The message is reclaimed in both success/fail cases now. --- src/threading/MsgThread.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index b7a8f4922c..48c7253885 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -342,14 +342,14 @@ void MsgThread::Run() bool result = msg->Process(); + delete msg; + if ( ! 
result ) { string s = Fmt("%s failed, terminating thread (MsgThread)", Name()); Error(s.c_str()); break; } - - delete msg; } // In case we haven't send the finish method yet, do it now. Reading From 0d748c117d73351daa6157d634d493e1691251eb Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 24 Jul 2012 11:06:16 -0700 Subject: [PATCH 520/651] Adding missing include needed on some systems. --- CHANGES | 4 ++++ VERSION | 2 +- src/logging/writers/Ascii.cc | 1 + src/logging/writers/None.cc | 2 +- src/threading/MsgThread.cc | 1 + 5 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 8fbd067fbc..e5be483e77 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,8 @@ +2.0-849 | 2012-07-24 11:06:16 -0700 + + * Adding missing include needed on some systems. (Robin Sommer) + 2.0-846 | 2012-07-23 16:36:37 -0700 * Fix WriterBackend::WriterInfo serialization, reenable ascii diff --git a/VERSION b/VERSION index 500ff3b4cd..3ccfd995f9 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-846 +2.0-849 diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index c77e680a92..3866c48b64 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -3,6 +3,7 @@ #include #include #include +#include #include "NetVar.h" #include "threading/SerialTypes.h" diff --git a/src/logging/writers/None.cc b/src/logging/writers/None.cc index 9b91b82199..cf383899a1 100644 --- a/src/logging/writers/None.cc +++ b/src/logging/writers/None.cc @@ -39,7 +39,7 @@ bool None::DoInit(const WriterInfo& info, int num_fields, std::cout << std::endl; } - return true; + return false; } bool None::DoRotate(const char* rotated_path, double open, double close, bool terminating) diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index b7a8f4922c..fea9c2a532 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -346,6 +346,7 @@ void MsgThread::Run() { string s = Fmt("%s failed, terminating thread (MsgThread)", Name()); Error(s.c_str()); + Kill(); break; } From 43752b3d9f4efadebda9d342921e186ee09bcbde Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 24 Jul 2012 11:16:03 -0700 Subject: [PATCH 521/651] Reverting accidentally committed changes. Thanks, Bernhard! --- src/logging/writers/None.cc | 2 +- src/threading/MsgThread.cc | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/logging/writers/None.cc b/src/logging/writers/None.cc index cf383899a1..9b91b82199 100644 --- a/src/logging/writers/None.cc +++ b/src/logging/writers/None.cc @@ -39,7 +39,7 @@ bool None::DoInit(const WriterInfo& info, int num_fields, std::cout << std::endl; } - return false; + return true; } bool None::DoRotate(const char* rotated_path, double open, double close, bool terminating) diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index fea9c2a532..b7a8f4922c 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -346,7 +346,6 @@ void MsgThread::Run() { string s = Fmt("%s failed, terminating thread (MsgThread)", Name()); Error(s.c_str()); - Kill(); break; } From c36a449c76cc442f64b97d1a7c11febf454304d9 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 24 Jul 2012 15:04:14 -0700 Subject: [PATCH 522/651] New built-in function to_double(s: string). Closes #859. 
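In plain C++, the conversion the new BIF performs amounts to the following (a sketch only; the real code lives in src/bro.bif and reports failures through builtin_error()):

#include <stdlib.h>

// Sets `ok` to false if `s` is empty or has trailing junk; returns 0 then.
static double to_double_sketch(const char* s, bool& ok)
    {
    char* end = 0;
    double d = strtod(s, &end);
    ok = s[0] != '\0' && *end == '\0';
    return ok ? d : 0.0;
    }

So "3.14" and "-3.14" convert cleanly, while "NotADouble" and "" yield 0 plus an error.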
--- CHANGES | 4 ++++ VERSION | 2 +- src/bro.bif | 23 +++++++++++++++++++ .../Baseline/bifs.to_double_from_string/error | 2 ++ .../bifs.to_double_from_string/output | 5 ++++ testing/btest/bifs/to_double_from_string.bro | 16 +++++++++++++ 6 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 testing/btest/Baseline/bifs.to_double_from_string/error create mode 100644 testing/btest/Baseline/bifs.to_double_from_string/output create mode 100644 testing/btest/bifs/to_double_from_string.bro diff --git a/CHANGES b/CHANGES index e5be483e77..87a537c1e9 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,8 @@ +2.0-851 | 2012-07-24 15:04:14 -0700 + + * New built-in function to_double(s: string). (Scott Campbell) + 2.0-849 | 2012-07-24 11:06:16 -0700 * Adding missing include needed on some systems. (Robin Sommer) diff --git a/VERSION b/VERSION index 3ccfd995f9..c4f46b78c9 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-849 +2.0-851 diff --git a/src/bro.bif b/src/bro.bif index f18d3ba1b5..2c22626c99 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -2604,6 +2604,29 @@ function to_subnet%(sn: string%): subnet return ret; %} +## Converts a :bro:type:`string` to a :bro:type:`double`. +## +## str: The :bro:type:`string` to convert. +## +## Returns: The :bro:type:`string` *str* as double, or 0 if *str* has +## an invalid format. +## +function to_double%(str: string%): double + %{ + const char* s = str->CheckString(); + char* end_s; + + double d = strtod(s, &end_s); + + if ( s[0] == '\0' || end_s[0] != '\0' ) + { + builtin_error("bad conversion to count", @ARG@[0]); + d = 0; + } + + return new Val(d, TYPE_DOUBLE); + %} + ## Converts a :bro:type:`count` to an :bro:type:`addr`. ## ## ip: The :bro:type:`count` to convert. diff --git a/testing/btest/Baseline/bifs.to_double_from_string/error b/testing/btest/Baseline/bifs.to_double_from_string/error new file mode 100644 index 0000000000..5ba5997101 --- /dev/null +++ b/testing/btest/Baseline/bifs.to_double_from_string/error @@ -0,0 +1,2 @@ +error in /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 7 and /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 15: bad conversion to count (to_double(d) and NotADouble) +error in /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 7 and /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 16: bad conversion to count (to_double(d) and ) diff --git a/testing/btest/Baseline/bifs.to_double_from_string/output b/testing/btest/Baseline/bifs.to_double_from_string/output new file mode 100644 index 0000000000..661d2b1479 --- /dev/null +++ b/testing/btest/Baseline/bifs.to_double_from_string/output @@ -0,0 +1,5 @@ +to_double(3.14) = 3.14 (SUCCESS) +to_double(-3.14) = -3.14 (SUCCESS) +to_double(0) = 0.0 (SUCCESS) +to_double(NotADouble) = 0.0 (SUCCESS) +to_double() = 0.0 (SUCCESS) diff --git a/testing/btest/bifs/to_double_from_string.bro b/testing/btest/bifs/to_double_from_string.bro new file mode 100644 index 0000000000..88af6758f0 --- /dev/null +++ b/testing/btest/bifs/to_double_from_string.bro @@ -0,0 +1,16 @@ +# @TEST-EXEC: bro -b %INPUT >output 2>error +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff error + +function test_to_double(d: string, expect: double) + { + local result = to_double(d); + print fmt("to_double(%s) = %s (%s)", d, result, + result == expect ? 
"SUCCESS" : "FAILURE"); + } + +test_to_double("3.14", 3.14); +test_to_double("-3.14", -3.14); +test_to_double("0", 0); +test_to_double("NotADouble", 0); +test_to_double("", 0); From b9a76d7ed0f16390a7cfd4da7e3a21cc404c9c5b Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 24 Jul 2012 17:21:30 -0500 Subject: [PATCH 523/651] Fix file permissions of log files A recent commit was erroneously causing new log files to be created with execute permissions. --- src/logging/writers/Ascii.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 3866c48b64..4d2f59ea72 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -101,7 +101,7 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * fname = IsSpecial(path) ? path : path + "." + LogExt(); - fd = open(fname.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0777); + fd = open(fname.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0666); if ( fd < 0 ) { From 3a8f812f1c11be204b2e8451bb24a47eb02db7bf Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 24 Jul 2012 17:32:04 -0500 Subject: [PATCH 524/651] Correct a typo --- configure | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure b/configure index 2de4be62c4..bfe54123f0 100755 --- a/configure +++ b/configure @@ -33,7 +33,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --disable-broccoli don't build or install the Broccoli library --disable-broctl don't install Broctl --disable-auxtools don't build or install auxiliary tools - --disable-perftools don't try to build python with Google Perftools + --disable-perftools don't try to build with Google Perftools --disable-python don't try to build python bindings for broccoli --disable-ruby don't try to build ruby bindings for broccoli From 5af131e3035a7057b0c5f321b5e1007a102548f8 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 24 Jul 2012 16:10:52 -0700 Subject: [PATCH 525/651] Compile fix. 
--- src/util.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util.h b/src/util.h index 4435b830dd..048ec384e3 100644 --- a/src/util.h +++ b/src/util.h @@ -353,7 +353,7 @@ struct CompareString { bool operator()(char const *a, char const *b) const { - return std::strcmp(a, b) < 0; + return strcmp(a, b) < 0; } }; From 91522e78365491ac9c784c8eaa146011fb9e4610 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 25 Jul 2012 12:10:47 -0500 Subject: [PATCH 526/651] Fix tests and error message for to_double BIF --- src/bro.bif | 2 +- testing/btest/Baseline/bifs.to_double_from_string/error | 4 ++-- testing/btest/bifs/to_double_from_string.bro | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/bro.bif b/src/bro.bif index 2c22626c99..2a37429ad6 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -2620,7 +2620,7 @@ function to_double%(str: string%): double if ( s[0] == '\0' || end_s[0] != '\0' ) { - builtin_error("bad conversion to count", @ARG@[0]); + builtin_error("bad conversion to double", @ARG@[0]); d = 0; } diff --git a/testing/btest/Baseline/bifs.to_double_from_string/error b/testing/btest/Baseline/bifs.to_double_from_string/error index 5ba5997101..d6c6c0c75b 100644 --- a/testing/btest/Baseline/bifs.to_double_from_string/error +++ b/testing/btest/Baseline/bifs.to_double_from_string/error @@ -1,2 +1,2 @@ -error in /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 7 and /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 15: bad conversion to count (to_double(d) and NotADouble) -error in /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 7 and /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 16: bad conversion to count (to_double(d) and ) +error in /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 7 and /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 15: bad conversion to double (to_double(d) and NotADouble) +error in /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 7 and /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 16: bad conversion to double (to_double(d) and ) diff --git a/testing/btest/bifs/to_double_from_string.bro b/testing/btest/bifs/to_double_from_string.bro index 88af6758f0..781261084f 100644 --- a/testing/btest/bifs/to_double_from_string.bro +++ b/testing/btest/bifs/to_double_from_string.bro @@ -1,6 +1,6 @@ # @TEST-EXEC: bro -b %INPUT >output 2>error # @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff error +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff error function test_to_double(d: string, expect: double) { From 2fafadd9300b2abdf9195f7270071d9549850084 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 25 Jul 2012 12:20:12 -0500 Subject: [PATCH 527/651] Fix differing log filters of streams from writing to same writer/path. Since WriterFrontend objects are looked up internally by writer type and path, and they also expect to write consistent field arguments, it could be the case that more than one filter of a given stream attempts to write to the same path (derived either from $path or $path_func fields of the filter) with the same writer type. 
This won't work, so now WriterFrontend objects are bound to the filter that instantiated them so that we can warn about other filters attempting to write to the conflicting writer/path and the write can be skipped. Remote logs don't appear to suffer the same issue due to pre-filtering. Addresses #842. --- src/logging/Manager.cc | 17 ++++++++++++-- src/logging/Manager.h | 2 +- src/logging/WriterBackend.cc | 5 ++-- .../http.log | 23 +++++++++++++++++++ .../reporter.log | 23 +++++++++++++++++++ .../logging/writer-path-conflict.bro | 14 +++++++++++ 6 files changed, 78 insertions(+), 6 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http.log create mode 100755 testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log create mode 100644 testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index c4245680a6..3499d55f74 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -86,6 +86,7 @@ struct Manager::WriterInfo { Func* postprocessor; WriterFrontend* writer; WriterBackend::WriterInfo* info; + string instantiating_filter; }; struct Manager::Stream { @@ -764,8 +765,18 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) WriterFrontend* writer = 0; if ( w != stream->writers.end() ) + { + if ( w->second->instantiating_filter != filter->name ) + { + reporter->Warning("Skipping write to filter '%s' on path '%s'" + " because filter '%s' has already instantiated the same" + " writer type for that path", filter->name.c_str(), + filter->path.c_str(), w->second->instantiating_filter.c_str()); + continue; + } // We know this writer already. writer = w->second->writer; + } else { @@ -800,7 +811,7 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) writer = CreateWriter(stream->id, filter->writer, info, filter->num_fields, - arg_fields, filter->local, filter->remote); + arg_fields, filter->local, filter->remote, filter->name); if ( ! writer ) { @@ -999,7 +1010,8 @@ threading::Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, } WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, WriterBackend::WriterInfo* info, - int num_fields, const threading::Field* const* fields, bool local, bool remote) + int num_fields, const threading::Field* const* fields, bool local, bool remote, + const string& instantiating_filter) { Stream* stream = FindStream(id); @@ -1023,6 +1035,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, WriterBacken winfo->interval = 0; winfo->postprocessor = 0; winfo->info = info; + winfo->instantiating_filter = instantiating_filter; // Search for a corresponding filter for the writer/path pair and use its // rotation settings. If no matching filter is found, fall back on diff --git a/src/logging/Manager.h b/src/logging/Manager.h index ae7a1796ba..d2041592c1 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -165,7 +165,7 @@ protected: // Takes ownership of fields and info. WriterFrontend* CreateWriter(EnumVal* id, EnumVal* writer, WriterBackend::WriterInfo* info, int num_fields, const threading::Field* const* fields, - bool local, bool remote); + bool local, bool remote, const string& instantiating_filter=""); // Takes ownership of values.. 
bool Write(EnumVal* id, EnumVal* writer, string path, diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 87db8e4437..2933062eff 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -201,7 +201,6 @@ bool WriterBackend::Write(int arg_num_fields, int num_writes, Value*** vals) return false; } -#ifdef DEBUG // Double-check all the types match. for ( int j = 0; j < num_writes; j++ ) { @@ -209,17 +208,17 @@ bool WriterBackend::Write(int arg_num_fields, int num_writes, Value*** vals) { if ( vals[j][i]->type != fields[i]->type ) { +#ifdef DEBUG const char* msg = Fmt("Field type doesn't match in WriterBackend::Write() (%d vs. %d)", vals[j][i]->type, fields[i]->type); Debug(DBG_LOGGING, msg); - +#endif DisableFrontend(); DeleteVals(num_writes, vals); return false; } } } -#endif bool success = true; diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http.log new file mode 100644 index 0000000000..9ac9b6304c --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http.log @@ -0,0 +1,23 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path http +#start 2011-03-18-19-06-08 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file +#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file +1300475168.784020 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 1 GET bits.wikimedia.org /skins-1.5/monobook/main.css http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475168.916018 VW0XPVINV8a 141.142.220.118 49997 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/6/63/Wikipedia-logo.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475168.916183 3PKsZ2Uye21 141.142.220.118 49996 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/b/bb/Wikipedia_wordmark.svg/174px-Wikipedia_wordmark.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475168.918358 GSxOnSLghOa 141.142.220.118 49998 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/b/bd/Bookshelf-40x201_6.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475168.952307 Tw8jXtpTGu6 141.142.220.118 50000 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/8/8a/Wikinews-logo.png/35px-Wikinews-logo.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475168.952296 P654jzLoe3a 141.142.220.118 49999 208.80.152.3 80 1 GET upload.wikimedia.org 
/wikipedia/commons/4/4a/Wiktionary-logo-en-35px.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475168.954820 0Q4FH8sESw5 141.142.220.118 50001 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/f/fa/Wikiquote-logo.svg/35px-Wikiquote-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475168.962687 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 1 GET meta.wikimedia.org /images/wikimedia-button.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475168.975934 VW0XPVINV8a 141.142.220.118 49997 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/f/fa/Wikibooks-logo.svg/35px-Wikibooks-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475168.976436 3PKsZ2Uye21 141.142.220.118 49996 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/d/df/Wikispecies-logo.svg/35px-Wikispecies-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475168.979264 GSxOnSLghOa 141.142.220.118 49998 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/4/4c/Wikisource-logo.svg/35px-Wikisource-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475169.014619 Tw8jXtpTGu6 141.142.220.118 50000 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/4/4a/Commons-logo.svg/35px-Commons-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475169.014593 P654jzLoe3a 141.142.220.118 49999 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/9/91/Wikiversity-logo.svg/35px-Wikiversity-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +1300475169.014927 0Q4FH8sESw5 141.142.220.118 50001 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/7/75/Wikimedia_Community_Logo.svg/35px-Wikimedia_Community_Logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - +#end 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log new file mode 100755 index 0000000000..7a4225d718 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log @@ -0,0 +1,23 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path reporter +#start 2011-03-18-19-06-08 +#fields ts level 
message location +#types time enum string string +1300475168.843894 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475168.975800 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475168.976327 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475168.979160 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475169.012666 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475169.012730 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475169.014860 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475169.022665 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475169.036294 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475169.036798 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475169.039923 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475169.074793 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475169.074938 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475169.075065 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +#end 2011-03-18-19-06-13 diff --git a/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro b/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro new file mode 100644 index 0000000000..be6c0e9e9e --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro @@ -0,0 +1,14 @@ +# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff reporter.log +# @TEST-EXEC: btest-diff http.log + +@load base/protocols/http + +event bro_init() + { + # Both the default filter for the http stream and this new one will + # attempt to have the same writer write to path "http", which will + # be reported as a warning and the write skipped. 
+ local filter: Log::Filter = [$name="host-only", $include=set("host")]; + Log::add_filter(HTTP::LOG, filter); + } From 4abcfa1f66b2dc9f82b0a40d591ecb39bfaa1fd7 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 25 Jul 2012 12:42:46 -0500 Subject: [PATCH 528/651] Fix complaint from valgrind about uninitialized memory usage. --- src/util.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/util.cc b/src/util.cc index a34f41dadb..be560928d6 100644 --- a/src/util.cc +++ b/src/util.cc @@ -691,6 +691,7 @@ void init_random_seed(uint32 seed, const char* read_file, const char* write_file { static const int bufsiz = 16; uint32 buf[bufsiz]; + memset(buf, 0, sizeof(buf)); int pos = 0; // accumulates entropy bool seeds_done = false; From 7e228f1d6b8cbd0f1b096c77953fb4339a895d7e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 25 Jul 2012 13:58:08 -0700 Subject: [PATCH 529/651] Silencing compiler warnings. --- src/input/Manager.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index f38613a6f8..90d7eae2f4 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1816,7 +1816,7 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) case TYPE_ADDR: { - int length; + int length = 0; switch ( val->val.addr_val.family ) { case IPv4: length = sizeof(val->val.addr_val.in.in4); @@ -1837,7 +1837,7 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) case TYPE_SUBNET: { - int length; + int length = 0; switch ( val->val.subnet_val.prefix.family ) { case IPv4: length = sizeof(val->val.addr_val.in.in4); @@ -1968,7 +1968,7 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) case TYPE_ADDR: { - IPAddr* addr; + IPAddr* addr = 0; switch ( val->val.addr_val.family ) { case IPv4: addr = new IPAddr(val->val.addr_val.in.in4); @@ -1989,7 +1989,7 @@ Val* Manager::ValueToVal(const Value* val, BroType* request_type) case TYPE_SUBNET: { - IPAddr* addr; + IPAddr* addr = 0; switch ( val->val.subnet_val.prefix.family ) { case IPv4: addr = new IPAddr(val->val.subnet_val.prefix.in.in4); From a33e9a69417a1ae4a8e54d1bc929967d4cd1f0df Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 25 Jul 2012 13:58:23 -0700 Subject: [PATCH 530/651] Fixing FreeBSD compiler error. --- src/logging/writers/ElasticSearch.cc | 5 +++-- src/util.h | 13 +++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index b7edcf6aa6..2da79ed7b9 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -3,16 +3,17 @@ // This is experimental code that is not yet ready for production usage. // + #include "config.h" #ifdef USE_ELASTICSEARCH +#include "util.h" // Needs to come first for stdint.h + #include #include -#include "util.h" #include "BroString.h" - #include "NetVar.h" #include "threading/SerialTypes.h" diff --git a/src/util.h b/src/util.h index 048ec384e3..5d1bdf188a 100644 --- a/src/util.h +++ b/src/util.h @@ -3,6 +3,13 @@ #ifndef util_h #define util_h +// Expose C99 functionality from inttypes.h, which would otherwise not be +// available in C++. +#define __STDC_FORMAT_MACROS +#define __STDC_LIMIT_MACROS +#include +#include + #include #include #include @@ -10,12 +17,6 @@ #include #include "config.h" -// Expose C99 functionality from inttypes.h, which would otherwise not be -// available in C++. 
-#define __STDC_FORMAT_MACROS -#define __STDC_LIMIT_MACROS -#include - #if __STDC__ #define myattribute __attribute__ #else From f2a0afad3c6dbb274e5631680fe238ec841ed37f Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 25 Jul 2012 17:01:47 -0400 Subject: [PATCH 531/651] Fixes to elasticsearch plugin to make libcurl handle http responses correctly. --- src/logging/writers/ElasticSearch.cc | 4 ++-- src/logging/writers/ElasticSearch.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 2da79ed7b9..cc6f8b1c4f 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -359,10 +359,10 @@ CURL* ElasticSearch::HTTPSetup() return handle; } -bool ElasticSearch::HTTPReceive(void* ptr, int size, int nmemb, void* userdata) +size_t ElasticSearch::HTTPReceive(void* ptr, int size, int nmemb, void* userdata) { //TODO: Do some verification on the result? - return true; + return size; } bool ElasticSearch::HTTPSend(CURL *handle) diff --git a/src/logging/writers/ElasticSearch.h b/src/logging/writers/ElasticSearch.h index 0d863f2f19..0e88bf3e88 100644 --- a/src/logging/writers/ElasticSearch.h +++ b/src/logging/writers/ElasticSearch.h @@ -45,7 +45,7 @@ private: bool UpdateIndex(double now, double rinterval, double rbase); CURL* HTTPSetup(); - bool HTTPReceive(void* ptr, int size, int nmemb, void* userdata); + size_t HTTPReceive(void* ptr, int size, int nmemb, void* userdata); bool HTTPSend(CURL *handle); // Buffers, etc. From c3aba199f6f6d580392b28f87f93f7aa6c2d2e9f Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 25 Jul 2012 17:40:21 -0500 Subject: [PATCH 532/651] Fix build warnings --- scripts/base/frameworks/logging/writers/elasticsearch.bro | 2 +- scripts/policy/tuning/logs-to-elasticsearch.bro | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro index a6a485226a..b0e8fac40e 100644 --- a/scripts/base/frameworks/logging/writers/elasticsearch.bro +++ b/scripts/base/frameworks/logging/writers/elasticsearch.bro @@ -23,7 +23,7 @@ export { const index_prefix = "bro" &redef; ## The ES type prefix comes before the name of the related log. - ## e.g. prefix = "bro_" would create types of bro_dns, bro_software, etc. + ## e.g. prefix = "bro\_" would create types of bro_dns, bro_software, etc. const type_prefix = "" &redef; ## The time before an ElasticSearch transfer will timeout. diff --git a/scripts/policy/tuning/logs-to-elasticsearch.bro b/scripts/policy/tuning/logs-to-elasticsearch.bro index c3cc9d5002..b4d16a19a1 100644 --- a/scripts/policy/tuning/logs-to-elasticsearch.bro +++ b/scripts/policy/tuning/logs-to-elasticsearch.bro @@ -6,13 +6,13 @@ export { ## An elasticsearch specific rotation interval. const rotation_interval = 24hr &redef; - ## Optionally ignore any :bro:enum:`Log::ID` from being sent to + ## Optionally ignore any :bro:type:`Log::ID` from being sent to ## ElasticSearch with this script. const excluded_log_ids: set[string] = set("Communication::LOG") &redef; - ## If you want to explicitly only send certain :bro:enum:`Log::ID` + ## If you want to explicitly only send certain :bro:type:`Log::ID` ## streams, add them to this set. If the set remains empty, all will - ## be sent. The :bro:id:`excluded_log_ids` option will remain in + ## be sent. 
The :bro:id:`LogElasticSearch::excluded_log_ids` option will remain in ## effect as well. const send_logs: set[string] = set() &redef; } @@ -42,4 +42,4 @@ event bro_init() &priority=-5 { Log::add_filter(id, filter); } - } \ No newline at end of file + } From c48a16664b521bbcaa0fa60e37ae65b49202b168 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 25 Jul 2012 18:05:42 -0500 Subject: [PATCH 533/651] Fix double close() in FilerSerializer class. --- src/Serializer.cc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Serializer.cc b/src/Serializer.cc index 06bbf73f48..97ee8f743c 100644 --- a/src/Serializer.cc +++ b/src/Serializer.cc @@ -742,9 +742,10 @@ FileSerializer::~FileSerializer() io->Flush(); delete [] file; - delete io; - if ( fd >= 0 ) + if ( io ) + delete io; // destructor will call close() on fd + else if ( fd >= 0 ) close(fd); } @@ -808,7 +809,7 @@ void FileSerializer::CloseFile() if ( io ) io->Flush(); - if ( fd >= 0 ) + if ( fd >= 0 && ! io ) // destructor of io calls close() on fd close(fd); fd = -1; From 84399c5d7dae83ae252c08b7a2766f3bb212c1e4 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 26 Jul 2012 08:58:12 -0700 Subject: [PATCH 534/651] add testcase for subrecords to input framework tests --- .../out | 14 ++++ .../base/frameworks/input/subrecord.bro | 70 +++++++++++++++++++ 2 files changed, 84 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.subrecord/out create mode 100644 testing/btest/scripts/base/frameworks/input/subrecord.bro diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.subrecord/out b/testing/btest/Baseline/scripts.base.frameworks.input.subrecord/out new file mode 100644 index 0000000000..c7e46dfacd --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.subrecord/out @@ -0,0 +1,14 @@ +{ +[-42] = [sub=[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, two=[a=1.2.3.4, d=3.14]], t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} diff --git a/testing/btest/scripts/base/frameworks/input/subrecord.bro b/testing/btest/scripts/base/frameworks/input/subrecord.bro new file mode 100644 index 0000000000..8c845a1842 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/subrecord.bro @@ -0,0 +1,70 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). 
+# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields sub.b i sub.e sub.c sub.p sub.sn sub.two.a sub.two.d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load base/protocols/ssh +@load frameworks/communication/listen + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type SubVal2: record { + a: addr; + d: double; +}; + +type SubVal: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + two: SubVal2; +}; + +type Val: record { + sub: SubVal; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +event bro_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::remove("ssh"); + } + +event Input::update_finished(name: string, source:string) + { + print outfile, servers; + close(outfile); + terminate(); + } From 734e5f68d377679df9106e534e20f923cffaf99c Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 26 Jul 2012 12:40:12 -0500 Subject: [PATCH 535/651] Add more error handling for close() calls. --- src/ChunkedIO.cc | 6 +++--- src/FlowSrc.cc | 2 +- src/RemoteSerializer.cc | 22 ++++++++++++---------- src/Serializer.cc | 4 ++-- src/logging/writers/Ascii.cc | 2 +- src/util.cc | 27 +++++++++++++++++++++++++-- src/util.h | 3 +++ 7 files changed, 47 insertions(+), 19 deletions(-) diff --git a/src/ChunkedIO.cc b/src/ChunkedIO.cc index f5bcb4b7c1..2c766c7eb1 100644 --- a/src/ChunkedIO.cc +++ b/src/ChunkedIO.cc @@ -76,7 +76,7 @@ void ChunkedIO::DumpDebugData(const char* basefnname, bool want_reads) ChunkedIOFd io(fd, "dump-file"); io.Write(*i); io.Flush(); - close(fd); + safe_close(fd); } l->clear(); @@ -127,7 +127,7 @@ ChunkedIOFd::~ChunkedIOFd() delete [] read_buffer; delete [] write_buffer; - close(fd); + safe_close(fd); if ( partial ) { @@ -686,7 +686,7 @@ ChunkedIOSSL::~ChunkedIOSSL() ssl = 0; } - close(socket); + safe_close(socket); } diff --git a/src/FlowSrc.cc b/src/FlowSrc.cc index fe6998ea79..59ce3fd6a4 100644 --- a/src/FlowSrc.cc +++ b/src/FlowSrc.cc @@ -58,7 +58,7 @@ void FlowSrc::Process() void FlowSrc::Close() { - close(selectable_fd); + safe_close(selectable_fd); } diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 7ed8b9318e..4e9ccb7dd2 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -647,7 +647,7 @@ void RemoteSerializer::Fork() exit(1); // FIXME: Better way to handle this? } - close(pipe[1]); + safe_close(pipe[1]); return; } @@ -664,12 +664,12 @@ void RemoteSerializer::Fork() } child.SetParentIO(io); - close(pipe[0]); + safe_close(pipe[0]); // Close file descriptors. - close(0); - close(1); - close(2); + safe_close(0); + safe_close(1); + safe_close(2); // Be nice. 
setpriority(PRIO_PROCESS, 0, 5); @@ -4001,7 +4001,7 @@ bool SocketComm::Connect(Peer* peer) if ( connect(sockfd, res->ai_addr, res->ai_addrlen) < 0 ) { Error(fmt("connect failed: %s", strerror(errno)), peer); - close(sockfd); + safe_close(sockfd); sockfd = -1; continue; } @@ -4174,16 +4174,18 @@ bool SocketComm::Listen() { Error(fmt("can't bind to %s:%s, %s", l_addr_str.c_str(), port_str, strerror(errno))); - close(fd); if ( errno == EADDRINUSE ) { // Abandon completely this attempt to set up listening sockets, // try again later. + safe_close(fd); CloseListenFDs(); listen_next_try = time(0) + bind_retry_interval; return false; } + + safe_close(fd); continue; } @@ -4191,7 +4193,7 @@ bool SocketComm::Listen() { Error(fmt("can't listen on %s:%s, %s", l_addr_str.c_str(), port_str, strerror(errno))); - close(fd); + safe_close(fd); continue; } @@ -4227,7 +4229,7 @@ bool SocketComm::AcceptConnection(int fd) { Error(fmt("accept fail, unknown address family %d", client.ss.ss_family)); - close(clientfd); + safe_close(clientfd); return false; } @@ -4298,7 +4300,7 @@ const char* SocketComm::MakeLogString(const char* msg, Peer* peer) void SocketComm::CloseListenFDs() { for ( size_t i = 0; i < listen_fds.size(); ++i ) - close(listen_fds[i]); + safe_close(listen_fds[i]); listen_fds.clear(); } diff --git a/src/Serializer.cc b/src/Serializer.cc index 97ee8f743c..fc6d00d06c 100644 --- a/src/Serializer.cc +++ b/src/Serializer.cc @@ -746,7 +746,7 @@ FileSerializer::~FileSerializer() if ( io ) delete io; // destructor will call close() on fd else if ( fd >= 0 ) - close(fd); + safe_close(fd); } bool FileSerializer::Open(const char* file, bool pure) @@ -810,7 +810,7 @@ void FileSerializer::CloseFile() io->Flush(); if ( fd >= 0 && ! io ) // destructor of io calls close() on fd - close(fd); + safe_close(fd); fd = -1; delete [] file; diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 4d2f59ea72..0ccdd1f569 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -86,7 +86,7 @@ void Ascii::CloseFile(double t) WriteHeaderField("end", ts); } - close(fd); + safe_close(fd); fd = 0; } diff --git a/src/util.cc b/src/util.cc index be560928d6..171fcdce37 100644 --- a/src/util.cc +++ b/src/util.cc @@ -722,7 +722,7 @@ void init_random_seed(uint32 seed, const char* read_file, const char* write_file { int amt = read(fd, buf + pos, sizeof(uint32) * (bufsiz - pos)); - close(fd); + safe_close(fd); if ( amt > 0 ) pos += amt / sizeof(uint32); @@ -1204,7 +1204,7 @@ void _set_processing_status(const char* status) len -= n; } - close(fd); + safe_close(fd); errno = old_errno; } @@ -1353,6 +1353,29 @@ bool safe_write(int fd, const char* data, int len) return true; } +void safe_close(int fd) + { + /* + * Failure cases of close(2) are ... + * EBADF: Indicative of programming logic error that needs to be fixed, we + * should always be attempting to close a valid file descriptor. + * EINTR: Ignore signal interruptions, most implementations will actually + * reclaim the open descriptor and POSIX standard doesn't leave many + * options by declaring the state of the descriptor as "unspecified". + * Attempting to inspect actual state or re-attempt close() is not + * thread safe. + * EIO: Again the state of descriptor is "unspecified", but don't recover + * from an I/O error, safe_write() won't either. 
+ */ + if ( close(fd) < 0 && errno != EINTR ) + { + char buf[128]; + strerror_r(errno, buf, sizeof(buf)); + fprintf(stderr, "safe_close error %d: %s\n", errno, buf); + abort(); + } + } + void out_of_memory(const char* where) { reporter->FatalError("out of memory in %s.\n", where); diff --git a/src/util.h b/src/util.h index 5d1bdf188a..e69167abce 100644 --- a/src/util.h +++ b/src/util.h @@ -297,6 +297,9 @@ inline size_t pad_size(size_t size) // thread-safe as long as no two threads write to the same descriptor. extern bool safe_write(int fd, const char* data, int len); +// Wraps close(2) to emit error messages and abort on unrecoverable errors. +extern void safe_close(int fd); + extern void out_of_memory(const char* where); inline void* safe_realloc(void* ptr, size_t size) From 1a49363bbec6e7b6576fc16b780a0728dc99a7c4 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 26 Jul 2012 12:12:54 -0700 Subject: [PATCH 536/651] add testcase for subrecords and events add missing binary testcase (Baseline is in master, testcase is missing for some reason) make error output for nonmatching event types much more verbose --- src/input/Manager.cc | 6 +- .../out | 12 +++ .../scripts/base/frameworks/input/binary.bro | 56 ++++++++++++++ .../base/frameworks/input/subrecord-event.bro | 77 +++++++++++++++++++ 4 files changed, 150 insertions(+), 1 deletion(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.subrecord-event/out create mode 100644 testing/btest/scripts/base/frameworks/input/binary.bro create mode 100644 testing/btest/scripts/base/frameworks/input/subrecord-event.bro diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 90d7eae2f4..40e3c413bb 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -443,7 +443,11 @@ bool Manager::CreateEventStream(RecordVal* fval) if ( !same_type((*args)[2], fields ) ) { - reporter->Error("Incompatible type for event"); + ODesc desc1; + ODesc desc2; + (*args)[2]->Describe(&desc1); + fields->Describe(&desc2); + reporter->Error("Incompatible type '%s':%s for event which needs type '%s':%s\n", type_name((*args)[2]->Tag()), desc1.Bytes(), type_name(fields->Tag()), desc2.Bytes()); return false; } diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.subrecord-event/out b/testing/btest/Baseline/scripts.base.frameworks.input.subrecord-event/out new file mode 100644 index 0000000000..197cb54df9 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.subrecord-event/out @@ -0,0 +1,12 @@ +[sub=[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, two=[a=1.2.3.4, d=3.14]], t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] diff --git a/testing/btest/scripts/base/frameworks/input/binary.bro b/testing/btest/scripts/base/frameworks/input/binary.bro new file mode 100644 index 0000000000..86e02196b5 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/binary.bro @@ -0,0 +1,56 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). 
+# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff out + +redef InputAscii::separator = "|"; +redef InputAscii::set_separator = ","; +redef InputAscii::empty_field = "(empty)"; +redef InputAscii::unset_field = "-"; + +@TEST-START-FILE input.log +#separator | +#set_separator|, +#empty_field|(empty) +#unset_field|- +#path|ssh +#start|2012-07-20-01-49-19 +#fields|data|data2 +#types|string|string +abc\x0a\xffdef|DATA2 +abc\x7c\xffdef|DATA2 +abc\xff\x7cdef|DATA2 +#end|2012-07-20-01-49-19 +@TEST-END-FILE + +@load frameworks/communication/listen + +global outfile: file; +global try: count; + +type Val: record { + data: string; + data2: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, a: string, b: string) + { + print outfile, a; + print outfile, b; + try = try + 1; + if ( try == 3 ) + { + close(outfile); + terminate(); + } + } + +event bro_init() + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line]); + Input::remove("input"); + } diff --git a/testing/btest/scripts/base/frameworks/input/subrecord-event.bro b/testing/btest/scripts/base/frameworks/input/subrecord-event.bro new file mode 100644 index 0000000000..244eefbc3b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/subrecord-event.bro @@ -0,0 +1,77 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields sub.b i sub.e sub.c sub.p sub.sn sub.two.a sub.two.d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load base/protocols/ssh +@load frameworks/communication/listen + +global outfile: file; +global try: count; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type SubVal2: record { + a: addr; + d: double; +}; + +type SubVal: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + two: SubVal2; +}; + +type Val: record { + sub: SubVal; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + + + +event line(description: Input::EventDescription, tpe: Input::Event, value: Val) + { + print outfile, value; + try = try + 1; + if ( try == 7 ) + { + close(outfile); + terminate(); + } + } + +event bro_init() + { + try = 0; + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_event([$source="../input.log", $name="ssh", $fields=Val, $ev=line, $want_record=T]); + Input::remove("ssh"); + print "Hi"; + } From 8633d91c4021194334bbd06a05483e5ec6ab82db Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 26 Jul 2012 12:15:06 -0700 Subject: [PATCH 537/651] and remove superflous print. Yes, I know, look at the diff before committing... 
--- .../btest/scripts/base/frameworks/input/subrecord-event.bro | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/testing/btest/scripts/base/frameworks/input/subrecord-event.bro b/testing/btest/scripts/base/frameworks/input/subrecord-event.bro index 244eefbc3b..4e7dc1690a 100644 --- a/testing/btest/scripts/base/frameworks/input/subrecord-event.bro +++ b/testing/btest/scripts/base/frameworks/input/subrecord-event.bro @@ -59,7 +59,7 @@ event line(description: Input::EventDescription, tpe: Input::Event, value: Val) { print outfile, value; try = try + 1; - if ( try == 7 ) + if ( try == 1 ) { close(outfile); terminate(); @@ -70,8 +70,6 @@ event bro_init() { try = 0; outfile = open("../out"); - # first read in the old stuff into the table... Input::add_event([$source="../input.log", $name="ssh", $fields=Val, $ev=line, $want_record=T]); Input::remove("ssh"); - print "Hi"; } From 63e8bf72edad62d4118e22be1e61e32404d03f30 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 26 Jul 2012 16:55:49 -0500 Subject: [PATCH 538/651] Change path conflicts between log filters to be auto-corrected. This change makes it so when differing logging filters on the same stream attempt to write to the same writer/path combination, the path of the filter doing the later write will be automatically adjusted so that it does not conflict with the other. The path is adjusted by appending "-N", where N is the smallest integer greater or equal to 2 required to resolve the path name conflict. Addresses #842. --- scripts/base/frameworks/logging/main.bro | 11 ++++- src/logging/Manager.cc | 41 ++++++++++++++----- .../http-2-2.log | 23 +++++++++++ .../http-2.log | 23 +++++++++++ .../http-3.log | 23 +++++++++++ .../reporter.log | 17 ++------ .../logging/writer-path-conflict.bro | 12 +++++- 7 files changed, 124 insertions(+), 26 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2-2.log create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2.log create mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-3.log diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index cc0d341605..79c9884f9d 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -96,6 +96,12 @@ export { ## file name. Generally, filenames are expected to given ## without any extensions; writers will add appropiate ## extensions automatically. + ## + ## If this path is found to conflict with another filter's + ## for the same writer type, it is automatically corrected + ## by appending "-N", where N is the smallest integer greater + ## or equal to 2 that allows the corrected path name to not + ## conflict with another filter's. path: string &optional; ## A function returning the output path for recording entries @@ -115,7 +121,10 @@ export { ## rec: An instance of the streams's ``columns`` type with its ## fields set to the values to be logged. ## - ## Returns: The path to be used for the filter. + ## Returns: The path to be used for the filter, which will be subject + ## to the same automatic correction rules as the *path* + ## field of :bro:type:`Log::Filter` in the case of conflicts + ## with other filters trying to use the same writer/path pair. path_func: function(id: ID, path: string, rec: any): string &optional; ## Subset of column names to record. 
If not given, all diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 3499d55f74..b1b289a478 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -758,22 +758,43 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) #endif } + Stream::WriterPathPair wpp(filter->writer->AsEnum(), path); + // See if we already have a writer for this path. - Stream::WriterMap::iterator w = - stream->writers.find(Stream::WriterPathPair(filter->writer->AsEnum(), path)); + Stream::WriterMap::iterator w = stream->writers.find(wpp); + + if ( w != stream->writers.end() && + w->second->instantiating_filter != filter->name ) + { + // Auto-correct path due to conflict with another filter over the + // same writer/path pair + string instantiator = w->second->instantiating_filter; + string new_path; + unsigned int i = 2; + + do { + char num[32]; + snprintf(num, sizeof(num), "-%u", i++); + new_path = path + num; + wpp.second = new_path; + w = stream->writers.find(wpp); + } while ( w != stream->writers.end()); + + Unref(filter->path_val); + filter->path_val = new StringVal(new_path.c_str()); + + reporter->Warning("Write using filter '%s' on path '%s' changed to" + " use new path '%s' to avoid conflict with filter '%s'", + filter->name.c_str(), path.c_str(), new_path.c_str(), + instantiator.c_str()); + + path = filter->path = filter->path_val->AsString()->CheckString(); + } WriterFrontend* writer = 0; if ( w != stream->writers.end() ) { - if ( w->second->instantiating_filter != filter->name ) - { - reporter->Warning("Skipping write to filter '%s' on path '%s'" - " because filter '%s' has already instantiated the same" - " writer type for that path", filter->name.c_str(), - filter->path.c_str(), w->second->instantiating_filter.c_str()); - continue; - } // We know this writer already. 
writer = w->second->writer; } diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2-2.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2-2.log new file mode 100644 index 0000000000..1e41aca795 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2-2.log @@ -0,0 +1,23 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path http-2-2 +#start 2011-03-18-19-06-08 +#fields status_code +#types count +304 +304 +304 +304 +304 +304 +304 +304 +304 +304 +304 +304 +304 +304 +#end 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2.log new file mode 100644 index 0000000000..4d3622c7a0 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2.log @@ -0,0 +1,23 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path http-2 +#start 2011-03-18-19-06-08 +#fields host +#types string +bits.wikimedia.org +upload.wikimedia.org +upload.wikimedia.org +upload.wikimedia.org +upload.wikimedia.org +upload.wikimedia.org +upload.wikimedia.org +meta.wikimedia.org +upload.wikimedia.org +upload.wikimedia.org +upload.wikimedia.org +upload.wikimedia.org +upload.wikimedia.org +upload.wikimedia.org +#end 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-3.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-3.log new file mode 100644 index 0000000000..727a6c02fa --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-3.log @@ -0,0 +1,23 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path http-3 +#start 2011-03-18-19-06-08 +#fields uri +#types string +/skins-1.5/monobook/main.css +/wikipedia/commons/6/63/Wikipedia-logo.png +/wikipedia/commons/thumb/b/bb/Wikipedia_wordmark.svg/174px-Wikipedia_wordmark.svg.png +/wikipedia/commons/b/bd/Bookshelf-40x201_6.png +/wikipedia/commons/thumb/8/8a/Wikinews-logo.png/35px-Wikinews-logo.png +/wikipedia/commons/4/4a/Wiktionary-logo-en-35px.png +/wikipedia/commons/thumb/f/fa/Wikiquote-logo.svg/35px-Wikiquote-logo.svg.png +/images/wikimedia-button.png +/wikipedia/commons/thumb/f/fa/Wikibooks-logo.svg/35px-Wikibooks-logo.svg.png +/wikipedia/commons/thumb/d/df/Wikispecies-logo.svg/35px-Wikispecies-logo.svg.png +/wikipedia/commons/thumb/4/4c/Wikisource-logo.svg/35px-Wikisource-logo.svg.png +/wikipedia/commons/thumb/4/4a/Commons-logo.svg/35px-Commons-logo.svg.png +/wikipedia/commons/thumb/9/91/Wikiversity-logo.svg/35px-Wikiversity-logo.svg.png +/wikipedia/commons/thumb/7/75/Wikimedia_Community_Logo.svg/35px-Wikimedia_Community_Logo.svg.png +#end 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log index 7a4225d718..3514ca5134 100755 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log @@ -6,18 +6,7 @@ #start 2011-03-18-19-06-08 #fields ts level message location #types time enum string string -1300475168.843894 Reporter::WARNING Skipping write to filter 'host-only' 
on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475168.975800 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475168.976327 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475168.979160 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475169.012666 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475169.012730 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475169.014860 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475169.022665 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475169.036294 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475169.036798 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475169.039923 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475169.074793 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475169.074938 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) -1300475169.075065 Reporter::WARNING Skipping write to filter 'host-only' on path 'http' because filter 'default' has already instantiated the same writer type for that path (empty) +1300475168.843894 Reporter::WARNING Write using filter 'host-only' on path 'http' changed to use new path 'http-2' to avoid conflict with filter 'default' (empty) +1300475168.843894 Reporter::WARNING Write using filter 'uri-only' on path 'http' changed to use new path 'http-3' to avoid conflict with filter 'default' (empty) +1300475168.843894 Reporter::WARNING Write using filter 'status-only' on path 'http-2' changed to use new path 'http-2-2' to avoid conflict with filter 'host-only' (empty) #end 2011-03-18-19-06-13 diff --git a/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro b/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro index be6c0e9e9e..908fb43c72 100644 --- a/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro +++ b/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro @@ -1,6 +1,9 @@ # @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT # @TEST-EXEC: btest-diff reporter.log # @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff http-2.log +# @TEST-EXEC: btest-diff 
http-3.log +# @TEST-EXEC: btest-diff http-2-2.log @load base/protocols/http @@ -8,7 +11,14 @@ event bro_init() { # Both the default filter for the http stream and this new one will # attempt to have the same writer write to path "http", which will - # be reported as a warning and the write skipped. + # be reported as a warning and the path auto-corrected to "http-2" local filter: Log::Filter = [$name="host-only", $include=set("host")]; + # Same deal here, but should be auto-corrected to "http-3". + local filter2: Log::Filter = [$name="uri-only", $include=set("uri")]; + # Conflict between auto-correct paths needs to be corrected, too, this + # time it will be "http-2-2". + local filter3: Log::Filter = [$path="http-2", $name="status-only", $include=set("status_code")]; Log::add_filter(HTTP::LOG, filter); + Log::add_filter(HTTP::LOG, filter2); + Log::add_filter(HTTP::LOG, filter3); } From 412bebb7031d7954a1ce20deef3d6a2f2face192 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 26 Jul 2012 15:24:27 -0700 Subject: [PATCH 539/651] Tweaking the custom-rotate test to produce stable output. There seems to be a race condition in capturing the external shell's stdout output reliably. As far as I can tell, Bro's doing everything correctly though, the log postprocessors gets executed as expected. So I rewrote the test to capture the output in a separate file first, and that seems to solve the test failures. --- .../btest/scripts/base/frameworks/logging/rotate-custom.bro | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro b/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro index 8a7f16d182..07fc8cef7c 100644 --- a/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro +++ b/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro @@ -1,5 +1,6 @@ # -#@TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT | egrep "test|test2" | sort >out +# @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT | egrep "test|test2" | sort >out.tmp +# @TEST-EXEC: cat out.tmp pp.log | sort >out # @TEST-EXEC: for i in `ls test*.log | sort`; do printf '> %s\n' $i; cat $i; done | sort | uniq >>out # @TEST-EXEC: btest-diff out # @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff .stderr @@ -19,7 +20,7 @@ export { } redef Log::default_rotation_interval = 1hr; -redef Log::default_rotation_postprocessor_cmd = "echo 1st"; +redef Log::default_rotation_postprocessor_cmd = "echo 1st >>pp.log"; function custom_rotate(info: Log::RotationInfo) : bool { From ef3b75129f393728f74ba039a71183063b313b02 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 26 Jul 2012 15:38:12 -0700 Subject: [PATCH 540/651] Updating baseline for custom-rotate test. 
--- .../.stderr | 10 ---------- .../scripts.base.frameworks.logging.rotate-custom/out | 10 ++++++++++ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/.stderr b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/.stderr index e1958d67ad..e69de29bb2 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/.stderr +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/.stderr @@ -1,10 +0,0 @@ -1st test.2011-03-07-03-00-05.log test 11-03-07_03.00.05 11-03-07_04.00.05 0 ascii -1st test.2011-03-07-04-00-05.log test 11-03-07_04.00.05 11-03-07_05.00.05 0 ascii -1st test.2011-03-07-05-00-05.log test 11-03-07_05.00.05 11-03-07_06.00.05 0 ascii -1st test.2011-03-07-06-00-05.log test 11-03-07_06.00.05 11-03-07_07.00.05 0 ascii -1st test.2011-03-07-07-00-05.log test 11-03-07_07.00.05 11-03-07_08.00.05 0 ascii -1st test.2011-03-07-08-00-05.log test 11-03-07_08.00.05 11-03-07_09.00.05 0 ascii -1st test.2011-03-07-09-00-05.log test 11-03-07_09.00.05 11-03-07_10.00.05 0 ascii -1st test.2011-03-07-10-00-05.log test 11-03-07_10.00.05 11-03-07_11.00.05 0 ascii -1st test.2011-03-07-11-00-05.log test 11-03-07_11.00.05 11-03-07_12.00.05 0 ascii -1st test.2011-03-07-12-00-05.log test 11-03-07_12.00.05 11-03-07_12.59.55 1 ascii diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out index 91b6f5de7a..19354f8df2 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out @@ -1,3 +1,13 @@ +1st test.2011-03-07-03-00-05.log test 11-03-07_03.00.05 11-03-07_04.00.05 0 ascii +1st test.2011-03-07-04-00-05.log test 11-03-07_04.00.05 11-03-07_05.00.05 0 ascii +1st test.2011-03-07-05-00-05.log test 11-03-07_05.00.05 11-03-07_06.00.05 0 ascii +1st test.2011-03-07-06-00-05.log test 11-03-07_06.00.05 11-03-07_07.00.05 0 ascii +1st test.2011-03-07-07-00-05.log test 11-03-07_07.00.05 11-03-07_08.00.05 0 ascii +1st test.2011-03-07-08-00-05.log test 11-03-07_08.00.05 11-03-07_09.00.05 0 ascii +1st test.2011-03-07-09-00-05.log test 11-03-07_09.00.05 11-03-07_10.00.05 0 ascii +1st test.2011-03-07-10-00-05.log test 11-03-07_10.00.05 11-03-07_11.00.05 0 ascii +1st test.2011-03-07-11-00-05.log test 11-03-07_11.00.05 11-03-07_12.00.05 0 ascii +1st test.2011-03-07-12-00-05.log test 11-03-07_12.00.05 11-03-07_12.59.55 1 ascii custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_03.00.05.log, path=test2, open=1299466805.0, close=1299470395.0, terminating=F] custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_03.59.55.log, path=test2, open=1299470395.0, close=1299470405.0, terminating=F] custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_04.00.05.log, path=test2, open=1299470405.0, close=1299473995.0, terminating=F] From 743fc1680dc9d4c04f38ca80c7ef4e5b88e8f4cb Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 26 Jul 2012 16:31:20 -0700 Subject: [PATCH 541/651] Improving error handling for threads. If a thread command fails (like the input framework not finding a file), that now (1) no longer hangs Bro, and (2) even allows for propagating error messages back before the thread is stops. (Actually, the thread doesn't really "stop"; the thread manager keeps threads around independent of their success; but it no longer polls them for input.) Closes #858. 
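A minimal script-level sketch of the behavior this enables (a hypothetical illustration, not part of the patch; the source name "no-such-file.dat" and the stream name "broken" are made up, and the raw-reader usage simply mirrors the existing input tests): a stream whose source cannot be opened now reports its error and stops being polled, instead of hanging Bro.

    type Val: record {
        s: string;
    };

    # With a failing reader this event is simply never raised.
    event line(description: Input::EventDescription, tpe: Input::Event, s: string)
        {
        }

    event bro_init()
        {
        Input::add_event([$source="no-such-file.dat", $reader=Input::READER_RAW,
                          $name="broken", $fields=Val, $ev=line, $want_record=F]);
        Input::remove("broken");
        }

The next patch in the series adds a proper regression test for this case (missing-file.bro).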
--- src/threading/Manager.cc | 14 +++++++++++--- src/threading/Manager.h | 17 ++++++++++------- src/threading/MsgThread.cc | 22 ++++++++++++++++++---- 3 files changed, 39 insertions(+), 14 deletions(-) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 53c11f2ee9..cfc44596e1 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -80,8 +80,10 @@ double Manager::NextTimestamp(double* network_time) for ( msg_thread_list::iterator i = msg_threads.begin(); i != msg_threads.end(); i++ ) { - if ( (*i)->MightHaveOut() ) - return timer_mgr->Time(); + MsgThread* t = *i; + + if ( (*i)->MightHaveOut() && ! t->Killed() ) + return timer_mgr->Time(); } return -1.0; @@ -95,6 +97,12 @@ void Manager::KillThreads() (*i)->Kill(); } +void Manager::KillThread(BasicThread* thread) + { + DBG_LOG(DBG_THREADING, "Killing thread %s ...", thread->Name()); + thread->Kill(); + } + void Manager::Process() { bool do_beat = false; @@ -114,7 +122,7 @@ void Manager::Process() if ( do_beat ) t->Heartbeat(); - while ( t->HasOut() ) + while ( t->HasOut() && ! t->Killed() ) { Message* msg = t->RetrieveOut(); diff --git a/src/threading/Manager.h b/src/threading/Manager.h index be81c69ba0..b46a06a46e 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -74,6 +74,16 @@ public: */ void ForceProcessing() { Process(); } + /** + * Signals a specific threads to terminate immediately. + */ + void KillThread(BasicThread* thread); + + /** + * Signals all threads to terminate immediately. + */ + void KillThreads(); + protected: friend class BasicThread; friend class MsgThread; @@ -106,13 +116,6 @@ protected: */ virtual double NextTimestamp(double* network_time); - /** - * Kills all thread immediately. Note that this may cause race conditions - * if a child thread currently holds a lock that might block somebody - * else. - */ - virtual void KillThreads(); - /** * Part of the IOSource interface. */ diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 48c7253885..e0f3fd8b0c 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -70,6 +70,16 @@ private: Type type; }; +// A message from the the child to the main process, requesting suicide. +class KillMeMessage : public OutputMessage +{ +public: + KillMeMessage(MsgThread* thread) + : OutputMessage("ReporterMessage", thread) {} + + virtual bool Process() { thread_mgr->KillThread(Object()); return true; } +}; + #ifdef DEBUG // A debug message from the child to be passed on to the DebugLogger. class DebugMessage : public OutputMessage @@ -346,16 +356,20 @@ void MsgThread::Run() if ( ! result ) { - string s = Fmt("%s failed, terminating thread (MsgThread)", Name()); - Error(s.c_str()); - break; + Error("terminating thread"); + + // This will eventually kill this thread, but only + // after all other outgoing messages (in particular + // error messages have been processed by then main + // thread). + SendOut(new KillMeMessage(this)); } } // In case we haven't send the finish method yet, do it now. Reading // global network_time here should be fine, it isn't changing // anymore. - if ( ! finished ) + if ( ! finished && ! Killed() ) { OnFinish(network_time); Finished(); From 86ae7d8b7c6500cde05fd478ea4f011168c25aec Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 26 Jul 2012 16:38:03 -0700 Subject: [PATCH 542/651] Test for input framework failing to find a file. The output isn't the nicest yet ... 
--- .../bro..stderr | 5 ++++ .../base/frameworks/input/missing-file.bro | 30 +++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.missing-file/bro..stderr create mode 100644 testing/btest/scripts/base/frameworks/input/missing-file.bro diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.missing-file/bro..stderr b/testing/btest/Baseline/scripts.base.frameworks.input.missing-file/bro..stderr new file mode 100644 index 0000000000..4380007b93 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.missing-file/bro..stderr @@ -0,0 +1,5 @@ +error: does-not-exist.dat/Input::READER_ASCII: Init: cannot open does-not-exist.dat +error: does-not-exist.dat/Input::READER_ASCII: Init failed +warning: Stream input is already queued for removal. Ignoring remove. +error: does-not-exist.dat/Input::READER_ASCII: terminating thread +received termination signal diff --git a/testing/btest/scripts/base/frameworks/input/missing-file.bro b/testing/btest/scripts/base/frameworks/input/missing-file.bro new file mode 100644 index 0000000000..269e287acc --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/missing-file.bro @@ -0,0 +1,30 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff bro/.stderr + +@load frameworks/communication/listen + +global outfile: file; +global try: count; + +module A; + +type Val: record { + i: int; + b: bool; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool) + { + } + +event bro_init() + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="does-not-exist.dat", $name="input", $fields=Val, $ev=line]); + Input::remove("input"); + } From f5862fb01408884079b84467cf139aad6046e3f1 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 26 Jul 2012 17:15:10 -0700 Subject: [PATCH 543/651] Preventing writers/readers from receiving further messages after a failure. Once a writer/reader Do* method has returned false, no further ones will be executed anymore. This is primarily a safety mechanism to make it easier for writer/reader authors as otherwise they would often need to track the failure state themselves (because with the now delayed termination from the earlier commit, furhter messages can now still arrive for a little bit). --- CHANGES | 13 +++++++++++++ VERSION | 2 +- src/input/ReaderBackend.cc | 13 ++++++++++++- src/logging/WriterBackend.cc | 29 +++++++++++++++++++++++++---- src/logging/WriterBackend.h | 2 ++ src/logging/writers/Ascii.cc | 2 +- src/threading/MsgThread.cc | 2 ++ src/threading/MsgThread.h | 7 +++++++ 8 files changed, 63 insertions(+), 7 deletions(-) diff --git a/CHANGES b/CHANGES index 3fe0fa2b73..44a3edc3c6 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,17 @@ +2.0-891 | 2012-07-26 17:15:10 -0700 + + * Reader/writer API: preventing plugins from receiving further + messages after a failure. (Robin Sommer) + + * New test for input framework that fails to find a file. (Robin + Sommer) + + * Improving error handling for threads. (Robin Sommer) + + * Tweaking the custom-rotate test to produce stable output. (Robin + Sommer) + 2.0-884 | 2012-07-26 14:33:21 -0700 * Add comprehensive error handling for close() calls. 
(Jon Siwek) diff --git a/VERSION b/VERSION index ced5c78870..b97bde7b8d 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-884 +2.0-891 diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 88a78c3cd7..81060be7d5 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -191,6 +191,9 @@ void ReaderBackend::SendEntry(Value* *vals) bool ReaderBackend::Init(const int arg_num_fields, const threading::Field* const* arg_fields) { + if ( Failed() ) + return true; + num_fields = arg_num_fields; fields = arg_fields; @@ -210,7 +213,9 @@ bool ReaderBackend::Init(const int arg_num_fields, bool ReaderBackend::OnFinish(double network_time) { - DoClose(); + if ( ! Failed() ) + DoClose(); + disabled = true; // frontend disables itself when it gets the Close-message. SendOut(new ReaderClosedMessage(frontend)); @@ -231,6 +236,9 @@ bool ReaderBackend::Update() if ( disabled ) return false; + if ( Failed() ) + return true; + bool success = DoUpdate(); if ( ! success ) DisableFrontend(); @@ -248,6 +256,9 @@ void ReaderBackend::DisableFrontend() bool ReaderBackend::OnHeartbeat(double network_time, double current_time) { + if ( Failed() ) + return true; + return DoHeartbeat(network_time, current_time); } diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 2933062eff..afdc4b99c5 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -174,6 +174,9 @@ bool WriterBackend::Init(int arg_num_fields, const Field* const* arg_fields) num_fields = arg_num_fields; fields = arg_fields; + if ( Failed() ) + return true; + if ( ! DoInit(*info, arg_num_fields, arg_fields) ) { DisableFrontend(); @@ -222,12 +225,15 @@ bool WriterBackend::Write(int arg_num_fields, int num_writes, Value*** vals) bool success = true; - for ( int j = 0; j < num_writes; j++ ) + if ( ! Failed() ) { - success = DoWrite(num_fields, fields, vals[j]); + for ( int j = 0; j < num_writes; j++ ) + { + success = DoWrite(num_fields, fields, vals[j]); - if ( ! success ) - break; + if ( ! success ) + break; + } } DeleteVals(num_writes, vals); @@ -244,6 +250,9 @@ bool WriterBackend::SetBuf(bool enabled) // No change. return true; + if ( Failed() ) + return true; + buffering = enabled; if ( ! DoSetBuf(enabled) ) @@ -258,6 +267,9 @@ bool WriterBackend::SetBuf(bool enabled) bool WriterBackend::Rotate(const char* rotated_path, double open, double close, bool terminating) { + if ( Failed() ) + return true; + if ( ! DoRotate(rotated_path, open, close, terminating) ) { DisableFrontend(); @@ -269,6 +281,9 @@ bool WriterBackend::Rotate(const char* rotated_path, double open, bool WriterBackend::Flush(double network_time) { + if ( Failed() ) + return true; + if ( ! DoFlush(network_time) ) { DisableFrontend(); @@ -280,11 +295,17 @@ bool WriterBackend::Flush(double network_time) bool WriterBackend::OnFinish(double network_time) { + if ( Failed() ) + return true; + return DoFinish(network_time); } bool WriterBackend::OnHeartbeat(double network_time, double current_time) { + if ( Failed() ) + return true; + SendOut(new FlushWriteBufferMessage(frontend)); return DoHeartbeat(network_time, current_time); } diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index d5f2be225e..77dbe71f45 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -182,6 +182,8 @@ public: /** * Disables the frontend that has instantiated this backend. Once * disabled,the frontend will not send any further message over. 
+ * + * TODO: Do we still need this method (and the corresponding message)? */ void DisableFrontend(); diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 0ccdd1f569..c471b3db0c 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -92,7 +92,7 @@ void Ascii::CloseFile(double t) bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * fields) { - assert(! fd); + assert(! fd); string path = info.path; diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index e0f3fd8b0c..6c63c5a287 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -154,6 +154,7 @@ MsgThread::MsgThread() : BasicThread(), queue_in(this, 0), queue_out(0, this) { cnt_sent_in = cnt_sent_out = 0; finished = false; + failed = false; thread_mgr->AddMsgThread(this); } @@ -363,6 +364,7 @@ void MsgThread::Run() // error messages have been processed by then main // thread). SendOut(new KillMeMessage(this)); + failed = true; } } diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index da505de6be..e3e7c8500f 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -201,6 +201,12 @@ protected: */ void HeartbeatInChild(); + /** Returns true if a child command has reported a failure. In that case, we'll + * be in the process of killing this thread and no further activity + * should carried out. To be called only from this child thread. + */ + bool Failed() const { return failed; } + /** * Regulatly triggered for execution in the child thread. * @@ -294,6 +300,7 @@ private: uint64_t cnt_sent_out; // Counts message sent by child. bool finished; // Set to true by Finished message. + bool failed; // Set to true when a command failed. }; /** From 76ea1823877677612e159c54edf1958898e7ceb2 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 26 Jul 2012 21:13:49 -0700 Subject: [PATCH 544/651] make want_record=T the default for events --- scripts/base/frameworks/input/main.bro | 2 +- testing/btest/scripts/base/frameworks/input/binary.bro | 2 +- testing/btest/scripts/base/frameworks/input/event.bro | 2 +- testing/btest/scripts/base/frameworks/input/executeraw.bro | 2 +- testing/btest/scripts/base/frameworks/input/raw.bro | 2 +- testing/btest/scripts/base/frameworks/input/rereadraw.bro | 2 +- testing/btest/scripts/base/frameworks/input/streamraw.bro | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index c31f92dba5..7f015402bc 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -84,7 +84,7 @@ export { ## If want_record if false (default), the event receives each value in fields as a seperate argument. ## If it is set to true, the event receives all fields in a signle record value. - want_record: bool &default=F; + want_record: bool &default=T; ## The event that is rised each time a new line is received from the reader. ## The event will receive an Input::Event enum as the first element, and the fields as the following arguments. 
diff --git a/testing/btest/scripts/base/frameworks/input/binary.bro b/testing/btest/scripts/base/frameworks/input/binary.bro index 86e02196b5..ce7f66a01d 100644 --- a/testing/btest/scripts/base/frameworks/input/binary.bro +++ b/testing/btest/scripts/base/frameworks/input/binary.bro @@ -51,6 +51,6 @@ event bro_init() { try = 0; outfile = open("../out"); - Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line]); + Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line, $want_record=F]); Input::remove("input"); } diff --git a/testing/btest/scripts/base/frameworks/input/event.bro b/testing/btest/scripts/base/frameworks/input/event.bro index f07ca0c43e..d0088472e7 100644 --- a/testing/btest/scripts/base/frameworks/input/event.bro +++ b/testing/btest/scripts/base/frameworks/input/event.bro @@ -49,6 +49,6 @@ event bro_init() { try = 0; outfile = open("../out"); - Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line]); + Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line, $want_record=F]); Input::remove("input"); } diff --git a/testing/btest/scripts/base/frameworks/input/executeraw.bro b/testing/btest/scripts/base/frameworks/input/executeraw.bro index 222b4256d1..626b9cdfd2 100644 --- a/testing/btest/scripts/base/frameworks/input/executeraw.bro +++ b/testing/btest/scripts/base/frameworks/input/executeraw.bro @@ -37,6 +37,6 @@ event line(description: Input::EventDescription, tpe: Input::Event, s: string) event bro_init() { outfile = open("../out.tmp"); - Input::add_event([$source="wc -l ../input.log |", $reader=Input::READER_RAW, $name="input", $fields=Val, $ev=line]); + Input::add_event([$source="wc -l ../input.log |", $reader=Input::READER_RAW, $name="input", $fields=Val, $ev=line, $want_record=F]); Input::remove("input"); } diff --git a/testing/btest/scripts/base/frameworks/input/raw.bro b/testing/btest/scripts/base/frameworks/input/raw.bro index cb19213173..d15aec22bb 100644 --- a/testing/btest/scripts/base/frameworks/input/raw.bro +++ b/testing/btest/scripts/base/frameworks/input/raw.bro @@ -44,6 +44,6 @@ event bro_init() { try = 0; outfile = open("../out"); - Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line]); + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); Input::remove("input"); } diff --git a/testing/btest/scripts/base/frameworks/input/rereadraw.bro b/testing/btest/scripts/base/frameworks/input/rereadraw.bro index 1051351c2b..2fdcdc8f9e 100644 --- a/testing/btest/scripts/base/frameworks/input/rereadraw.bro +++ b/testing/btest/scripts/base/frameworks/input/rereadraw.bro @@ -44,7 +44,7 @@ event bro_init() { try = 0; outfile = open("../out"); - Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::REREAD, $name="input", $fields=Val, $ev=line]); + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::REREAD, $name="input", $fields=Val, $ev=line, $want_record=F]); Input::force_update("input"); Input::remove("input"); } diff --git a/testing/btest/scripts/base/frameworks/input/streamraw.bro b/testing/btest/scripts/base/frameworks/input/streamraw.bro index a6aba88c5f..3bc06f7dea 100644 --- a/testing/btest/scripts/base/frameworks/input/streamraw.bro +++ b/testing/btest/scripts/base/frameworks/input/streamraw.bro @@ -58,5 +58,5 @@ event bro_init() { outfile = 
open("../out"); try = 0; - Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line]); + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); } From f02ed65878b81dfde81c2483887223bab99ad2e8 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 26 Jul 2012 21:51:29 -0700 Subject: [PATCH 545/651] Fix crash when encountering an InterpreterException in a predicate in logging or input Framework. Inputframework: did not contain any error handling for this case. Logging framework: tried to catch the interpreter-exception. However the exception already was caught by the call-function and not propagated. Instead, call returns a 0-pointer in this case, which lead to a segmentation fault. --- src/input/Manager.cc | 9 ++++++--- src/logging/Manager.cc | 21 ++++++--------------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 40e3c413bb..d278933125 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1544,7 +1544,7 @@ bool Manager::Delete(ReaderFrontend* reader, Value* *vals) bool Manager::CallPred(Func* pred_func, const int numvals, ...) { - bool result; + bool result = false; val_list vl(numvals); va_list lP; @@ -1555,8 +1555,11 @@ bool Manager::CallPred(Func* pred_func, const int numvals, ...) va_end(lP); Val* v = pred_func->Call(&vl); - result = v->AsBool(); - Unref(v); + if ( v ) + { + result = v->AsBool(); + Unref(v); + } return(result); } diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index b1b289a478..6729ec24d2 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -686,16 +686,13 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) int result = 1; - try + Val* v = filter->pred->Call(&vl); + if ( v ) { - Val* v = filter->pred->Call(&vl); result = v->AsBool(); Unref(v); } - catch ( InterpreterException& e ) - { /* Already reported. */ } - if ( ! result ) continue; } @@ -726,12 +723,9 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) Val* v = 0; - try - { - v = filter->path_func->Call(&vl); - } + v = filter->path_func->Call(&vl); - catch ( InterpreterException& e ) + if ( !v ) { return false; } @@ -1381,16 +1375,13 @@ bool Manager::FinishedRotation(WriterFrontend* writer, const char* new_name, con int result = 0; - try + Val* v = func->Call(&vl); + if ( v ) { - Val* v = func->Call(&vl); result = v->AsBool(); Unref(v); } - catch ( InterpreterException& e ) - { /* Already reported. */ } - return result; } From a3798070da5dbfd95469c784a6fcae5efdf8203a Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 27 Jul 2012 07:33:04 -0700 Subject: [PATCH 546/651] update input framework documentation to reflect want_record change. --- scripts/base/frameworks/input/main.bro | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 7f015402bc..55da6ae7ec 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -82,11 +82,11 @@ export { ## Record describing the fields to be retrieved from the source input. fields: any; - ## If want_record if false (default), the event receives each value in fields as a seperate argument. - ## If it is set to true, the event receives all fields in a signle record value. 
+ ## If want_record if false, the event receives each value in fields as a separate argument. + ## If it is set to true (default), the event receives all fields in a single record value. want_record: bool &default=T; - ## The event that is rised each time a new line is received from the reader. + ## The event that is raised each time a new line is received from the reader. ## The event will receive an Input::Event enum as the first element, and the fields as the following arguments. ev: any; From 2a9993619f6637ac6afcb8a6e4fd3afcba34a676 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 27 Jul 2012 13:49:49 -0400 Subject: [PATCH 547/651] Script-level rotation postprocessor fix. - This fixes a problem with writers that don't have a postprocessor. Jon is still looking into the rotation problem in the core. --- scripts/base/frameworks/logging/main.bro | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index 79c9884f9d..db79324d0d 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -341,8 +341,9 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool { if ( info$writer in default_rotation_postprocessors ) return default_rotation_postprocessors[info$writer](info); - - return F; + else + # Return T by default so that postprocessor-less writers don't shutdown. + return T; } function default_path_func(id: ID, path: string, rec: any) : string From 76520645bb6e134e28adab59d9af93129150db3f Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 27 Jul 2012 13:51:03 -0400 Subject: [PATCH 548/651] Small (potential performance) improvement for logging framework. --- scripts/base/frameworks/logging/main.bro | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index db79324d0d..c29215fd86 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -348,16 +348,16 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool function default_path_func(id: ID, path: string, rec: any) : string { + # The suggested path value is a previous result of this function + # or a filter path explicitly set by the user, so continue using it. + if ( path != "" ) + return path; + local id_str = fmt("%s", id); local parts = split1(id_str, /::/); if ( |parts| == 2 ) { - # The suggested path value is a previous result of this function - # or a filter path explicitly set by the user, so continue using it. - if ( path != "" ) - return path; - # Example: Notice::LOG -> "notice" if ( parts[2] == "LOG" ) { From 1fd0d7a607ddfc2b06a82aa085abcd082841463b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 27 Jul 2012 12:15:21 -0700 Subject: [PATCH 549/651] Changing the start/end markers in logs to open/close now reflecting wall clock. Triggers lots of (simple) baseline updates. 
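For consumers of the ASCII logs the visible difference is in the meta lines written at the top and bottom of each file, roughly as follows (the timestamp values here are only illustrative; the new ones are taken from gettimeofday() at the moment the file is opened or closed, rather than from network time):

    old:
        #start  2011-03-18-19-06-08
        ...
        #end    2011-03-18-19-06-13

    new:
        #open   2012-07-26-17-15-10
        ...
        #close  2012-07-26-17-15-12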
--- NEWS | 6 +-- src/logging/writers/Ascii.cc | 19 +++++---- src/logging/writers/Ascii.h | 2 +- testing/btest/Baseline/core.checksums/bad.out | 40 +++++++++---------- .../btest/Baseline/core.checksums/good.out | 28 ++++++------- .../core.disable-mobile-ipv6/weird.log | 4 +- .../Baseline/core.expr-exception/reporter.log | 4 +- testing/btest/Baseline/core.ipv6-frag/dns.log | 4 +- .../Baseline/core.print-bpf-filters/conn.log | 4 +- .../Baseline/core.print-bpf-filters/output | 24 +++++------ testing/btest/Baseline/core.truncation/output | 16 ++++---- .../Baseline/core.tunnels.ayiya/conn.log | 4 +- .../Baseline/core.tunnels.ayiya/http.log | 4 +- .../Baseline/core.tunnels.ayiya/tunnel.log | 4 +- .../core.tunnels.false-teredo/dpd.log | 4 +- .../core.tunnels.false-teredo/weird.log | 4 +- .../Baseline/core.tunnels.teredo/conn.log | 4 +- .../Baseline/core.tunnels.teredo/http.log | 4 +- .../Baseline/core.tunnels.teredo/tunnel.log | 4 +- .../conn.log | 4 +- .../http.log | 4 +- .../tunnel.log | 4 +- .../weird.log | 4 +- .../btest/Baseline/core.vlan-mpls/conn.log | 4 +- .../canonified_loaded_scripts.log | 4 +- .../canonified_loaded_scripts.log | 4 +- .../istate.events-ssl/receiver.http.log | 4 +- .../istate.events-ssl/sender.http.log | 4 +- .../Baseline/istate.events/receiver.http.log | 4 +- .../Baseline/istate.events/sender.http.log | 4 +- .../send.log | 4 +- .../ssh-new-default.log | 4 +- .../ssh.log | 4 +- .../test.log | 4 +- .../http.log | 4 +- .../test.log | 4 +- .../ssh.log | 12 +++--- .../test.log | 4 +- .../test.log | 4 +- .../ssh.log | 4 +- .../ssh.log | 4 +- .../ssh.log | 4 +- .../ssh.log | 4 +- .../ssh.log | 4 +- .../ssh.log | 4 +- .../local.log | 4 +- .../remote.log | 4 +- .../output | 28 ++++++------- .../test.failure.log | 4 +- .../test.success.log | 4 +- .../receiver.test.log | 4 +- .../sender.test.failure.log | 4 +- .../sender.test.log | 4 +- .../sender.test.success.log | 4 +- .../ssh.failure.log | 4 +- .../ssh.log | 4 +- .../out | 22 +--------- .../out | 40 +++++++++---------- .../output | 4 +- .../ssh.log | 4 +- .../ssh.log | 4 +- .../testing.log | 4 +- .../ssh.log | 4 +- .../http-2-2.log | 4 +- .../http-2.log | 4 +- .../http-3.log | 4 +- .../http.log | 4 +- .../reporter.log | 4 +- .../manager-1.metrics.log | 4 +- .../metrics.log | 4 +- .../manager-1.notice.log | 4 +- .../notice.log | 4 +- .../manager-1.notice.log | 4 +- .../manager-1.notice.log | 4 +- .../notice.log | 4 +- .../conn.log | 4 +- .../ftp.log | 4 +- .../conn.log | 4 +- .../ftp.log | 4 +- .../http.log | 4 +- .../http.log | 4 +- .../http.log | 4 +- .../http.log | 4 +- .../scripts.base.protocols.irc.basic/irc.log | 4 +- .../irc.log | 4 +- .../smtp.log | 4 +- .../smtp_entities.log | 4 +- .../smtp_entities.log | 4 +- .../socks.log | 4 +- .../tunnel.log | 4 +- .../socks.log | 4 +- .../tunnel.log | 4 +- .../tunnel.log | 4 +- .../scripts.base.protocols.ssl.basic/ssl.log | 4 +- .../knownhosts-all.log | 4 +- .../knownhosts-local.log | 4 +- .../knownhosts-remote.log | 4 +- .../knownservices-all.log | 4 +- .../knownservices-local.log | 4 +- .../knownservices-remote.log | 4 +- .../dns.log | 4 +- testing/scripts/diff-remove-timestamps | 2 +- 102 files changed, 294 insertions(+), 305 deletions(-) mode change 100755 => 100644 testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log diff --git a/NEWS b/NEWS index 00aeb62132..7b60a05ccd 100644 --- a/NEWS +++ b/NEWS @@ -152,9 +152,9 @@ the full set. understands. 
- ASCII logs now record the time when they were opened/closed at the - beginning and end of the file, respectively. The options - LogAscii::header_prefix and LogAscii::include_header have been - renamed to LogAscii::meta_prefix and LogAscii::include_meta, + beginning and end of the file, respectively (wall clock). The + options LogAscii::header_prefix and LogAscii::include_header have + been renamed to LogAscii::meta_prefix and LogAscii::include_meta, respectively. - The ASCII writers "header_*" options have been renamed to "meta_*" diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index c471b3db0c..c4c6b06563 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -81,10 +81,7 @@ void Ascii::CloseFile(double t) return; if ( include_meta ) - { - string ts = t ? Timestamp(t) : string(""); - WriteHeaderField("end", ts); - } + WriteHeaderField("close", Timestamp(0)); safe_close(fd); fd = 0; @@ -124,8 +121,6 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * if ( ! safe_write(fd, str.c_str(), str.length()) ) goto write_error; - string ts = Timestamp(info.network_time); - if ( ! (WriteHeaderField("set_separator", get_escaped_string( string(set_separator, set_separator_len), false)) && WriteHeaderField("empty_field", get_escaped_string( @@ -133,7 +128,7 @@ bool Ascii::DoInit(const WriterInfo& info, int num_fields, const Field* const * WriteHeaderField("unset_field", get_escaped_string( string(unset_field, unset_field_len), false)) && WriteHeaderField("path", get_escaped_string(path, false)) && - WriteHeaderField("start", ts)) ) + WriteHeaderField("open", Timestamp(0))) ) goto write_error; for ( int i = 0; i < num_fields; ++i ) @@ -419,6 +414,16 @@ string Ascii::Timestamp(double t) { time_t teatime = time_t(t); + if ( ! teatime ) + { + // Use wall clock. + struct timeval tv; + if ( gettimeofday(&tv, 0) < 0 ) + Error("gettimeofday failed"); + else + teatime = tv.tv_sec; + } + struct tm tmbuf; struct tm* tm = localtime_r(&teatime, &tmbuf); diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/Ascii.h index cb82860cb7..cf0190aa80 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/Ascii.h @@ -35,7 +35,7 @@ private: bool DoWriteOne(ODesc* desc, threading::Value* val, const threading::Field* field); bool WriteHeaderField(const string& key, const string& value); void CloseFile(double t); - string Timestamp(double t); + string Timestamp(double t); // Uses current time if t is zero. 
int fd; string fname; diff --git a/testing/btest/Baseline/core.checksums/bad.out b/testing/btest/Baseline/core.checksums/bad.out index de4538e32b..94b141c9e1 100644 --- a/testing/btest/Baseline/core.checksums/bad.out +++ b/testing/btest/Baseline/core.checksums/bad.out @@ -3,101 +3,101 @@ #empty_field (empty) #unset_field - #path weird -#start 2012-03-26-18-03-01 +#open 2012-03-26-18-03-01 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332784981.078396 - - - - - bad_IP_checksum - F bro -#end 2012-03-26-18-03-01 +#close 2012-03-26-18-03-01 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-03-26-18-01-25 +#open 2012-03-26-18-01-25 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332784885.686428 UWkUyAuUGXf 127.0.0.1 30000 127.0.0.1 80 bad_TCP_checksum - F bro -#end 2012-03-26-18-01-25 +#close 2012-03-26-18-01-25 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-03-26-18-02-13 +#open 2012-03-26-18-02-13 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332784933.501023 UWkUyAuUGXf 127.0.0.1 30000 127.0.0.1 13000 bad_UDP_checksum - F bro -#end 2012-03-26-18-02-13 +#close 2012-03-26-18-02-13 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-04-10-16-29-23 +#open 2012-04-10-16-29-23 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075363.536871 UWkUyAuUGXf 192.168.1.100 8 192.168.1.101 0 bad_ICMP_checksum - F bro -#end 2012-04-10-16-29-23 +#close 2012-04-10-16-29-23 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-03-26-18-06-50 +#open 2012-03-26-18-06-50 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332785210.013051 - - - - - routing0_hdr - F bro 1332785210.013051 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 80 bad_TCP_checksum - F bro -#end 2012-03-26-18-06-50 +#close 2012-03-26-18-06-50 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-03-26-17-23-00 +#open 2012-03-26-17-23-00 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332782580.798420 - - - - - routing0_hdr - F bro 1332782580.798420 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 13000 bad_UDP_checksum - F bro -#end 2012-03-26-17-23-00 +#close 2012-03-26-17-23-00 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-04-10-16-25-11 +#open 2012-04-10-16-25-11 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075111.800086 - - - - - routing0_hdr - F bro 1334075111.800086 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:78:1:32::1 129 bad_ICMP_checksum - F bro -#end 2012-04-10-16-25-11 +#close 2012-04-10-16-25-11 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-03-26-18-07-30 +#open 2012-03-26-18-07-30 #fields ts uid id.orig_h id.orig_p 
id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332785250.469132 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 80 bad_TCP_checksum - F bro -#end 2012-03-26-18-07-30 +#close 2012-03-26-18-07-30 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-03-26-17-02-22 +#open 2012-03-26-17-02-22 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332781342.923813 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 13000 bad_UDP_checksum - F bro -#end 2012-03-26-17-02-22 +#close 2012-03-26-17-02-22 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-04-10-16-22-19 +#open 2012-04-10-16-22-19 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334074939.467194 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F bro -#end 2012-04-10-16-22-19 +#close 2012-04-10-16-22-19 diff --git a/testing/btest/Baseline/core.checksums/good.out b/testing/btest/Baseline/core.checksums/good.out index ed6c071ffc..a47931a15c 100644 --- a/testing/btest/Baseline/core.checksums/good.out +++ b/testing/btest/Baseline/core.checksums/good.out @@ -3,68 +3,68 @@ #empty_field (empty) #unset_field - #path weird -#start 2012-04-10-16-22-19 +#open 2012-04-10-16-22-19 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334074939.467194 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F bro -#end 2012-04-10-16-22-19 +#close 2012-04-10-16-22-19 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-03-26-18-05-25 +#open 2012-03-26-18-05-25 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332785125.596793 - - - - - routing0_hdr - F bro -#end 2012-03-26-18-05-25 +#close 2012-03-26-18-05-25 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-03-26-17-21-48 +#open 2012-03-26-17-21-48 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1332782508.592037 - - - - - routing0_hdr - F bro -#end 2012-03-26-17-21-48 +#close 2012-03-26-17-21-48 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-04-10-16-23-47 +#open 2012-04-10-16-23-47 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075027.053380 - - - - - routing0_hdr - F bro -#end 2012-04-10-16-23-47 +#close 2012-04-10-16-23-47 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-04-10-16-23-47 +#open 2012-04-10-16-23-47 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075027.053380 - - - - - routing0_hdr - F bro -#end 2012-04-10-16-23-47 +#close 2012-04-10-16-23-47 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-04-10-16-23-47 +#open 
2012-04-10-16-23-47 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075027.053380 - - - - - routing0_hdr - F bro -#end 2012-04-10-16-23-47 +#close 2012-04-10-16-23-47 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-04-10-16-23-47 +#open 2012-04-10-16-23-47 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334075027.053380 - - - - - routing0_hdr - F bro -#end 2012-04-10-16-23-47 +#close 2012-04-10-16-23-47 diff --git a/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log b/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log index d29456f75f..9da1a8d3ba 100644 --- a/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log +++ b/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path weird -#start 2012-04-05-21-56-51 +#open 2012-04-05-21-56-51 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1333663011.602839 - - - - - unknown_protocol_135 - F bro -#end 2012-04-05-21-56-51 +#close 2012-04-05-21-56-51 diff --git a/testing/btest/Baseline/core.expr-exception/reporter.log b/testing/btest/Baseline/core.expr-exception/reporter.log index f9e33d9718..d6e07b42b3 100644 --- a/testing/btest/Baseline/core.expr-exception/reporter.log +++ b/testing/btest/Baseline/core.expr-exception/reporter.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path reporter -#start 2011-03-18-19-06-08 +#open 2011-03-18-19-06-08 #fields ts level message location #types time enum string string 1300475168.783842 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 @@ -15,4 +15,4 @@ 1300475168.954761 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 1300475168.962628 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 1300475169.780331 Reporter::ERROR field value missing [c$ftp] /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/core.ipv6-frag/dns.log b/testing/btest/Baseline/core.ipv6-frag/dns.log index 2003d1f253..d763fc4fee 100644 --- a/testing/btest/Baseline/core.ipv6-frag/dns.log +++ b/testing/btest/Baseline/core.ipv6-frag/dns.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path dns -#start 2012-03-07-01-37-58 +#open 2012-03-07-01-37-58 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs #types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] 1331084278.438444 UWkUyAuUGXf 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51850 2607:f740:b::f93 53 udp 3903 txtpadding_323.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 NOERROR T F T F 0 This TXT record should be ignored 1.000000 1331084293.592245 arKYeMETxOg 2001:470:1f11:81f:d138:5f55:6d4:1fe2 51851 2607:f740:b::f93 53 udp 40849 txtpadding_3230.n1.netalyzr.icsi.berkeley.edu 1 C_INTERNET 16 TXT 0 
NOERROR T F T F 0 This TXT record should be ignored 1.000000 -#end 2012-03-07-01-38-18 +#close 2012-03-07-01-38-18 diff --git a/testing/btest/Baseline/core.print-bpf-filters/conn.log b/testing/btest/Baseline/core.print-bpf-filters/conn.log index 4033b64e2a..0fd86b8dc4 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/conn.log +++ b/testing/btest/Baseline/core.print-bpf-filters/conn.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path conn -#start 2005-10-07-23-23-57 +#open 2005-10-07-23-23-57 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1128727435.450898 UWkUyAuUGXf 141.42.64.125 56730 125.190.109.199 80 tcp http 1.733303 98 9417 SF - 0 ShADdFaf 12 730 10 9945 (empty) -#end 2005-10-07-23-23-57 +#close 2005-10-07-23-23-57 diff --git a/testing/btest/Baseline/core.print-bpf-filters/output b/testing/btest/Baseline/core.print-bpf-filters/output index e4bc04192a..c55952ffed 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/output +++ b/testing/btest/Baseline/core.print-bpf-filters/output @@ -3,38 +3,38 @@ #empty_field (empty) #unset_field - #path packet_filter -#start 1970-01-01-00-00-00 +#open 2012-07-27-19-14-29 #fields ts node filter init success #types time string string bool bool -1342748953.570646 - ip or not ip T T -#end +1343416469.508262 - ip or not ip T T +#close 2012-07-27-19-14-29 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path packet_filter -#start 1970-01-01-00-00-00 +#open 2012-07-27-19-14-29 #fields ts node filter init success #types time string string bool bool -1342748953.898675 - (((((((((((((((((((((((((port 53) or (tcp port 989)) or (tcp port 443)) or (port 6669)) or (udp and port 5353)) or (port 6668)) or (tcp port 1080)) or (udp and port 5355)) or (tcp port 22)) or (tcp port 995)) or (port 21)) or (tcp port 25 or tcp port 587)) or (port 6667)) or (tcp port 614)) or (tcp port 990)) or (udp port 137)) or (tcp port 993)) or (tcp port 5223)) or (port 514)) or (tcp port 585)) or (tcp port 992)) or (tcp port 563)) or (tcp port 994)) or (tcp port 636)) or (tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888))) or (port 6666) T T -#end +1343416469.888870 - (((((((((((((((((((((((((port 53) or (tcp port 989)) or (tcp port 443)) or (port 6669)) or (udp and port 5353)) or (port 6668)) or (tcp port 1080)) or (udp and port 5355)) or (tcp port 22)) or (tcp port 995)) or (port 21)) or (tcp port 25 or tcp port 587)) or (port 6667)) or (tcp port 614)) or (tcp port 990)) or (udp port 137)) or (tcp port 993)) or (tcp port 5223)) or (port 514)) or (tcp port 585)) or (tcp port 992)) or (tcp port 563)) or (tcp port 994)) or (tcp port 636)) or (tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888))) or (port 6666) T T +#close 2012-07-27-19-14-29 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path packet_filter -#start 1970-01-01-00-00-00 +#open 2012-07-27-19-14-30 #fields ts node filter init success #types time string string bool bool -1342748954.278211 - port 42 T T -#end +1343416470.252918 - port 42 T T +#close 2012-07-27-19-14-30 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path packet_filter -#start 1970-01-01-00-00-00 +#open 2012-07-27-19-14-30 #fields ts node 
filter init success #types time string string bool bool -1342748954.883780 - port 56730 T T -#end 2005-10-07-23-23-57 +1343416470.614962 - port 56730 T T +#close 2012-07-27-19-14-30 diff --git a/testing/btest/Baseline/core.truncation/output b/testing/btest/Baseline/core.truncation/output index 836f9170d4..9243c2f873 100644 --- a/testing/btest/Baseline/core.truncation/output +++ b/testing/btest/Baseline/core.truncation/output @@ -3,38 +3,38 @@ #empty_field (empty) #unset_field - #path weird -#start 2012-04-11-16-01-35 +#open 2012-04-11-16-01-35 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334160095.895421 - - - - - truncated_IP - F bro -#end 2012-04-11-16-01-35 +#close 2012-04-11-16-01-35 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-04-11-14-57-21 +#open 2012-04-11-14-57-21 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334156241.519125 - - - - - truncated_IP - F bro -#end 2012-04-11-14-57-21 +#close 2012-04-11-14-57-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-04-10-21-50-48 +#open 2012-04-10-21-50-48 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1334094648.590126 - - - - - truncated_IP - F bro -#end 2012-04-10-21-50-48 +#close 2012-04-10-21-50-48 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#start 2012-05-29-22-02-34 +#open 2012-05-29-22-02-34 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1338328954.078361 - - - - - internally_truncated_header - F bro -#end 2012-05-29-22-02-34 +#close 2012-05-29-22-02-34 diff --git a/testing/btest/Baseline/core.tunnels.ayiya/conn.log b/testing/btest/Baseline/core.tunnels.ayiya/conn.log index 82a3828f0d..7646fa574a 100644 --- a/testing/btest/Baseline/core.tunnels.ayiya/conn.log +++ b/testing/btest/Baseline/core.tunnels.ayiya/conn.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#start 2009-11-08-04-41-57 +#open 2009-11-08-04-41-57 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1257655301.595604 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 tcp http 2.101052 2981 4665 S1 - 0 ShADad 10 3605 11 5329 k6kgXLOoSKl @@ -14,4 +14,4 @@ 1257655296.585188 TEfuqmmG4bh fe80::216:cbff:fe9a:4cb9 131 ff02::1:ff00:2 130 icmp - 0.919988 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl 1257655296.585151 j4u32Pc5bif fe80::216:cbff:fe9a:4cb9 131 ff02::2:f901:d225 130 icmp - 0.719947 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl 1257655296.585034 nQcgTWjvg4c fe80::216:cbff:fe9a:4cb9 131 ff02::1:ff9a:4cb9 130 icmp - 4.922880 32 0 OTH - 0 - 2 144 0 0 k6kgXLOoSKl -#end 2009-11-08-04-41-57 +#close 2009-11-08-04-41-57 diff --git a/testing/btest/Baseline/core.tunnels.ayiya/http.log b/testing/btest/Baseline/core.tunnels.ayiya/http.log index 4fbcd508f4..2a97fd9b69 100644 --- a/testing/btest/Baseline/core.tunnels.ayiya/http.log +++ 
b/testing/btest/Baseline/core.tunnels.ayiya/http.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path http -#start 2009-11-08-04-41-41 +#open 2009-11-08-04-41-41 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1257655301.652206 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 10102 200 OK - - - (empty) - - - text/html - - 1257655302.514424 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 2 GET ipv6.google.com /csi?v=3&s=webhp&action=&tran=undefined&e=17259,19771,21517,21766,21887,22212&ei=BUz2Su7PMJTglQfz3NzCAw&rt=prt.77,xjs.565,ol.645 http://ipv6.google.com/ Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 0 204 No Content - - - (empty) - - - - - - 1257655303.603569 5OKnoww6xl4 2001:4978:f:4c::2 53382 2001:4860:b002::68 80 3 GET ipv6.google.com /gen_204?atyp=i&ct=fade&cad=1254&ei=BUz2Su7PMJTglQfz3NzCAw&zx=1257655303600 http://ipv6.google.com/ Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.15pre) Gecko/2009091516 Camino/2.0b4 (like Firefox/3.0.15pre) 0 0 204 No Content - - - (empty) - - - - - - -#end 2009-11-08-04-41-57 +#close 2009-11-08-04-41-57 diff --git a/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log b/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log index 123ea8a792..60e0a4a108 100644 --- a/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.ayiya/tunnel.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path tunnel -#start 2009-11-08-04-41-33 +#open 2009-11-08-04-41-33 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1257655293.629048 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::AYIYA Tunnel::DISCOVER 1257655296.585034 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::AYIYA Tunnel::DISCOVER 1257655317.464035 k6kgXLOoSKl 192.168.3.101 53859 216.14.98.22 5072 Tunnel::AYIYA Tunnel::CLOSE 1257655317.464035 UWkUyAuUGXf 192.168.3.101 53796 216.14.98.22 5072 Tunnel::AYIYA Tunnel::CLOSE -#end 2009-11-08-04-41-57 +#close 2009-11-08-04-41-57 diff --git a/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log b/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log index 63a0437445..3300a3ef95 100644 --- a/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log +++ b/testing/btest/Baseline/core.tunnels.false-teredo/dpd.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path dpd -#start 2009-11-18-17-59-51 +#open 2009-11-18-17-59-51 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto analyzer failure_reason #types time string addr port addr port enum string string 1258567191.486869 UWkUyAuUGXf 192.168.1.105 57696 192.168.1.1 53 udp TEREDO Teredo payload length [c\x1d\x81\x80\x00\x01\x00\x02\x00\x02\x00\x00\x04amch\x0equestionmarket\x03com\x00\x00\x01\x00...] 
@@ -12,4 +12,4 @@ 1258581768.898165 TEfuqmmG4bh 192.168.1.104 50798 192.168.1.1 53 udp TEREDO Teredo payload length [o\xe3\x81\x80\x00\x01\x00\x02\x00\x04\x00\x04\x03www\x0fnashuatelegraph\x03com\x00\x00\x01\x00...] 1258584478.989528 FrJExwHcSal 192.168.1.104 64963 192.168.1.1 53 udp TEREDO Teredo payload length [e\xbd\x81\x80\x00\x01\x00\x08\x00\x06\x00\x06\x08wellness\x05blogs\x04time\x03com\x00\x00\x01\x00...] 1258600683.934672 5OKnoww6xl4 192.168.1.103 59838 192.168.1.1 53 udp TEREDO Teredo payload length [h\xf0\x81\x80\x00\x01\x00\x01\x00\x02\x00\x00\x06update\x0csanasecurity\x03com\x00\x00\x01\x00...] -#end 2009-11-19-03-18-03 +#close 2009-11-19-03-18-03 diff --git a/testing/btest/Baseline/core.tunnels.false-teredo/weird.log b/testing/btest/Baseline/core.tunnels.false-teredo/weird.log index eb4319c7eb..a84d469660 100644 --- a/testing/btest/Baseline/core.tunnels.false-teredo/weird.log +++ b/testing/btest/Baseline/core.tunnels.false-teredo/weird.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path weird -#start 2009-11-18-17-59-51 +#open 2009-11-18-17-59-51 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1258567191.405770 - - - - - truncated_header_in_tunnel - F bro @@ -12,4 +12,4 @@ 1258581768.568451 - - - - - truncated_header_in_tunnel - F bro 1258584478.859853 - - - - - truncated_header_in_tunnel - F bro 1258600683.934458 - - - - - truncated_header_in_tunnel - F bro -#end 2009-11-19-03-18-03 +#close 2009-11-19-03-18-03 diff --git a/testing/btest/Baseline/core.tunnels.teredo/conn.log b/testing/btest/Baseline/core.tunnels.teredo/conn.log index 2342953339..657e86b8b3 100644 --- a/testing/btest/Baseline/core.tunnels.teredo/conn.log +++ b/testing/btest/Baseline/core.tunnels.teredo/conn.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#start 2008-05-16-15-50-57 +#open 2008-05-16-15-50-57 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1210953047.736921 arKYeMETxOg 192.168.2.16 1576 75.126.130.163 80 tcp - 0.000357 0 0 SHR - 0 fA 1 40 1 40 (empty) @@ -27,4 +27,4 @@ 1210953052.324629 FrJExwHcSal fe80::8000:f227:bec8:61af 134 fe80::8000:ffff:ffff:fffd 133 icmp - - - - OTH - 0 - 1 88 0 0 TEfuqmmG4bh 1210953060.829303 qCaWGmzFtM5 2001:0:4137:9e50:8000:f12a:b9c8:2815 128 2001:4860:0:2001::68 129 icmp - 0.463615 4 4 OTH - 0 - 1 52 1 52 GSxOnSLghOa,nQcgTWjvg4c 1210953052.202579 j4u32Pc5bif fe80::8000:ffff:ffff:fffd 133 ff02::2 134 icmp - - - - OTH - 0 - 1 64 0 0 nQcgTWjvg4c -#end 2008-05-16-15-51-16 +#close 2008-05-16-15-51-16 diff --git a/testing/btest/Baseline/core.tunnels.teredo/http.log b/testing/btest/Baseline/core.tunnels.teredo/http.log index c0db5fc146..c77297c58d 100644 --- a/testing/btest/Baseline/core.tunnels.teredo/http.log +++ b/testing/btest/Baseline/core.tunnels.teredo/http.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path http -#start 2008-05-16-15-50-58 +#open 2008-05-16-15-50-58 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr 
port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1210953057.917183 3PKsZ2Uye21 192.168.2.16 1578 75.126.203.78 80 1 POST download913.avast.com /cgi-bin/iavs4stats.cgi - Syncer/4.80 (av_pro-1169;f) 589 0 204 - - - (empty) - - - text/plain - - 1210953061.585996 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 6640 200 OK - - - (empty) - - - text/html - - 1210953073.381474 70MGiRM1Qf4 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 2 GET ipv6.google.com /search?hl=en&q=Wireshark+!&btnG=Google+Search http://ipv6.google.com/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 25119 200 OK - - - (empty) - - - text/html - - 1210953074.674817 c4Zw9TmAE05 192.168.2.16 1580 67.228.110.120 80 1 GET www.wireshark.org / http://ipv6.google.com/search?hl=en&q=Wireshark+%21&btnG=Google+Search Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 11845 200 OK - - - (empty) - - - text/xml - - -#end 2008-05-16-15-51-16 +#close 2008-05-16-15-51-16 diff --git a/testing/btest/Baseline/core.tunnels.teredo/tunnel.log b/testing/btest/Baseline/core.tunnels.teredo/tunnel.log index ab14bf68bc..120089caa0 100644 --- a/testing/btest/Baseline/core.tunnels.teredo/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.teredo/tunnel.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path tunnel -#start 2008-05-16-15-50-52 +#open 2008-05-16-15-50-52 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::DISCOVER @@ -12,4 +12,4 @@ 1210953076.058333 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::CLOSE 1210953076.058333 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 Tunnel::TEREDO Tunnel::CLOSE 1210953076.058333 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 Tunnel::TEREDO Tunnel::CLOSE -#end 2008-05-16-15-51-16 +#close 2008-05-16-15-51-16 diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log index 7b9ff58624..757eaf62ca 100644 --- a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/conn.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#start 2012-06-19-17-39-37 +#open 2012-06-19-17-39-37 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1340127577.354166 FrJExwHcSal 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 tcp http 0.052829 1675 10467 S1 - 0 ShADad 10 2279 12 11191 j4u32Pc5bif @@ -13,4 +13,4 @@ 1340127577.339015 nQcgTWjvg4c fe80::8000:f227:bec8:61af 134 fe80::8000:ffff:ffff:fffd 133 icmp - - - - OTH - 0 - 1 88 0 0 k6kgXLOoSKl 1340127577.343969 TEfuqmmG4bh 2001:0:4137:9e50:8000:f12a:b9c8:2815 128 2001:4860:0:2001::68 129 icmp - 0.007778 4 4 OTH - 0 - 1 52 1 52 UWkUyAuUGXf,j4u32Pc5bif 
1340127577.336558 arKYeMETxOg fe80::8000:ffff:ffff:fffd 133 ff02::2 134 icmp - - - - OTH - 0 - 1 64 0 0 UWkUyAuUGXf -#end 2012-06-19-17-39-37 +#close 2012-06-19-17-39-37 diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log index 12f0d7be7a..e0b223d114 100644 --- a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/http.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path http -#start 2012-06-19-17-39-37 +#open 2012-06-19-17-39-37 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1340127577.361683 FrJExwHcSal 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 1 GET ipv6.google.com / - Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 6640 200 OK - - - (empty) - - - text/html - - 1340127577.379360 FrJExwHcSal 2001:0:4137:9e50:8000:f12a:b9c8:2815 1286 2001:4860:0:2001::68 80 2 GET ipv6.google.com /search?hl=en&q=Wireshark+!&btnG=Google+Search http://ipv6.google.com/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b5) Gecko/2008032620 Firefox/3.0b5 0 25119 200 OK - - - (empty) - - - text/html - - -#end 2012-06-19-17-39-37 +#close 2012-06-19-17-39-37 diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log index 1a14b3edb7..86c2c94c04 100644 --- a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/tunnel.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path tunnel -#start 2012-06-19-17-39-37 +#open 2012-06-19-17-39-37 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1340127577.336558 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::DISCOVER @@ -12,4 +12,4 @@ 1340127577.406995 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Tunnel::TEREDO Tunnel::CLOSE 1340127577.406995 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 Tunnel::TEREDO Tunnel::CLOSE 1340127577.406995 k6kgXLOoSKl 192.168.2.16 3797 65.55.158.81 3544 Tunnel::TEREDO Tunnel::CLOSE -#end 2012-06-19-17-39-37 +#close 2012-06-19-17-39-37 diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log index 8b252a5819..4ead29302f 100644 --- a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path weird -#start 2012-06-19-17-39-37 +#open 2012-06-19-17-39-37 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1340127577.346849 UWkUyAuUGXf 192.168.2.16 3797 65.55.158.80 3544 Teredo_bubble_with_payload - F bro 1340127577.349292 j4u32Pc5bif 192.168.2.16 3797 83.170.1.38 32900 
Teredo_bubble_with_payload - F bro -#end 2012-06-19-17-39-37 +#close 2012-06-19-17-39-37 diff --git a/testing/btest/Baseline/core.vlan-mpls/conn.log b/testing/btest/Baseline/core.vlan-mpls/conn.log index 72e13ee9b4..d4cc8370a5 100644 --- a/testing/btest/Baseline/core.vlan-mpls/conn.log +++ b/testing/btest/Baseline/core.vlan-mpls/conn.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path conn -#start 2005-10-07-23-23-55 +#open 2005-10-07-23-23-55 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 952109346.874907 UWkUyAuUGXf 10.1.2.1 11001 10.34.0.1 23 tcp - 2.102560 26 0 SH - 0 SADF 11 470 0 0 (empty) 1128727435.450898 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 tcp http 1.733303 98 9417 SF - 0 ShADdFaf 12 730 10 9945 (empty) 1278600802.069419 k6kgXLOoSKl 10.20.80.1 50343 10.0.0.15 80 tcp - 0.004152 9 3429 SF - 0 ShADadfF 7 381 7 3801 (empty) -#end 2010-07-08-14-53-22 +#close 2010-07-08-14-53-22 diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index ca8749956f..41209a4084 100644 --- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#start 2012-07-20-14-34-11 +#open 2012-07-20-14-34-11 #fields name #types string scripts/base/init-bare.bro @@ -30,4 +30,4 @@ scripts/base/init-bare.bro scripts/base/frameworks/input/./readers/raw.bro scripts/base/frameworks/input/./readers/benchmark.bro scripts/policy/misc/loaded-scripts.bro -#end 2012-07-20-14-34-11 +#close 2012-07-20-14-34-11 diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index b464c916f2..b2afadc0fe 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#start 2012-07-20-14-34-40 +#open 2012-07-20-14-34-40 #fields name #types string scripts/base/init-bare.bro @@ -110,4 +110,4 @@ scripts/base/init-default.bro scripts/base/protocols/syslog/./consts.bro scripts/base/protocols/syslog/./main.bro scripts/policy/misc/loaded-scripts.bro -#end 2012-07-20-14-34-40 +#close 2012-07-20-14-34-40 diff --git a/testing/btest/Baseline/istate.events-ssl/receiver.http.log b/testing/btest/Baseline/istate.events-ssl/receiver.http.log index c9a996ef5b..3fc7f1b66f 100644 --- a/testing/btest/Baseline/istate.events-ssl/receiver.http.log +++ b/testing/btest/Baseline/istate.events-ssl/receiver.http.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path http -#start 2012-07-20-01-53-03 +#open 2012-07-20-01-53-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string 
string string string string count count count string count string string table[enum] string string table[string] string string file 1342749182.906082 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - -#end 2012-07-20-01-53-04 +#close 2012-07-20-01-53-04 diff --git a/testing/btest/Baseline/istate.events-ssl/sender.http.log b/testing/btest/Baseline/istate.events-ssl/sender.http.log index c9a996ef5b..3fc7f1b66f 100644 --- a/testing/btest/Baseline/istate.events-ssl/sender.http.log +++ b/testing/btest/Baseline/istate.events-ssl/sender.http.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path http -#start 2012-07-20-01-53-03 +#open 2012-07-20-01-53-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1342749182.906082 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - -#end 2012-07-20-01-53-04 +#close 2012-07-20-01-53-04 diff --git a/testing/btest/Baseline/istate.events/receiver.http.log b/testing/btest/Baseline/istate.events/receiver.http.log index 566457b996..6862c08b98 100644 --- a/testing/btest/Baseline/istate.events/receiver.http.log +++ b/testing/btest/Baseline/istate.events/receiver.http.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path http -#start 2012-07-20-01-53-12 +#open 2012-07-20-01-53-12 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1342749191.765740 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - -#end 2012-07-20-01-53-13 +#close 2012-07-20-01-53-13 diff --git a/testing/btest/Baseline/istate.events/sender.http.log b/testing/btest/Baseline/istate.events/sender.http.log index 566457b996..6862c08b98 100644 --- a/testing/btest/Baseline/istate.events/sender.http.log +++ b/testing/btest/Baseline/istate.events/sender.http.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path http -#start 2012-07-20-01-53-12 +#open 2012-07-20-01-53-12 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1342749191.765740 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - - -#end 2012-07-20-01-53-13 +#close 2012-07-20-01-53-13 diff --git 
a/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log b/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log index 7e21ff86b7..c6a19029b6 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log +++ b/testing/btest/Baseline/scripts.base.frameworks.communication.communication_log_baseline/send.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path communication -#start 2012-07-20-01-49-40 +#open 2012-07-20-01-49-40 #fields ts peer src_name connected_peer_desc connected_peer_addr connected_peer_port level message #types time string string string addr port string string 1342748980.737451 bro parent - - - info [#1/127.0.0.1:47757] added peer @@ -21,4 +21,4 @@ 1342748980.793108 bro parent - - - info terminating... 1342748980.796454 bro child - - - info terminating 1342748980.797536 bro parent - - - info [#1/127.0.0.1:47757] closing connection -#end 2012-07-20-01-49-40 +#close 2012-07-20-01-49-40 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.adapt-filter/ssh-new-default.log b/testing/btest/Baseline/scripts.base.frameworks.logging.adapt-filter/ssh-new-default.log index a0359c2d70..655d9a5fbd 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.adapt-filter/ssh-new-default.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.adapt-filter/ssh-new-default.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path ssh-new-default -#start 2012-07-20-01-49-19 +#open 2012-07-20-01-49-19 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748959.430282 1.2.3.4 1234 2.3.4.5 80 success unknown 1342748959.430282 1.2.3.4 1234 2.3.4.5 80 failure US -#end 2012-07-20-01-49-19 +#close 2012-07-20-01-49-19 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-binary/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-binary/ssh.log index 0c826f9694..b2528467a1 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-binary/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-binary/ssh.log @@ -3,10 +3,10 @@ #empty_field|(empty) #unset_field|- #path|ssh -#start|2012-07-20-01-49-19 +#open|2012-07-20-01-49-19 #fields|data|data2 #types|string|string abc\x0a\xffdef|DATA2 abc\x7c\xffdef|DATA2 abc\xff\x7cdef|DATA2 -#end|2012-07-20-01-49-19 +#close|2012-07-20-01-49-19 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-notset-str/test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-notset-str/test.log index b1a4ba52e2..b77541d35e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-notset-str/test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-notset-str/test.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path test -#start 2012-07-20-01-49-19 +#open 2012-07-20-01-49-19 #fields x y z #types string string string \x2d - (empty) -#end 2012-07-20-01-49-19 +#close 2012-07-20-01-49-19 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-odd-url/http.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-odd-url/http.log index 683f149317..f1ff4db3b8 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-odd-url/http.log +++ 
b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-odd-url/http.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path http -#start 2011-09-12-03-57-36 +#open 2011-09-12-03-57-36 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1315799856.264750 UWkUyAuUGXf 10.0.1.104 64216 193.40.5.162 80 1 GET lepo.it.da.ut.ee /~cect/teoreetilised seminarid_2010/arheoloogia_uurimisr\xfchma_seminar/Joyce et al - The Languages of Archaeology ~ Dialogue, Narrative and Writing.pdf - Wget/1.12 (darwin10.8.0) 0 346 404 Not Found - - - (empty) - - - text/html - - -#end 2011-09-12-03-57-37 +#close 2011-09-12-03-57-37 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-set-separator/test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-set-separator/test.log index a03c6f954b..25e9319eec 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-set-separator/test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape-set-separator/test.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path test -#start 2012-07-20-01-49-19 +#open 2012-07-20-01-49-19 #fields ss #types table[string] CC,AA,\x2c,\x2c\x2c -#end 2012-07-20-01-49-19 +#close 2012-07-20-01-49-19 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log index 0c6a266de0..7a448ce6c1 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log @@ -3,10 +3,12 @@ #empty_field||(empty) #unset_field||- #path||ssh +#open||2012-07-27-19-14-35 #fields||t||id.orig_h||id.orig_p||id.resp_h||id.resp_p||status||country #types||time||addr||port||addr||port||string||string -1342759749.586006||1.2.3.4||1234||2.3.4.5||80||success||unknown -1342759749.586006||1.2.3.4||1234||2.3.4.5||80||failure||US -1342759749.586006||1.2.3.4||1234||2.3.4.5||80||fa\x7c\x7cure||UK -1342759749.586006||1.2.3.4||1234||2.3.4.5||80||su\x7c\x7cess||BR -1342759749.586006||1.2.3.4||1234||2.3.4.5||80||failure||MX +1343416475.837726||1.2.3.4||1234||2.3.4.5||80||success||unknown +1343416475.837726||1.2.3.4||1234||2.3.4.5||80||failure||US +1343416475.837726||1.2.3.4||1234||2.3.4.5||80||fa\x7c\x7cure||UK +1343416475.837726||1.2.3.4||1234||2.3.4.5||80||su\x7c\x7cess||BR +1343416475.837726||1.2.3.4||1234||2.3.4.5||80||failure||MX +#close||2012-07-27-19-14-35 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log index 21b81abf95..0f825462ab 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-line-like-comment/test.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path test -#start 2012-07-20-01-49-22 +#open 2012-07-20-01-49-22 #fields data c #types string count Test1 42 \x23Kaputt 42 Test2 42 -#end 2012-07-20-01-49-22 +#close 
2012-07-20-01-49-22 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-timestamps/test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-timestamps/test.log index 5fba268afa..c644dab007 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-timestamps/test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-timestamps/test.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path test -#start 2012-07-20-01-49-20 +#open 2012-07-20-01-49-20 #fields data #types time 1234567890.000000 @@ -14,4 +14,4 @@ 1234567890.000010 1234567890.000001 1234567890.000000 -#end 2012-07-20-01-49-20 +#close 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.attr-extend/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.attr-extend/ssh.log index 7d3bbc0774..9eb2f0e663 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.attr-extend/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.attr-extend/ssh.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path ssh -#start 2012-07-20-01-49-20 +#open 2012-07-20-01-49-20 #fields status country a1 b1 b2 #types string string count count count success unknown 1 3 4 -#end 2012-07-20-01-49-20 +#close 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.attr/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.attr/ssh.log index c3163dba6f..bcedd1174e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.attr/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.attr/ssh.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path ssh -#start 2012-07-20-01-49-20 +#open 2012-07-20-01-49-20 #fields status country #types string string success unknown @@ -11,4 +11,4 @@ failure US failure UK success BR failure MX -#end 2012-07-20-01-49-20 +#close 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.empty-event/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.empty-event/ssh.log index 42f945bf0c..b255ac3489 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.empty-event/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.empty-event/ssh.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path ssh -#start 2012-07-20-01-49-20 +#open 2012-07-20-01-49-20 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748960.468458 1.2.3.4 1234 2.3.4.5 80 success unknown @@ -11,4 +11,4 @@ 1342748960.468458 1.2.3.4 1234 2.3.4.5 80 failure UK 1342748960.468458 1.2.3.4 1234 2.3.4.5 80 success BR 1342748960.468458 1.2.3.4 1234 2.3.4.5 80 failure MX -#end 2012-07-20-01-49-20 +#close 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.exclude/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.exclude/ssh.log index 3fe01ff913..f795159a16 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.exclude/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.exclude/ssh.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path ssh -#start 2012-07-20-01-49-20 +#open 2012-07-20-01-49-20 #fields id.orig_p id.resp_h id.resp_p status country #types port addr port string string 1234 2.3.4.5 80 success unknown @@ -11,4 +11,4 @@ 1234 2.3.4.5 80 failure UK 1234 2.3.4.5 80 success BR 1234 2.3.4.5 80 failure MX -#end 2012-07-20-01-49-20 +#close 
2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.file/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.file/ssh.log index 205f37243f..34d5f28b82 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.file/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.file/ssh.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path ssh -#start 2012-07-20-01-49-20 +#open 2012-07-20-01-49-20 #fields t f #types time file 1342748960.757056 Foo.log -#end 2012-07-20-01-49-20 +#close 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.include/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.include/ssh.log index cafacf9c4e..8935046687 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.include/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.include/ssh.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path ssh -#start 2012-07-20-01-49-20 +#open 2012-07-20-01-49-20 #fields t id.orig_h #types time addr 1342748960.796093 1.2.3.4 @@ -11,4 +11,4 @@ 1342748960.796093 1.2.3.4 1342748960.796093 1.2.3.4 1342748960.796093 1.2.3.4 -#end 2012-07-20-01-49-20 +#close 2012-07-20-01-49-20 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log index 3240e9f824..819b7b9bc2 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path local -#start 2011-03-18-19-06-13 +#open 2011-03-18-19-06-13 #fields ts id.orig_h #types time addr 1300475168.859163 141.142.220.118 @@ -36,4 +36,4 @@ 1300475168.902195 141.142.220.118 1300475168.894787 141.142.220.118 1300475168.901749 141.142.220.118 -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log index 84980836c4..41f575ef63 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/remote.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path remote -#start 2011-03-18-19-06-13 +#open 2011-03-18-19-06-13 #fields ts id.orig_h #types time addr 1300475169.780331 173.192.163.128 1300475167.097012 fe80::217:f2ff:fed7:cf65 1300475171.675372 fe80::3074:17d5:2052:c324 1300475173.116749 fe80::3074:17d5:2052:c324 -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func/output b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func/output index 1c67ff52b6..c67a12e1d9 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func/output +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func/output @@ -10,68 +10,68 @@ static-prefix-2-UK.log #empty_field (empty) #unset_field - #path static-prefix-0-BR -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.180156 1.2.3.4 1234 2.3.4.5 80 success BR -#end 2012-07-20-01-49-21 +#close 
2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-0-MX3 -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.180156 1.2.3.4 1234 2.3.4.5 80 failure MX3 -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-0-unknown -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.180156 1.2.3.4 1234 2.3.4.5 80 success unknown -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-1-MX -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.180156 1.2.3.4 1234 2.3.4.5 80 failure MX -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-1-US -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.180156 1.2.3.4 1234 2.3.4.5 80 failure US -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-2-MX2 -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.180156 1.2.3.4 1234 2.3.4.5 80 failure MX2 -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path static-prefix-2-UK -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.180156 1.2.3.4 1234 2.3.4.5 80 failure UK -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.failure.log b/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.failure.log index 96dede8965..a362135318 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.failure.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.failure.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path test.failure -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.488370 1.2.3.4 1234 2.3.4.5 80 failure US -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.success.log b/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.success.log index 85b5ca9f45..dd9c300429 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.success.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.pred/test.success.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path test.success -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string 
string 1342748961.488370 1.2.3.4 1234 2.3.4.5 80 success unknown -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remote-types/receiver.test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remote-types/receiver.test.log index aa18822daf..13364f8e77 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remote-types/receiver.test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remote-types/receiver.test.log @@ -3,8 +3,8 @@ #empty_field EMPTY #unset_field - #path test -#start 1970-01-01-00-00-00 +#open 1970-01-01-00-00-00 #fields b i e c p sn a d t iv s sc ss se vc ve #types bool int enum count port subnet addr double time interval string table[count] table[string] table[string] vector[count] vector[string] T -42 Test::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1342749004.579242 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY -#end 2012-07-20-01-50-05 +#close 2012-07-20-01-50-05 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.failure.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.failure.log index 36b88e496d..71e1d18c73 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.failure.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.failure.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path test.failure -#start 2012-07-20-01-50-18 +#open 2012-07-20-01-50-18 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure US 1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure UK 1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure MX -#end 2012-07-20-01-50-18 +#close 2012-07-20-01-50-18 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.log index 22d354fce4..bc3dac5a1a 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path test -#start 2012-07-20-01-50-18 +#open 2012-07-20-01-50-18 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success unknown @@ -11,4 +11,4 @@ 1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure UK 1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success BR 1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure MX -#end 2012-07-20-01-50-18 +#close 2012-07-20-01-50-18 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.success.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.success.log index 888dc424b5..f0b26454b4 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.success.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remote/sender.test.success.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path test.success -#start 2012-07-20-01-50-18 +#open 2012-07-20-01-50-18 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success unknown 1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success BR -#end 2012-07-20-01-50-18 +#close 
2012-07-20-01-50-18 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.failure.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.failure.log index 5a23ad2066..de324c337f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.failure.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.failure.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path ssh.failure -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.521536 1.2.3.4 1234 2.3.4.5 80 failure US 1342748961.521536 1.2.3.4 1234 2.3.4.5 80 failure UK -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.log index cea1069748..ed0a118cac 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.remove/ssh.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path ssh -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.521536 1.2.3.4 1234 2.3.4.5 80 failure US 1342748961.521536 1.2.3.4 1234 2.3.4.5 80 failure UK 1342748961.521536 1.2.3.4 1234 2.3.4.5 80 failure BR -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out index 19354f8df2..3acce6f1ce 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/out @@ -28,32 +28,14 @@ custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_11.00.05.log, pat custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_11.59.55.log, path=test2, open=1299499195.0, close=1299499205.0, terminating=F] custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_12.00.05.log, path=test2, open=1299499205.0, close=1299502795.0, terminating=F] custom rotate, [writer=Log::WRITER_ASCII, fname=test2-11-03-07_12.59.55.log, path=test2, open=1299502795.0, close=1299502795.0, terminating=T] +#close 2012-07-27-19-14-39 #empty_field (empty) -#end 2011-03-07-03-59-55 -#end 2011-03-07-04-00-05 -#end 2011-03-07-04-59-55 -#end 2011-03-07-05-00-05 -#end 2011-03-07-05-59-55 -#end 2011-03-07-06-00-05 -#end 2011-03-07-06-59-55 -#end 2011-03-07-07-00-05 -#end 2011-03-07-07-59-55 -#end 2011-03-07-08-00-05 -#end 2011-03-07-08-59-55 -#end 2011-03-07-09-00-05 -#end 2011-03-07-09-59-55 -#end 2011-03-07-10-00-05 -#end 2011-03-07-10-59-55 -#end 2011-03-07-11-00-05 -#end 2011-03-07-11-59-55 -#end 2011-03-07-12-00-05 -#end 2011-03-07-12-59-55 #fields t id.orig_h id.orig_p id.resp_h id.resp_p +#open 2012-07-27-19-14-39 #path test #path test2 #separator \x09 #set_separator , -#start 2011-03-07-03-00-05 #types time addr port addr port #unset_field - 1299466805.000000 10.0.0.1 20 10.0.0.2 1024 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out index 4764ff23d0..b26d2fcd1b 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out +++ 
b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out @@ -14,117 +14,117 @@ test.2011-03-07-12-00-05.log test 11-03-07_12.00.05 11-03-07_12.59.55 1 ascii #empty_field (empty) #unset_field - #path test -#start 2011-03-07-03-00-05 +#open 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299466805.000000 10.0.0.1 20 10.0.0.2 1024 1299470395.000000 10.0.0.2 20 10.0.0.3 0 -#end 2011-03-07-04-00-05 +#close 2011-03-07-04-00-05 > test.2011-03-07-04-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test -#start 2011-03-07-03-00-05 +#open 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299470405.000000 10.0.0.1 20 10.0.0.2 1025 1299473995.000000 10.0.0.2 20 10.0.0.3 1 -#end 2011-03-07-05-00-05 +#close 2011-03-07-05-00-05 > test.2011-03-07-05-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test -#start 2011-03-07-03-00-05 +#open 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299474005.000000 10.0.0.1 20 10.0.0.2 1026 1299477595.000000 10.0.0.2 20 10.0.0.3 2 -#end 2011-03-07-06-00-05 +#close 2011-03-07-06-00-05 > test.2011-03-07-06-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test -#start 2011-03-07-03-00-05 +#open 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299477605.000000 10.0.0.1 20 10.0.0.2 1027 1299481195.000000 10.0.0.2 20 10.0.0.3 3 -#end 2011-03-07-07-00-05 +#close 2011-03-07-07-00-05 > test.2011-03-07-07-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test -#start 2011-03-07-03-00-05 +#open 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299481205.000000 10.0.0.1 20 10.0.0.2 1028 1299484795.000000 10.0.0.2 20 10.0.0.3 4 -#end 2011-03-07-08-00-05 +#close 2011-03-07-08-00-05 > test.2011-03-07-08-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test -#start 2011-03-07-03-00-05 +#open 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299484805.000000 10.0.0.1 20 10.0.0.2 1029 1299488395.000000 10.0.0.2 20 10.0.0.3 5 -#end 2011-03-07-09-00-05 +#close 2011-03-07-09-00-05 > test.2011-03-07-09-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test -#start 2011-03-07-03-00-05 +#open 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299488405.000000 10.0.0.1 20 10.0.0.2 1030 1299491995.000000 10.0.0.2 20 10.0.0.3 6 -#end 2011-03-07-10-00-05 +#close 2011-03-07-10-00-05 > test.2011-03-07-10-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test -#start 2011-03-07-03-00-05 +#open 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299492005.000000 10.0.0.1 20 10.0.0.2 1031 1299495595.000000 10.0.0.2 20 10.0.0.3 7 -#end 2011-03-07-11-00-05 +#close 2011-03-07-11-00-05 > test.2011-03-07-11-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test -#start 2011-03-07-03-00-05 +#open 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299495605.000000 10.0.0.1 20 10.0.0.2 1032 1299499195.000000 10.0.0.2 20 10.0.0.3 8 -#end 
2011-03-07-12-00-05 +#close 2011-03-07-12-00-05 > test.2011-03-07-12-00-05.log #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path test -#start 2011-03-07-03-00-05 +#open 2011-03-07-03-00-05 #fields t id.orig_h id.orig_p id.resp_h id.resp_p #types time addr port addr port 1299499205.000000 10.0.0.1 20 10.0.0.2 1033 1299502795.000000 10.0.0.2 20 10.0.0.3 9 -#end 2011-03-07-12-59-55 +#close 2011-03-07-12-59-55 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.stdout/output b/testing/btest/Baseline/scripts.base.frameworks.logging.stdout/output index 110cef054a..6ff5237afa 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.stdout/output +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.stdout/output @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path /dev/stdout -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.732599 1.2.3.4 1234 2.3.4.5 80 success unknown @@ -11,4 +11,4 @@ 1342748961.732599 1.2.3.4 1234 2.3.4.5 80 failure UK 1342748961.732599 1.2.3.4 1234 2.3.4.5 80 success BR 1342748961.732599 1.2.3.4 1234 2.3.4.5 80 failure MX -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.test-logging/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.test-logging/ssh.log index c9191b666e..d2d484e02f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.test-logging/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.test-logging/ssh.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path ssh -#start 2012-07-20-01-49-21 +#open 2012-07-20-01-49-21 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string 1342748961.748481 1.2.3.4 1234 2.3.4.5 80 success unknown @@ -11,4 +11,4 @@ 1342748961.748481 1.2.3.4 1234 2.3.4.5 80 failure UK 1342748961.748481 1.2.3.4 1234 2.3.4.5 80 success BR 1342748961.748481 1.2.3.4 1234 2.3.4.5 80 failure MX -#end 2012-07-20-01-49-21 +#close 2012-07-20-01-49-21 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.types/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.types/ssh.log index 1fc29dbb4e..6b75d056cf 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.types/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.types/ssh.log @@ -3,8 +3,8 @@ #empty_field EMPTY #unset_field - #path ssh -#start 2012-07-20-01-49-22 +#open 2012-07-20-01-49-22 #fields b i e c p sn a d t iv s sc ss se vc ve f #types bool int enum count port subnet addr double time interval string table[count] table[string] table[string] vector[count] vector[string] func T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1342748962.114672 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -#end 2012-07-20-01-49-22 +#close 2012-07-20-01-49-22 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.unset-record/testing.log b/testing/btest/Baseline/scripts.base.frameworks.logging.unset-record/testing.log index b4089aeee8..0ebe8838ad 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.unset-record/testing.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.unset-record/testing.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path testing 
-#start 2012-07-20-01-49-22 +#open 2012-07-20-01-49-22 #fields a.val1 a.val2 b #types count count count - - 6 1 2 3 -#end 2012-07-20-01-49-22 +#close 2012-07-20-01-49-22 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.vec/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.vec/ssh.log index ae5d6d246e..3e8e1e737e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.vec/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.vec/ssh.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path ssh -#start 2012-07-20-01-49-22 +#open 2012-07-20-01-49-22 #fields vec #types vector[string] -,2,-,-,5 -#end 2012-07-20-01-49-22 +#close 2012-07-20-01-49-22 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2-2.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2-2.log index 1e41aca795..cbc90d9926 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2-2.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2-2.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path http-2-2 -#start 2011-03-18-19-06-08 +#open 2011-03-18-19-06-08 #fields status_code #types count 304 @@ -20,4 +20,4 @@ 304 304 304 -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2.log index 4d3622c7a0..8f66184146 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-2.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path http-2 -#start 2011-03-18-19-06-08 +#open 2011-03-18-19-06-08 #fields host #types string bits.wikimedia.org @@ -20,4 +20,4 @@ upload.wikimedia.org upload.wikimedia.org upload.wikimedia.org upload.wikimedia.org -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-3.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-3.log index 727a6c02fa..d64b9aa128 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-3.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http-3.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path http-3 -#start 2011-03-18-19-06-08 +#open 2011-03-18-19-06-08 #fields uri #types string /skins-1.5/monobook/main.css @@ -20,4 +20,4 @@ /wikipedia/commons/thumb/4/4a/Commons-logo.svg/35px-Commons-logo.svg.png /wikipedia/commons/thumb/9/91/Wikiversity-logo.svg/35px-Wikiversity-logo.svg.png /wikipedia/commons/thumb/7/75/Wikimedia_Community_Logo.svg/35px-Wikimedia_Community_Logo.svg.png -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http.log index 9ac9b6304c..97273995bc 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/http.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path http -#start 2011-03-18-19-06-08 +#open 2011-03-18-19-06-08 #fields ts 
uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1300475168.784020 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 1 GET bits.wikimedia.org /skins-1.5/monobook/main.css http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - @@ -20,4 +20,4 @@ 1300475169.014619 Tw8jXtpTGu6 141.142.220.118 50000 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/4/4a/Commons-logo.svg/35px-Commons-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - 1300475169.014593 P654jzLoe3a 141.142.220.118 49999 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/9/91/Wikiversity-logo.svg/35px-Wikiversity-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - 1300475169.014927 0Q4FH8sESw5 141.142.220.118 50001 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/7/75/Wikimedia_Community_Logo.svg/35px-Wikimedia_Community_Logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log old mode 100755 new mode 100644 index 3514ca5134..35e9134583 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.writer-path-conflict/reporter.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path reporter -#start 2011-03-18-19-06-08 +#open 2011-03-18-19-06-08 #fields ts level message location #types time enum string string 1300475168.843894 Reporter::WARNING Write using filter 'host-only' on path 'http' changed to use new path 'http-2' to avoid conflict with filter 'default' (empty) 1300475168.843894 Reporter::WARNING Write using filter 'uri-only' on path 'http' changed to use new path 'http-3' to avoid conflict with filter 'default' (empty) 1300475168.843894 Reporter::WARNING Write using filter 'status-only' on path 'http-2' changed to use new path 'http-2-2' to avoid conflict with filter 'host-only' (empty) -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log index a3f476c1fb..cb1bd5af01 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log +++ b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path metrics 
-#start 2012-07-20-01-50-41 +#open 2012-07-20-01-50-41 #fields ts metric_id filter_name index.host index.str index.network value #types time enum string addr string subnet count 1342749041.601712 TEST_METRIC foo-bar 6.5.4.3 - - 4 1342749041.601712 TEST_METRIC foo-bar 7.2.1.5 - - 2 1342749041.601712 TEST_METRIC foo-bar 1.2.3.4 - - 6 -#end 2012-07-20-01-50-49 +#close 2012-07-20-01-50-49 diff --git a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log index b497da5194..fb6476ee88 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log +++ b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path metrics -#start 2012-07-20-01-49-22 +#open 2012-07-20-01-49-22 #fields ts metric_id filter_name index.host index.str index.network value #types time enum string addr string subnet count 1342748962.841548 TEST_METRIC foo-bar 6.5.4.3 - - 2 1342748962.841548 TEST_METRIC foo-bar 7.2.1.5 - - 1 1342748962.841548 TEST_METRIC foo-bar 1.2.3.4 - - 3 -#end 2012-07-20-01-49-22 +#close 2012-07-20-01-49-22 diff --git a/testing/btest/Baseline/scripts.base.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log b/testing/btest/Baseline/scripts.base.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log index 8f3a9dc70c..217b3ed49b 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#start 2012-07-20-01-50-59 +#open 2012-07-20-01-50-59 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network #types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double double addr string subnet 1342749059.978651 - - - - - - Test_Notice Threshold crossed by metric_index(host=1.2.3.4) 100/100 - 1.2.3.4 - - 100 manager-1 Notice::ACTION_LOG 6 3600.000000 F - - - - - 1.2.3.4 - - -#end 2012-07-20-01-51-08 +#close 2012-07-20-01-51-08 diff --git a/testing/btest/Baseline/scripts.base.frameworks.metrics.notice/notice.log b/testing/btest/Baseline/scripts.base.frameworks.metrics.notice/notice.log index 5a214b26cc..ba6c680e27 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.metrics.notice/notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.metrics.notice/notice.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path notice -#start 2012-07-20-01-49-23 +#open 2012-07-20-01-49-23 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network #types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double double addr string subnet 1342748963.085888 - - - - - - 
Test_Notice Threshold crossed by metric_index(host=1.2.3.4) 3/2 - 1.2.3.4 - - 3 bro Notice::ACTION_LOG 6 3600.000000 F - - - - - 1.2.3.4 - - 1342748963.085888 - - - - - - Test_Notice Threshold crossed by metric_index(host=6.5.4.3) 2/2 - 6.5.4.3 - - 2 bro Notice::ACTION_LOG 6 3600.000000 F - - - - - 6.5.4.3 - - -#end 2012-07-20-01-49-23 +#close 2012-07-20-01-49-23 diff --git a/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log b/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log index 4903ec0c01..6c93cb875e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#start 2012-07-20-01-51-18 +#open 2012-07-20-01-51-18 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network #types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double double addr string subnet 1342749078.270791 - - - - - - Test_Notice test notice! - - - - - worker-1 Notice::ACTION_LOG 6 3600.000000 F - - - - - - - - -#end 2012-07-20-01-51-27 +#close 2012-07-20-01-51-27 diff --git a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log index bd77a90c86..88f25b066f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#start 2012-07-20-01-51-36 +#open 2012-07-20-01-51-36 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network #types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double double addr string subnet 1342749096.545663 - - - - - - Test_Notice test notice! 
- - - - - worker-2 Notice::ACTION_LOG 6 3600.000000 F - - - - - - - - -#end 2012-07-20-01-51-45 +#close 2012-07-20-01-51-45 diff --git a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log index 5a3cdfa69f..7c7254f87e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#start 2012-07-20-01-49-23 +#open 2012-07-20-01-49-23 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude #types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double double 1342748963.685754 - - - - - - Test_Notice test - - - - - bro Notice::ACTION_LOG 6 3600.000000 F - - - - - -#end 2012-07-20-01-49-23 +#close 2012-07-20-01-49-23 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log index 316056fa8c..3520980833 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/conn.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#start 2012-02-21-16-53-13 +#open 2012-02-21-16-53-13 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1329843175.736107 arKYeMETxOg 141.142.220.235 37604 199.233.217.249 56666 tcp ftp-data 0.112432 0 342 SF - 0 ShAdfFa 4 216 4 562 (empty) @@ -11,4 +11,4 @@ 1329843194.151526 nQcgTWjvg4c 199.233.217.249 61920 141.142.220.235 33582 tcp ftp-data 0.056211 342 0 SF - 0 ShADaFf 5 614 3 164 (empty) 1329843197.783443 j4u32Pc5bif 199.233.217.249 61918 141.142.220.235 37835 tcp ftp-data 0.056005 77 0 SF - 0 ShADaFf 5 349 3 164 (empty) 1329843161.968492 UWkUyAuUGXf 141.142.220.235 50003 199.233.217.249 21 tcp ftp 38.055625 180 3146 SF - 0 ShAdDfFa 38 2164 25 4458 (empty) -#end 2012-02-21-16-53-20 +#close 2012-02-21-16-53-20 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/ftp.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/ftp.log index cee57182ed..0d0a8f57f1 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/ftp.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv4/ftp.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path ftp -#start 2012-02-21-16-53-13 +#open 2012-02-21-16-53-13 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p user password command arg mime_type mime_desc file_size reply_code reply_msg tags extraction_file #types time string addr port addr port string string string string string string count count string table[string] file 1329843179.926563 UWkUyAuUGXf 141.142.220.235 50003 199.233.217.249 21 anonymous test RETR ftp://199.233.217.249/./robots.txt text/plain ASCII text 77 226 Transfer complete. 
- - 1329843197.727769 UWkUyAuUGXf 141.142.220.235 50003 199.233.217.249 21 anonymous test RETR ftp://199.233.217.249/./robots.txt text/plain ASCII text, with CRLF line terminators 77 226 Transfer complete. - - -#end 2012-02-21-16-53-20 +#close 2012-02-21-16-53-20 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log index 299bdbc4ba..3d81f45670 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/conn.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#start 2012-02-15-17-43-15 +#open 2012-02-15-17-43-15 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1329327783.316897 arKYeMETxOg 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49186 2001:470:4867:99::21 57086 tcp ftp-data 0.219721 0 342 SF - 0 ShAdfFa 5 372 4 642 (empty) @@ -12,4 +12,4 @@ 1329327795.571921 j4u32Pc5bif 2001:470:4867:99::21 55785 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49189 tcp ftp-data 0.109813 77 0 SF - 0 ShADFaf 5 449 4 300 (empty) 1329327777.822004 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 tcp ftp 26.658219 310 3448 SF - 0 ShAdDfFa 57 4426 34 5908 (empty) 1329327800.017649 TEfuqmmG4bh 2001:470:4867:99::21 55647 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49190 tcp ftp-data 0.109181 342 0 SF - 0 ShADFaf 5 714 4 300 (empty) -#end 2012-02-15-17-43-24 +#close 2012-02-15-17-43-24 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log index 096b91df65..62ea4df18d 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.ftp-ipv6/ftp.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path ftp -#start 2012-02-15-17-43-07 +#open 2012-02-15-17-43-07 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p user password command arg mime_type mime_desc file_size reply_code reply_msg tags extraction_file #types time string addr port addr port string string string string string string count count string table[string] file 1329327787.396984 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. - - 1329327795.463946 UWkUyAuUGXf 2001:470:1f11:81f:c999:d94:aa7c:2e3e 49185 2001:470:4867:99::21 21 anonymous test RETR ftp://[2001:470:4867:99::21]/robots.txt - - 77 226 Transfer complete. 
- - -#end 2012-02-15-17-43-24 +#close 2012-02-15-17-43-24 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.100-continue/http.log b/testing/btest/Baseline/scripts.base.protocols.http.100-continue/http.log index c457f9b64b..13c8b12502 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.100-continue/http.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.100-continue/http.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path http -#start 2009-03-19-05-21-36 +#open 2009-03-19-05-21-36 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1237440095.634312 UWkUyAuUGXf 192.168.3.103 54102 128.146.216.51 80 1 POST www.osu.edu / - curl/7.17.1 (i386-apple-darwin8.11.1) libcurl/7.17.1 zlib/1.2.3 2001 60731 200 OK 100 Continue - (empty) - - - text/html - - -#end 2009-03-19-05-21-36 +#close 2009-03-19-05-21-36 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.http-extract-files/http.log b/testing/btest/Baseline/scripts.base.protocols.http.http-extract-files/http.log index 46ae431fc2..0d61a6c8b3 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.http-extract-files/http.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.http-extract-files/http.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path http -#start 2005-10-07-23-23-56 +#open 2005-10-07-23-23-56 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1128727435.634189 arKYeMETxOg 141.42.64.125 56730 125.190.109.199 80 1 GET www.icir.org / - Wget/1.10 0 9130 200 OK - - - (empty) - - - text/html - http-item_141.42.64.125:56730-125.190.109.199:80_resp_1.dat -#end 2005-10-07-23-23-57 +#close 2005-10-07-23-23-57 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.http-mime-and-md5/http.log b/testing/btest/Baseline/scripts.base.protocols.http.http-mime-and-md5/http.log index 69e6613a3c..409d8fc812 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.http-mime-and-md5/http.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.http-mime-and-md5/http.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path http -#start 2009-11-18-20-58-04 +#open 2009-11-18-20-58-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file 1258577884.844956 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 1 GET www.mozilla.org /style/enhanced.css http://www.mozilla.org/projects/calendar/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; 
rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 2675 200 OK - - - (empty) - - - FAKE_MIME - - @@ -11,4 +11,4 @@ 1258577885.317160 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 3 GET www.mozilla.org /images/template/screen/bullet_utility.png http://www.mozilla.org/style/screen.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 94 200 OK - - - (empty) - - - FAKE_MIME - - 1258577885.349639 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 4 GET www.mozilla.org /images/template/screen/key-point-top.png http://www.mozilla.org/style/screen.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 2349 200 OK - - - (empty) - - - image/png e0029eea80812e9a8e57b8d05d52938a - 1258577885.394612 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 5 GET www.mozilla.org /projects/calendar/images/header-sunbird.png http://www.mozilla.org/projects/calendar/calendar.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 27579 200 OK - - - (empty) - - - image/png 30aa926344f58019d047e85ba049ca1e - -#end 2009-11-18-20-58-32 +#close 2009-11-18-20-58-32 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.http-pipelining/http.log b/testing/btest/Baseline/scripts.base.protocols.http.http-pipelining/http.log index 6e7eb96454..6b5e395902 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.http-pipelining/http.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.http-pipelining/http.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path http -#start 2009-11-18-20-58-04 +#open 2009-11-18-20-58-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied md5 extraction_file #types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string file 1258577884.844956 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 1 GET www.mozilla.org /style/enhanced.css http://www.mozilla.org/projects/calendar/ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 2675 200 OK - - - (empty) - - - - - @@ -11,4 +11,4 @@ 1258577885.317160 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 3 GET www.mozilla.org /images/template/screen/bullet_utility.png http://www.mozilla.org/style/screen.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 94 200 OK - - - (empty) - - - - - 1258577885.349639 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 4 GET www.mozilla.org /images/template/screen/key-point-top.png http://www.mozilla.org/style/screen.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 2349 200 OK - - - (empty) - - - - - 1258577885.394612 UWkUyAuUGXf 192.168.1.104 1673 63.245.209.11 80 5 GET www.mozilla.org /projects/calendar/images/header-sunbird.png http://www.mozilla.org/projects/calendar/calendar.css Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 0 27579 200 OK - - - (empty) - - - - - -#end 2009-11-18-20-58-32 +#close 2009-11-18-20-58-32 diff --git a/testing/btest/Baseline/scripts.base.protocols.irc.basic/irc.log b/testing/btest/Baseline/scripts.base.protocols.irc.basic/irc.log index fe18751420..46adaa4c3e 100644 --- 
a/testing/btest/Baseline/scripts.base.protocols.irc.basic/irc.log +++ b/testing/btest/Baseline/scripts.base.protocols.irc.basic/irc.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path irc -#start 2011-07-20-19-12-44 +#open 2011-07-20-19-12-44 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p nick user command value addl dcc_file_name dcc_file_size extraction_file #types time string addr port addr port string string string string string string count file 1311189164.119437 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 - - NICK bloed - - - - 1311189164.119437 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed - USER sdkfje sdkfje Montreal.QC.CA.Undernet.org dkdkrwq - - - 1311189174.474127 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed sdkfje JOIN #easymovies (empty) - - - 1311189316.326025 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed sdkfje DCC #easymovies (empty) ladyvampress-default(2011-07-07)-OS.zip 42208 - -#end 2011-07-20-19-15-42 +#close 2011-07-20-19-15-42 diff --git a/testing/btest/Baseline/scripts.base.protocols.irc.dcc-extract/irc.log b/testing/btest/Baseline/scripts.base.protocols.irc.dcc-extract/irc.log index 8bd6bd8394..e204a627b1 100644 --- a/testing/btest/Baseline/scripts.base.protocols.irc.dcc-extract/irc.log +++ b/testing/btest/Baseline/scripts.base.protocols.irc.dcc-extract/irc.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path irc -#start 2011-07-20-19-12-44 +#open 2011-07-20-19-12-44 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p nick user command value addl dcc_file_name dcc_file_size dcc_mime_type extraction_file #types time string addr port addr port string string string string string string count string file 1311189164.119437 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 - - NICK bloed - - - - - 1311189164.119437 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed - USER sdkfje sdkfje Montreal.QC.CA.Undernet.org dkdkrwq - - - - 1311189174.474127 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed sdkfje JOIN #easymovies (empty) - - - - 1311189316.326025 UWkUyAuUGXf 192.168.1.77 57640 66.198.80.67 6667 bloed sdkfje DCC #easymovies (empty) ladyvampress-default(2011-07-07)-OS.zip 42208 FAKE_MIME irc-dcc-item_192.168.1.77:57655-209.197.168.151:1024_1.dat -#end 2011-07-20-19-15-42 +#close 2011-07-20-19-15-42 diff --git a/testing/btest/Baseline/scripts.base.protocols.smtp.basic/smtp.log b/testing/btest/Baseline/scripts.base.protocols.smtp.basic/smtp.log index eca41f7d09..ba16578dfb 100644 --- a/testing/btest/Baseline/scripts.base.protocols.smtp.basic/smtp.log +++ b/testing/btest/Baseline/scripts.base.protocols.smtp.basic/smtp.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path smtp -#start 2009-10-05-06-06-12 +#open 2009-10-05-06-06-12 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth helo mailfrom rcptto date from to reply_to msg_id in_reply_to subject x_originating_ip first_received second_received last_reply path user_agent #types time string addr port addr port count string string table[string] string string table[string] string string string string addr string string string vector[addr] string 1254722768.219663 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 GP Mon, 5 Oct 2009 11:36:07 +0530 "Gurpartap Singh" - <000301ca4581$ef9e57f0$cedb07d0$@in> - SMTP - - - 250 OK id=1Mugho-0003Dg-Un 74.53.140.153,10.10.1.4 Microsoft Office Outlook 12.0 -#end 2009-10-05-06-06-16 +#close 2009-10-05-06-06-16 diff --git 
a/testing/btest/Baseline/scripts.base.protocols.smtp.mime-extract/smtp_entities.log b/testing/btest/Baseline/scripts.base.protocols.smtp.mime-extract/smtp_entities.log index 9bae222897..396a2e058d 100644 --- a/testing/btest/Baseline/scripts.base.protocols.smtp.mime-extract/smtp_entities.log +++ b/testing/btest/Baseline/scripts.base.protocols.smtp.mime-extract/smtp_entities.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path smtp_entities -#start 2009-10-05-06-06-10 +#open 2009-10-05-06-06-10 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth filename content_len mime_type md5 extraction_file excerpt #types time string addr port addr port count string count string string file string 1254722770.692743 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 - 79 FAKE_MIME - smtp-entity_10.10.1.4:1470-74.53.140.153:25_1.dat (empty) 1254722770.692743 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 - 1918 FAKE_MIME - - (empty) 1254722770.692804 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 NEWS.txt 10823 FAKE_MIME - smtp-entity_10.10.1.4:1470-74.53.140.153:25_2.dat (empty) -#end 2009-10-05-06-06-16 +#close 2009-10-05-06-06-16 diff --git a/testing/btest/Baseline/scripts.base.protocols.smtp.mime/smtp_entities.log b/testing/btest/Baseline/scripts.base.protocols.smtp.mime/smtp_entities.log index 5cb4bb15ef..1abe35e90f 100644 --- a/testing/btest/Baseline/scripts.base.protocols.smtp.mime/smtp_entities.log +++ b/testing/btest/Baseline/scripts.base.protocols.smtp.mime/smtp_entities.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path smtp_entities -#start 2009-10-05-06-06-10 +#open 2009-10-05-06-06-10 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth filename content_len mime_type md5 extraction_file excerpt #types time string addr port addr port count string count string string file string 1254722770.692743 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 - 79 FAKE_MIME 92bca2e6cdcde73647125da7dccbdd07 - (empty) 1254722770.692743 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 - 1918 FAKE_MIME - - (empty) 1254722770.692804 arKYeMETxOg 10.10.1.4 1470 74.53.140.153 25 1 NEWS.txt 10823 FAKE_MIME a968bb0f9f9d95835b2e74c845877e87 - (empty) -#end 2009-10-05-06-06-16 +#close 2009-10-05-06-06-16 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log index 960ea71720..b2a8ef7d4c 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path socks -#start 2012-06-20-17-23-38 +#open 2012-06-20-17-23-38 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status request.host request.name request_p bound.host bound.name bound_p #types time string addr port addr port count string string addr string port addr string port 1340213015.276495 UWkUyAuUGXf 10.0.0.55 53994 60.190.189.214 8124 5 - succeeded - www.osnews.com 80 192.168.0.31 - 2688 -#end 2012-06-20-17-28-10 +#close 2012-06-20-17-28-10 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log index d914b3074e..d5aa58652e 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/tunnel.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path tunnel -#start 2012-06-20-17-23-35 
+#open 2012-06-20-17-23-35 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1340213015.276495 - 10.0.0.55 0 60.190.189.214 8124 Tunnel::SOCKS Tunnel::DISCOVER -#end 2012-06-20-17-28-10 +#close 2012-06-20-17-28-10 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log index ef07cc31a5..4053bd7359 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/socks.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path socks -#start 2012-06-19-13-41-02 +#open 2012-06-19-13-41-02 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status request.host request.name request_p bound.host bound.name bound_p #types time string addr port addr port count string string addr string port addr string port 1340113261.914619 UWkUyAuUGXf 10.0.0.50 59580 85.194.84.197 1080 5 - succeeded - www.google.com 443 0.0.0.0 - 443 -#end 2012-06-19-13-41-05 +#close 2012-06-19-13-41-05 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log index 10f079b888..82df9b76df 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace2/tunnel.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path tunnel -#start 2012-06-19-13-41-01 +#open 2012-06-19-13-41-01 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1340113261.914619 - 10.0.0.50 0 85.194.84.197 1080 Tunnel::SOCKS Tunnel::DISCOVER -#end 2012-06-19-13-41-05 +#close 2012-06-19-13-41-05 diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log index 4299e302ce..867f3ed157 100644 --- a/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log +++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace3/tunnel.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path tunnel -#start 2008-04-15-22-43-49 +#open 2008-04-15-22-43-49 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p tunnel_type action #types time string addr port addr port enum enum 1208299429.265774 - 127.0.0.1 0 127.0.0.1 1080 Tunnel::SOCKS Tunnel::DISCOVER -#end 2008-04-15-22-43-49 +#close 2008-04-15-22-43-49 diff --git a/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log b/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log index b77925e498..5bf3feddc5 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log +++ b/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path ssl -#start 2012-04-27-14-53-12 +#open 2012-04-27-14-53-12 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher server_name session_id subject issuer_subject not_valid_before not_valid_after last_alert #types time string addr port addr port string string string string string string time time string 1335538392.319381 UWkUyAuUGXf 192.168.1.105 62045 74.125.224.79 443 TLSv10 TLS_ECDHE_RSA_WITH_RC4_128_SHA ssl.gstatic.com - CN=*.gstatic.com,O=Google Inc,L=Mountain View,ST=California,C=US CN=Google Internet Authority,O=Google Inc,C=US 1334102677.000000 1365639277.000000 - 
-#end 2012-04-27-14-53-16 +#close 2012-04-27-14-53-16 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-all.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-all.log index 6951e4d51f..d5f665e4bc 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-all.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-all.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path known_hosts -#start 2011-03-18-19-06-08 +#open 2011-03-18-19-06-08 #fields ts host #types time addr 1300475168.783842 141.142.220.118 1300475168.783842 208.80.152.118 1300475168.915940 208.80.152.3 1300475168.962628 208.80.152.2 -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-local.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-local.log index b70a701448..a625691aa4 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-local.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-local.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path known_hosts -#start 2011-03-18-19-06-08 +#open 2011-03-18-19-06-08 #fields ts host #types time addr 1300475168.783842 141.142.220.118 -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-remote.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-remote.log index 8e9d8c6c79..d05ccf6081 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-remote.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-hosts/knownhosts-remote.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path known_hosts -#start 2011-03-18-19-06-08 +#open 2011-03-18-19-06-08 #fields ts host #types time addr 1300475168.783842 208.80.152.118 1300475168.915940 208.80.152.3 1300475168.962628 208.80.152.2 -#end 2011-03-18-19-06-13 +#close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log index 25198e92d5..af097e5db3 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path known_services -#start 2011-06-24-15-51-31 +#open 2011-06-24-15-51-31 #fields ts host port_num port_proto service #types time addr port enum table[string] 1308930691.049431 172.16.238.131 22 tcp SSH @@ -11,4 +11,4 @@ 1308930716.462556 74.125.225.81 80 tcp HTTP 1308930718.361665 172.16.238.131 21 tcp FTP 1308930726.872485 141.142.192.39 22 tcp SSH -#end 2011-06-24-15-52-08 +#close 2011-06-24-15-52-08 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-local.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-local.log index 598f49fa65..7c27e63a24 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-local.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-local.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path 
known_services -#start 2011-06-24-15-51-31 +#open 2011-06-24-15-51-31 #fields ts host port_num port_proto service #types time addr port enum table[string] 1308930691.049431 172.16.238.131 22 tcp SSH 1308930694.550308 172.16.238.131 80 tcp HTTP 1308930718.361665 172.16.238.131 21 tcp FTP -#end 2011-06-24-15-52-08 +#close 2011-06-24-15-52-08 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log index c248b18146..77fbe1ef70 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path known_services -#start 2011-06-24-15-51-56 +#open 2011-06-24-15-51-56 #fields ts host port_num port_proto service #types time addr port enum table[string] 1308930716.462556 74.125.225.81 80 tcp HTTP 1308930726.872485 141.142.192.39 22 tcp SSH -#end 2011-06-24-15-52-08 +#close 2011-06-24-15-52-08 diff --git a/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log b/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log index fb024db6d2..f4b77edde7 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log +++ b/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path dns -#start 1999-06-28-23-40-27 +#open 1999-06-28-23-40-27 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs auth addl #types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] table[string] table[string] 930613226.529070 UWkUyAuUGXf 212.180.42.100 25000 131.243.64.3 53 tcp 34798 - - - - - 0 NOERROR F F F T 0 4.3.2.1 31337.000000 - - -#end 1999-06-28-23-40-27 +#close 1999-06-28-23-40-27 diff --git a/testing/scripts/diff-remove-timestamps b/testing/scripts/diff-remove-timestamps index 84bd21aa60..138b901743 100755 --- a/testing/scripts/diff-remove-timestamps +++ b/testing/scripts/diff-remove-timestamps @@ -11,4 +11,4 @@ fi # The first sed uses a "basic" regexp, the 2nd a "modern:. sed 's/[0-9]\{10\}\.[0-9]\{2,8\}/XXXXXXXXXX.XXXXXX/g' | \ -$sed 's/^#(start|end).(19|20)..-..-..-..-..-..$/#\1 XXXX-XX-XX-XX-XX-XX/g' +$sed 's/^#(open|close).(19|20)..-..-..-..-..-..$/#\1 XXXX-XX-XX-XX-XX-XX/g' From 596f07e50569d0ecb4d65ef58bdf6c8ba65fe50e Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Fri, 27 Jul 2012 15:31:10 -0400 Subject: [PATCH 550/651] Reworked how the logs-to-elasticsearch scripts works to stop abusing the logging framework. - New variable in logging framework Log::active_streams to indicate Log:ID enums which are currently active. 
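A minimal sketch of the pattern this change enables (not part of the patch itself; the filter name, writer and interval below are illustrative placeholders, while Log::active_streams, Log::Filter and Log::add_filter are the names introduced or used by the diff that follows): a script can now simply walk the set of active streams at startup and attach one additional filter per stream, which is how logs-to-elasticsearch.bro is reworked.

event bro_init() &priority=-5
	{
	for ( stream_id in Log::active_streams )
		{
		# One extra output per active stream; the writer, filter name and
		# rotation interval are placeholder values for this sketch.
		local filter: Log::Filter = [$name = "copy-ascii",
		                             $writer = Log::WRITER_ASCII,
		                             $interv = 24hr];
		Log::add_filter(stream_id, filter);
		}
	}

Since the extra filter reuses each stream's default path, its writer/path pair may collide with the default filter and be auto-adjusted; [PATCH 557/651] later in this series refines exactly that conflict handling.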
--- scripts/base/frameworks/logging/main.bro | 9 ++++++ .../policy/tuning/logs-to-elasticsearch.bro | 28 ++++++------------- 2 files changed, 17 insertions(+), 20 deletions(-) diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index c29215fd86..aa44547567 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -327,6 +327,11 @@ export { ## Log::default_rotation_postprocessor_cmd ## Log::default_rotation_postprocessors global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool; + + ## The streams which are currently active and not disabled. + ## This set is not meant to be modified by users! Only use it for + ## examining which streams are active. + global active_streams: set[ID] = set(); } # We keep a script-level copy of all filters so that we can manipulate them. @@ -412,11 +417,15 @@ function create_stream(id: ID, stream: Stream) : bool if ( ! __create_stream(id, stream) ) return F; + add active_streams[id]; + return add_default_filter(id); } function disable_stream(id: ID) : bool { + delete active_streams[id]; + return __disable_stream(id); } diff --git a/scripts/policy/tuning/logs-to-elasticsearch.bro b/scripts/policy/tuning/logs-to-elasticsearch.bro index b4d16a19a1..44fc3800b8 100644 --- a/scripts/policy/tuning/logs-to-elasticsearch.bro +++ b/scripts/policy/tuning/logs-to-elasticsearch.bro @@ -4,7 +4,7 @@ module LogElasticSearch; export { ## An elasticsearch specific rotation interval. - const rotation_interval = 24hr &redef; + const rotation_interval = 3hr &redef; ## Optionally ignore any :bro:type:`Log::ID` from being sent to ## ElasticSearch with this script. @@ -17,29 +17,17 @@ export { const send_logs: set[string] = set() &redef; } -module Log; - event bro_init() &priority=-5 { - local my_filters: table[ID, string] of Filter = table(); - - for ( [id, name] in filters ) + for ( stream_id in Log::active_streams ) { - local filter = filters[id, name]; - if ( fmt("%s", id) in LogElasticSearch::excluded_log_ids || - (|LogElasticSearch::send_logs| > 0 && fmt("%s", id) !in LogElasticSearch::send_logs) ) + if ( fmt("%s", stream_id) in excluded_log_ids || + (|send_logs| > 0 && fmt("%s", stream_id) !in send_logs) ) next; - filter$name = cat(name, "-es"); - filter$writer = Log::WRITER_ELASTICSEARCH; - filter$interv = LogElasticSearch::rotation_interval; - my_filters[id, name] = filter; - } - - # This had to be done separately to avoid an ever growing filters list - # where the for loop would never end. - for ( [id, name] in my_filters ) - { - Log::add_filter(id, filter); + local filter: Log::Filter = [$name = "default-es", + $writer = Log::WRITER_ELASTICSEARCH, + $interv = LogElasticSearch::rotation_interval]; + Log::add_filter(stream_id, filter); } } From 767a7921482599eae41707a931fee065b6038c06 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 27 Jul 2012 12:30:40 -0700 Subject: [PATCH 551/651] Tests updates for recent open/close log change. 
--- .../ssh.log | 12 +++++------- .../btest/scripts/base/frameworks/input/binary.bro | 2 +- .../scripts/base/frameworks/logging/ascii-escape.bro | 2 +- .../scripts/base/frameworks/logging/remote-types.bro | 4 ++-- 4 files changed, 9 insertions(+), 11 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log index 7a448ce6c1..d61eae873a 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.ascii-escape/ssh.log @@ -3,12 +3,10 @@ #empty_field||(empty) #unset_field||- #path||ssh -#open||2012-07-27-19-14-35 #fields||t||id.orig_h||id.orig_p||id.resp_h||id.resp_p||status||country #types||time||addr||port||addr||port||string||string -1343416475.837726||1.2.3.4||1234||2.3.4.5||80||success||unknown -1343416475.837726||1.2.3.4||1234||2.3.4.5||80||failure||US -1343416475.837726||1.2.3.4||1234||2.3.4.5||80||fa\x7c\x7cure||UK -1343416475.837726||1.2.3.4||1234||2.3.4.5||80||su\x7c\x7cess||BR -1343416475.837726||1.2.3.4||1234||2.3.4.5||80||failure||MX -#close||2012-07-27-19-14-35 +1343417536.767956||1.2.3.4||1234||2.3.4.5||80||success||unknown +1343417536.767956||1.2.3.4||1234||2.3.4.5||80||failure||US +1343417536.767956||1.2.3.4||1234||2.3.4.5||80||fa\x7c\x7cure||UK +1343417536.767956||1.2.3.4||1234||2.3.4.5||80||su\x7c\x7cess||BR +1343417536.767956||1.2.3.4||1234||2.3.4.5||80||failure||MX diff --git a/testing/btest/scripts/base/frameworks/input/binary.bro b/testing/btest/scripts/base/frameworks/input/binary.bro index ce7f66a01d..8d75abc5a9 100644 --- a/testing/btest/scripts/base/frameworks/input/binary.bro +++ b/testing/btest/scripts/base/frameworks/input/binary.bro @@ -16,7 +16,7 @@ redef InputAscii::unset_field = "-"; #empty_field|(empty) #unset_field|- #path|ssh -#start|2012-07-20-01-49-19 +#open|2012-07-20-01-49-19 #fields|data|data2 #types|string|string abc\x0a\xffdef|DATA2 diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro b/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro index 1d0742216d..d73464777a 100644 --- a/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro +++ b/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro @@ -1,6 +1,6 @@ # # @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: cat ssh.log | egrep -v '#start|#end' >ssh.log.tmp && mv ssh.log.tmp ssh.log +# @TEST-EXEC: cat ssh.log | egrep -v '#open|#close' >ssh.log.tmp && mv ssh.log.tmp ssh.log # @TEST-EXEC: btest-diff ssh.log redef LogAscii::separator = "||"; diff --git a/testing/btest/scripts/base/frameworks/logging/remote-types.bro b/testing/btest/scripts/base/frameworks/logging/remote-types.bro index 3f102e6319..b8425428d3 100644 --- a/testing/btest/scripts/base/frameworks/logging/remote-types.bro +++ b/testing/btest/scripts/base/frameworks/logging/remote-types.bro @@ -4,8 +4,8 @@ # @TEST-EXEC: btest-bg-run receiver bro -B threading,logging --pseudo-realtime %INPUT ../receiver.bro # @TEST-EXEC: btest-bg-wait -k 10 # @TEST-EXEC: btest-diff receiver/test.log -# @TEST-EXEC: cat receiver/test.log | egrep -v '#start|#end' >r.log -# @TEST-EXEC: cat sender/test.log | egrep -v '#start|#end' >s.log +# @TEST-EXEC: cat receiver/test.log | egrep -v '#open|#close' >r.log +# @TEST-EXEC: cat sender/test.log | egrep -v '#open|#close' >s.log # @TEST-EXEC: cmp r.log s.log # Remote version testing all types. 
From 9f2abd0697568377c901b3fa8cd38f79f5ccf953 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 27 Jul 2012 12:39:20 -0700 Subject: [PATCH 552/651] Fix input test for recent default change on fastpath. --- testing/btest/scripts/base/frameworks/input/missing-file.bro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/btest/scripts/base/frameworks/input/missing-file.bro b/testing/btest/scripts/base/frameworks/input/missing-file.bro index 269e287acc..aa5acf619e 100644 --- a/testing/btest/scripts/base/frameworks/input/missing-file.bro +++ b/testing/btest/scripts/base/frameworks/input/missing-file.bro @@ -25,6 +25,6 @@ event bro_init() { try = 0; outfile = open("../out"); - Input::add_event([$source="does-not-exist.dat", $name="input", $fields=Val, $ev=line]); + Input::add_event([$source="does-not-exist.dat", $name="input", $fields=Val, $ev=line, $want_record=F]); Input::remove("input"); } From 4bdac985cbbe53b2767fb56412e6bdc1a577da0b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sat, 28 Jul 2012 11:21:20 -0700 Subject: [PATCH 553/651] Tweaking logs-to-elasticsearch.bro so that it doesn't do anything if ES server is unset. --- scripts/policy/tuning/logs-to-elasticsearch.bro | 3 +++ testing/external/scripts/testing-setup.bro | 8 +++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/scripts/policy/tuning/logs-to-elasticsearch.bro b/scripts/policy/tuning/logs-to-elasticsearch.bro index 44fc3800b8..207a9acc04 100644 --- a/scripts/policy/tuning/logs-to-elasticsearch.bro +++ b/scripts/policy/tuning/logs-to-elasticsearch.bro @@ -19,6 +19,9 @@ export { event bro_init() &priority=-5 { + if ( server_host == "" ) + return; + for ( stream_id in Log::active_streams ) { if ( fmt("%s", stream_id) in excluded_log_ids || diff --git a/testing/external/scripts/testing-setup.bro b/testing/external/scripts/testing-setup.bro index fa5664a877..4b4d110864 100644 --- a/testing/external/scripts/testing-setup.bro +++ b/testing/external/scripts/testing-setup.bro @@ -1,6 +1,12 @@ # Sets some testing specific options. @ifdef ( SMTP::never_calc_md5 ) - # MDD5s can depend on libmagic output. + # MDD5s can depend on libmagic output. redef SMTP::never_calc_md5 = T; @endif + +@ifdef ( LogElasticSearch::server_host ) + # Set to empty so that logs-to-elasticsearch.bro doesn't try to setup + #log forwarding to ES. + redef LogElasticSearch::server_host = ""; +@endif From 4359bf6b42fb6438bf5d2285f07275625d9b542b Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 27 Jul 2012 13:31:17 -0500 Subject: [PATCH 554/651] Fix log manager hanging on waiting for pending file rotations. This changes writer implementations to always respond to rotation messages in their DoRotate() method, even for failure/no-op cases with a new RotationFailedMessage. This informs the manager to decrement its count of pending rotations. Addresses #860. 
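For context only, a sketch of the script-level settings that drive the rotations this fix accounts for (values are examples; Log::default_rotation_interval and Log::default_rotation_postprocessor_cmd are assumed to be the redef-able knobs referenced in the logging framework's main.bro shown earlier):

# Rotate every log hourly and gzip each rotated file.  At shutdown the log
# manager waits for all pending rotations to be answered by the writers,
# including the failure/no-op answers this patch adds.
redef Log::default_rotation_interval = 1hr;
redef Log::default_rotation_postprocessor_cmd = "gzip";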
--- src/logging/Manager.cc | 14 +++++++++++- src/logging/Manager.h | 7 ++++++ src/logging/WriterBackend.cc | 33 ++++++++++++++++++++++++++++ src/logging/WriterBackend.h | 19 ++++++++++++++++ src/logging/writers/Ascii.cc | 4 ++++ src/logging/writers/DataSeries.cc | 1 + src/logging/writers/ElasticSearch.cc | 1 + src/logging/writers/None.cc | 1 + 8 files changed, 79 insertions(+), 1 deletion(-) diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 269ba32bfa..bcbea6e266 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -1215,12 +1215,16 @@ bool Manager::Flush(EnumVal* id) void Manager::Terminate() { // Make sure we process all the pending rotations. - while ( rotations_pending ) + + while ( rotations_pending > 0 ) { thread_mgr->ForceProcessing(); // A blatant layering violation ... usleep(1000); } + if ( rotations_pending < 0 ) + reporter->InternalError("Negative pending log rotations: %d", rotations_pending); + for ( vector::iterator s = streams.begin(); s != streams.end(); ++s ) { if ( ! *s ) @@ -1384,3 +1388,11 @@ bool Manager::FinishedRotation(WriterFrontend* writer, const char* new_name, con return result; } +bool Manager::FailedRotation(WriterFrontend* writer, const char* filename, + double open, double close, bool terminating) + { + --rotations_pending; + DBG_LOG(DBG_LOGGING, "Failed rotating writer '%s', file '%s' at %.6f,", + writer->Name(), filename, network_time); + return true; + } diff --git a/src/logging/Manager.h b/src/logging/Manager.h index d2041592c1..7de99035c4 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -153,6 +153,7 @@ public: protected: friend class WriterFrontend; friend class RotationFinishedMessage; + friend class RotationFailedMessage; friend class ::RemoteSerializer; friend class ::RotationTimer; @@ -178,6 +179,12 @@ protected: bool FinishedRotation(WriterFrontend* writer, const char* new_name, const char* old_name, double open, double close, bool terminating); + // Signals that a file couldn't be rotated, either because the writer + // implementation decided there was nothing to do or because a real error + // occurred. In the error case, a separate message for the reason is sent. + bool FailedRotation(WriterFrontend* writer, const char* filename, + double open, double close, bool terminating); + // Deletes the values as passed into Write(). 
void DeleteVals(int num_fields, threading::Value** vals); diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index afdc4b99c5..8b4d49d6e9 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -43,6 +43,32 @@ private: bool terminating; }; +class RotationFailedMessage : public threading::OutputMessage +{ +public: + RotationFailedMessage(WriterFrontend* writer, const char* filename, + double open, double close, bool terminating) + : threading::OutputMessage("RotationFailed", writer), + filename(copy_string(filename)), open(open), + close(close), terminating(terminating) { } + + virtual ~RotationFailedMessage() + { + delete [] filename; + } + + virtual bool Process() + { + return log_mgr->FailedRotation(Object(), filename, open, close, terminating); + } + +private: + const char* filename; + double open; + double close; + bool terminating; +}; + class FlushWriteBufferMessage : public threading::OutputMessage { public: @@ -164,6 +190,13 @@ bool WriterBackend::FinishedRotation(const char* new_name, const char* old_name, return true; } +bool WriterBackend::FailedRotation(const char* filename, double open, + double close, bool terminating) + { + SendOut(new RotationFailedMessage(frontend, filename, open, close, terminating)); + return true; + } + void WriterBackend::DisableFrontend() { SendOut(new DisableMessage(frontend)); diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 77dbe71f45..64eb13ddec 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -229,6 +229,25 @@ public: bool FinishedRotation(const char* new_name, const char* old_name, double open, double close, bool terminating); + /** + * Signals that a file couldn't be rotated. This must be called by a + * writer's implementation of DoRotate() in all cases where + * FinishedRotation() was not called or failed. + * + * Most of the parameters should be passed through from DoRotate(). + * + * @param filename The name of the file that was attempted to be rotated. + * + * @param open: The timestamp when the original file was opened. + * + * @param close: The timestamp when the origina file was closed. + * + * @param terminating: True if the original rotation request occured + * due to the main Bro process shutting down. + */ + bool FailedRotation(const char* filename, double open, double close, + bool terminating); + /** Helper method to render an IP address as a string. * * @param addr The address. diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index c4c6b06563..805ccaa4cc 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -373,7 +373,10 @@ bool Ascii::DoRotate(const char* rotated_path, double open, double close, bool t { // Don't rotate special files or if there's not one currently open. if ( ! fd || IsSpecial(Info().path) ) + { + FailedRotation(rotated_path, open, close, terminating); return true; + } CloseFile(close); @@ -382,6 +385,7 @@ bool Ascii::DoRotate(const char* rotated_path, double open, double close, bool t if ( ! 
FinishedRotation(nname.c_str(), fname.c_str(), open, close, terminating) ) { + FailedRotation(rotated_path, open, close, terminating); Error(Fmt("error rotating %s to %s", fname.c_str(), nname.c_str())); return false; } diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index 7d3053e341..29e1705bf5 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -407,6 +407,7 @@ bool DataSeries::DoRotate(const char* rotated_path, double open, double close, b if ( ! FinishedRotation(nname.c_str(), dsname.c_str(), open, close, terminating) ) { + FailedRotation(rotated_path, open, close, terminating); Error(Fmt("error rotating %s to %s", dsname.c_str(), nname.c_str())); return false; } diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index cc6f8b1c4f..d663e375c5 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -323,6 +323,7 @@ bool ElasticSearch::DoRotate(const char* rotated_path, double open, double close if ( ! FinishedRotation(current_index.c_str(), prev_index.c_str(), open, close, terminating) ) { + FailedRotation(rotated_path, open, close, terminating); Error(Fmt("error rotating %s to %s", prev_index.c_str(), current_index.c_str())); } diff --git a/src/logging/writers/None.cc b/src/logging/writers/None.cc index 9b91b82199..0d659ed34e 100644 --- a/src/logging/writers/None.cc +++ b/src/logging/writers/None.cc @@ -46,6 +46,7 @@ bool None::DoRotate(const char* rotated_path, double open, double close, bool te { if ( ! FinishedRotation("/dev/null", Info().path, open, close, terminating)) { + FailedRotation(rotated_path, open, close, terminating); Error(Fmt("error rotating %s", Info().path)); return false; } From 4ba038070f0047a81e422ada9a347395f5ba911d Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sat, 28 Jul 2012 11:55:31 -0700 Subject: [PATCH 555/651] Tweaking writer API for failed rotations. There are now two FinishedRotation() methods, one that triggers post-processing and one that doesn't. There's also insurance built in against a writer not calling either (or both), in which case we abort with an internal error. --- CHANGES | 9 +++++ VERSION | 2 +- src/logging/Manager.cc | 20 +++++------ src/logging/Manager.h | 8 +---- src/logging/WriterBackend.cc | 52 ++++++++++------------------ src/logging/WriterBackend.h | 32 +++++++++++------ src/logging/WriterFrontend.cc | 5 ++- src/logging/writers/Ascii.cc | 3 +- src/logging/writers/DataSeries.cc | 1 - src/logging/writers/ElasticSearch.cc | 3 -- src/logging/writers/None.cc | 1 - src/util.cc | 3 ++ 12 files changed, 65 insertions(+), 74 deletions(-) diff --git a/CHANGES b/CHANGES index aaa2c53569..b3fe4ad620 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,13 @@ +2.0-905 | 2012-07-28 16:24:34 -0700 + + * Fix log manager hanging on waiting for pending file rotations, + plus writer API tweak for failed rotations. Addresses #860. (Jon + Siwek and Robin Sommer) + + * Tweaking logs-to-elasticsearch.bro so that it doesn't do anything + if ES server is unset. 
(Robin Sommer) + 2.0-902 | 2012-07-27 12:42:13 -0700 * New variable in logging framework Log::active_streams to indicate diff --git a/VERSION b/VERSION index f320985bf6..57c0d2a8a9 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-902 +2.0-905 diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index bcbea6e266..7a182a78b7 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -1338,13 +1338,18 @@ void Manager::Rotate(WriterInfo* winfo) } bool Manager::FinishedRotation(WriterFrontend* writer, const char* new_name, const char* old_name, - double open, double close, bool terminating) + double open, double close, bool success, bool terminating) { + assert(writer); + --rotations_pending; - if ( ! writer ) - // Writer didn't produce local output. + if ( ! success ) + { + DBG_LOG(DBG_LOGGING, "Non-successful rotating writer '%s', file '%s' at %.6f,", + writer->Name(), filename, network_time); return true; + } DBG_LOG(DBG_LOGGING, "Finished rotating %s at %.6f, new name %s", writer->Name(), network_time, new_name); @@ -1387,12 +1392,3 @@ bool Manager::FinishedRotation(WriterFrontend* writer, const char* new_name, con return result; } - -bool Manager::FailedRotation(WriterFrontend* writer, const char* filename, - double open, double close, bool terminating) - { - --rotations_pending; - DBG_LOG(DBG_LOGGING, "Failed rotating writer '%s', file '%s' at %.6f,", - writer->Name(), filename, network_time); - return true; - } diff --git a/src/logging/Manager.h b/src/logging/Manager.h index 7de99035c4..864a23ca88 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -177,13 +177,7 @@ protected: // Signals that a file has been rotated. bool FinishedRotation(WriterFrontend* writer, const char* new_name, const char* old_name, - double open, double close, bool terminating); - - // Signals that a file couldn't be rotated, either because the writer - // implementation decided there was nothing to do or because a real error - // occurred. In the error case, a separate message for the reason is sent. - bool FailedRotation(WriterFrontend* writer, const char* filename, - double open, double close, bool terminating); + double open, double close, bool success, bool terminating); // Deletes the values as passed into Write(). 
void DeleteVals(int num_fields, threading::Value** vals); diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 8b4d49d6e9..47fdec27ef 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -19,10 +19,10 @@ class RotationFinishedMessage : public threading::OutputMessage { public: RotationFinishedMessage(WriterFrontend* writer, const char* new_name, const char* old_name, - double open, double close, bool terminating) + double open, double close, bool success, bool terminating) : threading::OutputMessage("RotationFinished", writer), new_name(copy_string(new_name)), old_name(copy_string(old_name)), open(open), - close(close), terminating(terminating) { } + close(close), success(success), terminating(terminating) { } virtual ~RotationFinishedMessage() { @@ -32,7 +32,7 @@ public: virtual bool Process() { - return log_mgr->FinishedRotation(Object(), new_name, old_name, open, close, terminating); + return log_mgr->FinishedRotation(Object(), new_name, old_name, open, close, success, terminating); } private: @@ -40,32 +40,7 @@ private: const char* old_name; double open; double close; - bool terminating; -}; - -class RotationFailedMessage : public threading::OutputMessage -{ -public: - RotationFailedMessage(WriterFrontend* writer, const char* filename, - double open, double close, bool terminating) - : threading::OutputMessage("RotationFailed", writer), - filename(copy_string(filename)), open(open), - close(close), terminating(terminating) { } - - virtual ~RotationFailedMessage() - { - delete [] filename; - } - - virtual bool Process() - { - return log_mgr->FailedRotation(Object(), filename, open, close, terminating); - } - -private: - const char* filename; - double open; - double close; + bool success; bool terminating; }; @@ -152,6 +127,7 @@ WriterBackend::WriterBackend(WriterFrontend* arg_frontend) : MsgThread() buffering = true; frontend = arg_frontend; info = new WriterInfo(frontend->Info()); + rotation_counter = 0; SetName(frontend->Name()); } @@ -186,14 +162,15 @@ void WriterBackend::DeleteVals(int num_writes, Value*** vals) bool WriterBackend::FinishedRotation(const char* new_name, const char* old_name, double open, double close, bool terminating) { - SendOut(new RotationFinishedMessage(frontend, new_name, old_name, open, close, terminating)); + --rotation_counter; + SendOut(new RotationFinishedMessage(frontend, new_name, old_name, open, close, true, terminating)); return true; } -bool WriterBackend::FailedRotation(const char* filename, double open, - double close, bool terminating) +bool WriterBackend::FinishedRotation() { - SendOut(new RotationFailedMessage(frontend, filename, open, close, terminating)); + --rotation_counter; + SendOut(new RotationFinishedMessage(frontend, 0, 0, 0, 0, false, false)); return true; } @@ -303,12 +280,21 @@ bool WriterBackend::Rotate(const char* rotated_path, double open, if ( Failed() ) return true; + rotation_counter = 1; + if ( ! DoRotate(rotated_path, open, close, terminating) ) { DisableFrontend(); return false; } + // Insurance against broken writers. 
+ if ( rotation_counter > 0 ) + InternalError(Fmt("writer %s did not call FinishedRotation() in DoRotation()", Name())); + + if ( rotation_counter < 0 ) + InternalError(Fmt("writer %s called FinishedRotation() more than once in DoRotation()", Name())); + return true; } diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 64eb13ddec..89185619c4 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -210,11 +210,15 @@ public: bool IsBuf() { return buffering; } /** - * Signals that a file has been rotated. This must be called by a - * writer's implementation of DoRotate() once rotation has finished. + * Signals that a file has been successfully rotated and any + * potential post-processor can now run. * * Most of the parameters should be passed through from DoRotate(). * + * Note: Exactly one of the two FinishedRotation() methods must be + * called by a writer's implementation of DoRotate() once rotation + * has finished. + * * @param new_name The filename of the rotated file. * * @param old_name The filename of the original file. @@ -230,13 +234,18 @@ public: double open, double close, bool terminating); /** - * Signals that a file couldn't be rotated. This must be called by a - * writer's implementation of DoRotate() in all cases where - * FinishedRotation() was not called or failed. + * Signals that a file rotation request has been processed, but no + * further post-processing needs to be performed (either because + * there was an error, or there was nothing to rotate to begin with + * with this writer). * - * Most of the parameters should be passed through from DoRotate(). + * Note: Exactly one of the two FinishedRotation() methods must be + * called by a writer's implementation of DoRotate() once rotation + * has finished. * - * @param filename The name of the file that was attempted to be rotated. + * @param new_name The filename of the rotated file. + * + * @param old_name The filename of the original file. * * @param open: The timestamp when the original file was opened. * @@ -245,8 +254,7 @@ public: * @param terminating: True if the original rotation request occured * due to the main Bro process shutting down. */ - bool FailedRotation(const char* filename, double open, double close, - bool terminating); + bool FinishedRotation(); /** Helper method to render an IP address as a string. * @@ -344,8 +352,8 @@ protected: * Writer-specific method implementing log rotation. Most directly * this only applies to writers writing into files, which should then * close the current file and open a new one. However, a writer may - * also trigger other apppropiate actions if semantics are similar. * - * Once rotation has finished, the implementation must call + * also trigger other apppropiate actions if semantics are similar. + * Once rotation has finished, the implementation *must* call * FinishedRotation() to signal the log manager that potential * postprocessors can now run. * @@ -407,6 +415,8 @@ private: int num_fields; // Number of log fields. const threading::Field* const* fields; // Log fields. bool buffering; // True if buffering is enabled. + + int rotation_counter; // Tracks FinishedRotation() calls. 
}; diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 7c8f6861cf..a97f48c1ed 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -248,9 +248,8 @@ void WriterFrontend::Rotate(const char* rotated_path, double open, double close, if ( backend ) backend->SendIn(new RotateMessage(backend, this, rotated_path, open, close, terminating)); else - // Still signal log manager that we're done, but signal that - // nothing happened by setting the writer to zeri. - log_mgr->FinishedRotation(0, "", rotated_path, open, close, terminating); + // Still signal log manager that we're done. + log_mgr->FinishedRotation(this, 0, 0, 0, 0, false, terminating); } void WriterFrontend::DeleteVals(Value** vals) diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 805ccaa4cc..f6df3b9336 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -374,7 +374,7 @@ bool Ascii::DoRotate(const char* rotated_path, double open, double close, bool t // Don't rotate special files or if there's not one currently open. if ( ! fd || IsSpecial(Info().path) ) { - FailedRotation(rotated_path, open, close, terminating); + FinishedRotation(); return true; } @@ -385,7 +385,6 @@ bool Ascii::DoRotate(const char* rotated_path, double open, double close, bool t if ( ! FinishedRotation(nname.c_str(), fname.c_str(), open, close, terminating) ) { - FailedRotation(rotated_path, open, close, terminating); Error(Fmt("error rotating %s to %s", fname.c_str(), nname.c_str())); return false; } diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index 29e1705bf5..7d3053e341 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -407,7 +407,6 @@ bool DataSeries::DoRotate(const char* rotated_path, double open, double close, b if ( ! FinishedRotation(nname.c_str(), dsname.c_str(), open, close, terminating) ) { - FailedRotation(rotated_path, open, close, terminating); Error(Fmt("error rotating %s to %s", dsname.c_str(), nname.c_str())); return false; } diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index d663e375c5..7a80866bf7 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -322,10 +322,7 @@ bool ElasticSearch::DoRotate(const char* rotated_path, double open, double close } if ( ! FinishedRotation(current_index.c_str(), prev_index.c_str(), open, close, terminating) ) - { - FailedRotation(rotated_path, open, close, terminating); Error(Fmt("error rotating %s to %s", prev_index.c_str(), current_index.c_str())); - } return true; } diff --git a/src/logging/writers/None.cc b/src/logging/writers/None.cc index 0d659ed34e..9b91b82199 100644 --- a/src/logging/writers/None.cc +++ b/src/logging/writers/None.cc @@ -46,7 +46,6 @@ bool None::DoRotate(const char* rotated_path, double open, double close, bool te { if ( ! FinishedRotation("/dev/null", Info().path, open, close, terminating)) { - FailedRotation(rotated_path, open, close, terminating); Error(Fmt("error rotating %s", Info().path)); return false; } diff --git a/src/util.cc b/src/util.cc index 228e40dddb..2d981e952e 100644 --- a/src/util.cc +++ b/src/util.cc @@ -113,6 +113,9 @@ std::string get_escaped_string(const std::string& str, bool escape_all) char* copy_string(const char* s) { + if ( ! 
s ) + return 0; + char* c = new char[strlen(s)+1]; strcpy(c, s); return c; From 00d41bb549732d0a66a0fa683264a063705821d9 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 30 Jul 2012 11:07:43 -0500 Subject: [PATCH 556/651] Add missing breaks to switch cases in ElasticSearch::HTTPReceive(). Observed as reason for segfault in testing/btest/scripts/check-test-all-policy.bro unit test when compiled with optimizations. --- src/logging/writers/ElasticSearch.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index 7a80866bf7..9e5e3fb207 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -385,12 +385,14 @@ bool ElasticSearch::HTTPSend(CURL *handle) if ( ! failing ) Error(Fmt("ElasticSearch server may not be accessible.")); } + break; case CURLE_OPERATION_TIMEDOUT: { if ( ! failing ) Warning(Fmt("HTTP operation with elasticsearch server timed out at %" PRIu64 " msecs.", transfer_timeout)); } + break; case CURLE_OK: { @@ -402,10 +404,12 @@ bool ElasticSearch::HTTPSend(CURL *handle) else if ( ! failing ) Error(Fmt("Received a non-successful status code back from ElasticSearch server, check the elasticsearch server log.")); } + break; default: { } + break; } // The "successful" return happens above return false; From 7b2c3db4881dd8acb3836c91c6d9da0895578405 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 30 Jul 2012 13:09:13 -0500 Subject: [PATCH 557/651] Improve log filter compatibility with remote logging. If a log filter attempts to write to a path for which a writer is already instantiated due to remote logging, it will re-use the writer as long as the fields of the filter and writer are compatible, else the filter path will be auto-adjusted to not conflict with existing writer's. Conflicts between two local filters are still always auto-adjusted even if field types agree (since they could still be semantically different). Addresses #842. --- src/RemoteSerializer.cc | 3 ++- src/logging/Manager.cc | 38 +++++++++++++++++++++++++++++++------- src/logging/Manager.h | 4 +++- 3 files changed, 36 insertions(+), 9 deletions(-) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 4e9ccb7dd2..cfd20eba39 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -2716,7 +2716,8 @@ bool RemoteSerializer::ProcessLogCreateWriter() id_val = new EnumVal(id, BifType::Enum::Log::ID); writer_val = new EnumVal(writer, BifType::Enum::Log::Writer); - if ( ! log_mgr->CreateWriter(id_val, writer_val, info, num_fields, fields, true, false) ) + if ( ! 
log_mgr->CreateWriter(id_val, writer_val, info, num_fields, fields, + true, false, true) ) goto error; Unref(id_val); diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 7a182a78b7..4c6d2e92fd 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -86,6 +86,7 @@ struct Manager::WriterInfo { Func* postprocessor; WriterFrontend* writer; WriterBackend::WriterInfo* info; + bool from_remote; string instantiating_filter; }; @@ -240,6 +241,29 @@ Manager::WriterInfo* Manager::FindWriter(WriterFrontend* writer) return 0; } +bool Manager::CompareFields(const Filter* filter, const WriterFrontend* writer) + { + if ( filter->num_fields != writer->NumFields() ) + return false; + + for ( int i = 0; i < filter->num_fields; ++ i) + if ( filter->fields[i]->type != writer->Fields()[i]->type ) + return false; + + return true; + } + +bool Manager::CheckFilterWriterConflict(const WriterInfo* winfo, const Filter* filter) + { + if ( winfo->from_remote ) + // If the writer was instantiated as a result of remote logging, then + // a filter and writer are only compatible if field types match + return ! CompareFields(filter, winfo->writer); + else + // If the writer was instantiated locally, it is bound to one filter + return winfo->instantiating_filter != filter->name; + } + void Manager::RemoveDisabledWriters(Stream* stream) { list disabled; @@ -756,10 +780,9 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) Stream::WriterMap::iterator w = stream->writers.find(wpp); if ( w != stream->writers.end() && - w->second->instantiating_filter != filter->name ) + CheckFilterWriterConflict(w->second, filter) ) { - // Auto-correct path due to conflict with another filter over the - // same writer/path pair + // Auto-correct path due to conflict over the writer/path pairs. string instantiator = w->second->instantiating_filter; string new_path; unsigned int i = 2; @@ -771,7 +794,7 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) wpp.second = new_path; w = stream->writers.find(wpp); } while ( w != stream->writers.end() && - w->second->instantiating_filter != filter->name ); + CheckFilterWriterConflict(w->second, filter) ); Unref(filter->path_val); filter->path_val = new StringVal(new_path.c_str()); @@ -824,8 +847,8 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) // CreateWriter() will set the other fields in info. writer = CreateWriter(stream->id, filter->writer, - info, filter->num_fields, - arg_fields, filter->local, filter->remote, filter->name); + info, filter->num_fields, arg_fields, filter->local, + filter->remote, false, filter->name); if ( ! 
writer ) { @@ -1024,7 +1047,7 @@ threading::Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, } WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, WriterBackend::WriterInfo* info, - int num_fields, const threading::Field* const* fields, bool local, bool remote, + int num_fields, const threading::Field* const* fields, bool local, bool remote, bool from_remote, const string& instantiating_filter) { Stream* stream = FindStream(id); @@ -1049,6 +1072,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, WriterBacken winfo->interval = 0; winfo->postprocessor = 0; winfo->info = info; + winfo->from_remote = from_remote; winfo->instantiating_filter = instantiating_filter; // Search for a corresponding filter for the writer/path pair and use its diff --git a/src/logging/Manager.h b/src/logging/Manager.h index 864a23ca88..90ad944bc6 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -166,7 +166,7 @@ protected: // Takes ownership of fields and info. WriterFrontend* CreateWriter(EnumVal* id, EnumVal* writer, WriterBackend::WriterInfo* info, int num_fields, const threading::Field* const* fields, - bool local, bool remote, const string& instantiating_filter=""); + bool local, bool remote, bool from_remote, const string& instantiating_filter=""); // Takes ownership of values.. bool Write(EnumVal* id, EnumVal* writer, string path, @@ -200,6 +200,8 @@ private: void Rotate(WriterInfo* info); Filter* FindFilter(EnumVal* id, StringVal* filter); WriterInfo* FindWriter(WriterFrontend* writer); + bool CompareFields(const Filter* filter, const WriterFrontend* writer); + bool CheckFilterWriterConflict(const WriterInfo* winfo, const Filter* filter); vector streams; // Indexed by stream enum. int rotations_pending; // Number of rotations not yet finished. From e3acf3af58979b1d0a42c5eb6ae45edc8f208188 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 30 Jul 2012 11:59:53 -0700 Subject: [PATCH 558/651] Updating submodule(s). [nomail] --- aux/binpac | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aux/binpac b/aux/binpac index 4f01ea4081..99e7a27431 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 4f01ea40817ad232a96535c64fce7dc16d4e2fff +Subproject commit 99e7a274319619a94a421eb62537c7a5c184f71b diff --git a/aux/broccoli b/aux/broccoli index 8234b8903c..b3692a02ba 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 8234b8903cbc775f341bdb6a1c0159981d88d27b +Subproject commit b3692a02bae9a47d701d2d547e327dd429a86e76 diff --git a/aux/broctl b/aux/broctl index 231358f166..5c9ed0d77b 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 231358f166f61cc32201a8ac3671ea0c0f5c324e +Subproject commit 5c9ed0d77bcb3e03d7e68334fe0d088fa2f92c71 From 01d91602ca60a0e2fc868b350c5170a9dd8452ce Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 30 Jul 2012 12:00:14 -0700 Subject: [PATCH 559/651] Updating CHANGES and VERSION. --- CHANGES | 2 +- NEWS | 3 ++- VERSION | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index 8b0303d520..5267fa9f37 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,5 @@ -2.0-909 | 2012-07-30 11:46:45 -0700 +2.1-beta | 2012-07-30 11:59:53 -0700 * Improve log filter compatibility with remote logging. Addresses #842. 
(Jon Siwek) diff --git a/NEWS b/NEWS index 7b60a05ccd..949b51d832 100644 --- a/NEWS +++ b/NEWS @@ -82,7 +82,8 @@ New Functionality * ElasticSearch: a distributed RESTful, storage engine and search engine built on top of Apache Lucene. It scales very well, both - for distributed indexing and distributed searching. + for distributed indexing and distributed searching. See + doc/logging-elasticsearch.rst for more information. Note that at this point, we consider Bro's support for these two formats as prototypes for collecting experience with alternative diff --git a/VERSION b/VERSION index 08cd7ce835..0fb956a360 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-909 +2.1-beta From 3bb6d4e54e6883cd9d64812d11aa9d2be9ed4fb4 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 1 Aug 2012 13:58:18 -0500 Subject: [PATCH 560/651] Fix configure script to exit with non-zero status on error --- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- configure | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aux/binpac b/aux/binpac index 99e7a27431..22120825f8 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 99e7a274319619a94a421eb62537c7a5c184f71b +Subproject commit 22120825f8ad70e051ef4ca42f2199aa195dff40 diff --git a/aux/bro-aux b/aux/bro-aux index c691c01e9c..941ee753f7 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit c691c01e9cefae5a79bcd4b0f84ca387c8c587a7 +Subproject commit 941ee753f7c71ec08fc29de04f09a8a83aebb69d diff --git a/aux/broccoli b/aux/broccoli index b3692a02ba..5ff3e6a8e8 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit b3692a02bae9a47d701d2d547e327dd429a86e76 +Subproject commit 5ff3e6a8e8535ed91e1f70d355b815ae8eeacb71 diff --git a/aux/broctl b/aux/broctl index 5c9ed0d77b..903108f6b4 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 5c9ed0d77bcb3e03d7e68334fe0d088fa2f92c71 +Subproject commit 903108f6b43ad228309713da880026d50add41f4 diff --git a/configure b/configure index bfe54123f0..b4ca606103 100755 --- a/configure +++ b/configure @@ -1,7 +1,7 @@ #!/bin/sh # Convenience wrapper for easily viewing/setting options that # the project's CMake scripts will recognize - +set -e command="$0 $*" # check for `cmake` command From 9829cf9a296b4f1c6614658b80225cd80f2e24ec Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 3 Aug 2012 10:44:46 -0700 Subject: [PATCH 561/651] Fixing little typo with big impact. --- CHANGES | 5 +++++ VERSION | 2 +- src/logging/writers/Ascii.cc | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 5267fa9f37..644a56d458 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,9 @@ +2.0-912 | 2012-08-03 10:44:46 -0700 + + * Fixing little typo with big impact. (Robin Sommer) + + 2.1-beta | 2012-07-30 11:59:53 -0700 * Improve log filter compatibility with remote logging. Addresses diff --git a/VERSION b/VERSION index 0fb956a360..f1cb181c5c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1-beta +2.0-912 diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index f6df3b9336..11b322f5a3 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -359,7 +359,7 @@ bool Ascii::DoWrite(int num_fields, const Field* const * fields, if ( ! safe_write(fd, bytes, len) ) goto write_error; - if ( IsBuf() ) + if ( ! 
IsBuf() ) fsync(fd); return true; From 10b671a6389ab0720a18ec6fb32be6e03ba6fa0b Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 3 Aug 2012 17:24:04 -0500 Subject: [PATCH 562/651] Add tests for untested BIFs --- testing/btest/Baseline/bifs.analyzer_name/out | 1 + testing/btest/Baseline/bifs.entropy_test/out | 2 ++ testing/btest/Baseline/bifs.global_sizes/out | 1 + testing/btest/Baseline/bifs.identify_data/out | 4 ++++ testing/btest/Baseline/bifs.strftime/out | 4 ++++ testing/btest/bifs/analyzer_name.bro | 9 +++++++ testing/btest/bifs/bro_version.bro | 9 +++++++ testing/btest/bifs/checkpoint_state.bro | 10 ++++++++ testing/btest/bifs/current_analyzer.bro | 11 +++++++++ testing/btest/bifs/current_time.bro | 9 +++++++ testing/btest/bifs/entropy_test.bro | 24 +++++++++++++++++++ testing/btest/bifs/gethostname.bro | 9 +++++++ testing/btest/bifs/getpid.bro | 9 +++++++ testing/btest/bifs/global_sizes.bro | 16 +++++++++++++ testing/btest/bifs/identify_data.bro | 16 +++++++++++++ testing/btest/bifs/resource_usage.bro | 9 +++++++ testing/btest/bifs/strftime.bro | 17 +++++++++++++ 17 files changed, 160 insertions(+) create mode 100644 testing/btest/Baseline/bifs.analyzer_name/out create mode 100644 testing/btest/Baseline/bifs.entropy_test/out create mode 100644 testing/btest/Baseline/bifs.global_sizes/out create mode 100644 testing/btest/Baseline/bifs.identify_data/out create mode 100644 testing/btest/Baseline/bifs.strftime/out create mode 100644 testing/btest/bifs/analyzer_name.bro create mode 100644 testing/btest/bifs/bro_version.bro create mode 100644 testing/btest/bifs/checkpoint_state.bro create mode 100644 testing/btest/bifs/current_analyzer.bro create mode 100644 testing/btest/bifs/current_time.bro create mode 100644 testing/btest/bifs/entropy_test.bro create mode 100644 testing/btest/bifs/gethostname.bro create mode 100644 testing/btest/bifs/getpid.bro create mode 100644 testing/btest/bifs/global_sizes.bro create mode 100644 testing/btest/bifs/identify_data.bro create mode 100644 testing/btest/bifs/resource_usage.bro create mode 100644 testing/btest/bifs/strftime.bro diff --git a/testing/btest/Baseline/bifs.analyzer_name/out b/testing/btest/Baseline/bifs.analyzer_name/out new file mode 100644 index 0000000000..84613e9dd1 --- /dev/null +++ b/testing/btest/Baseline/bifs.analyzer_name/out @@ -0,0 +1 @@ +PIA_TCP diff --git a/testing/btest/Baseline/bifs.entropy_test/out b/testing/btest/Baseline/bifs.entropy_test/out new file mode 100644 index 0000000000..08a09de4e4 --- /dev/null +++ b/testing/btest/Baseline/bifs.entropy_test/out @@ -0,0 +1,2 @@ +[entropy=4.715374, chi_square=591.981818, mean=75.472727, monte_carlo_pi=4.0, serial_correlation=-0.11027] +[entropy=2.083189, chi_square=3906.018182, mean=69.054545, monte_carlo_pi=4.0, serial_correlation=0.849402] diff --git a/testing/btest/Baseline/bifs.global_sizes/out b/testing/btest/Baseline/bifs.global_sizes/out new file mode 100644 index 0000000000..76c40b297a --- /dev/null +++ b/testing/btest/Baseline/bifs.global_sizes/out @@ -0,0 +1 @@ +found bro_init diff --git a/testing/btest/Baseline/bifs.identify_data/out b/testing/btest/Baseline/bifs.identify_data/out new file mode 100644 index 0000000000..a2872877f9 --- /dev/null +++ b/testing/btest/Baseline/bifs.identify_data/out @@ -0,0 +1,4 @@ +ASCII text, with no line terminators +text/plain; charset=us-ascii +PNG image data +image/png; charset=binary diff --git a/testing/btest/Baseline/bifs.strftime/out b/testing/btest/Baseline/bifs.strftime/out new file mode 100644 index 0000000000..b32393b332 
--- /dev/null +++ b/testing/btest/Baseline/bifs.strftime/out @@ -0,0 +1,4 @@ +1970-01-01 00:00:00 +000000 19700101 +1973-11-29 21:33:09 +213309 19731129 diff --git a/testing/btest/bifs/analyzer_name.bro b/testing/btest/bifs/analyzer_name.bro new file mode 100644 index 0000000000..034344f5c4 --- /dev/null +++ b/testing/btest/bifs/analyzer_name.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = 1; + print analyzer_name(a); + } diff --git a/testing/btest/bifs/bro_version.bro b/testing/btest/bifs/bro_version.bro new file mode 100644 index 0000000000..7465cbc0f5 --- /dev/null +++ b/testing/btest/bifs/bro_version.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT + +event bro_init() + { + local a = bro_version(); + if ( |a| == 0 ) + exit(1); + } diff --git a/testing/btest/bifs/checkpoint_state.bro b/testing/btest/bifs/checkpoint_state.bro new file mode 100644 index 0000000000..2a66bd1729 --- /dev/null +++ b/testing/btest/bifs/checkpoint_state.bro @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: bro %INPUT +# @TEST-EXEC: test -f .state/state.bst + +event bro_init() + { + local a = checkpoint_state(); + if ( a != T ) + exit(1); + } diff --git a/testing/btest/bifs/current_analyzer.bro b/testing/btest/bifs/current_analyzer.bro new file mode 100644 index 0000000000..45b495c046 --- /dev/null +++ b/testing/btest/bifs/current_analyzer.bro @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: bro %INPUT + +event bro_init() + { + local a = current_analyzer(); + if ( a != 0 ) + exit(1); + + # TODO: add a test for non-zero return value + } diff --git a/testing/btest/bifs/current_time.bro b/testing/btest/bifs/current_time.bro new file mode 100644 index 0000000000..5d16df396d --- /dev/null +++ b/testing/btest/bifs/current_time.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT + +event bro_init() + { + local a = current_time(); + if ( a <= double_to_time(0) ) + exit(1); + } diff --git a/testing/btest/bifs/entropy_test.bro b/testing/btest/bifs/entropy_test.bro new file mode 100644 index 0000000000..ca01c79ed7 --- /dev/null +++ b/testing/btest/bifs/entropy_test.bro @@ -0,0 +1,24 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = "dh3Hie02uh^s#Sdf9L3frd243h$d78r2G4cM6*Q05d(7rh46f!0|4-f"; + if ( entropy_test_init(1) != T ) + exit(1); + + if ( entropy_test_add(1, a) != T ) + exit(1); + + print entropy_test_finish(1); + + local b = "0011000aaabbbbcccc000011111000000000aaaabbbbcccc0000000"; + if ( entropy_test_init(2) != T ) + exit(1); + + if ( entropy_test_add(2, b) != T ) + exit(1); + + print entropy_test_finish(2); + } diff --git a/testing/btest/bifs/gethostname.bro b/testing/btest/bifs/gethostname.bro new file mode 100644 index 0000000000..97af719745 --- /dev/null +++ b/testing/btest/bifs/gethostname.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT + +event bro_init() + { + local a = gethostname(); + if ( |a| == 0 ) + exit(1); + } diff --git a/testing/btest/bifs/getpid.bro b/testing/btest/bifs/getpid.bro new file mode 100644 index 0000000000..98edc19a44 --- /dev/null +++ b/testing/btest/bifs/getpid.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT + +event bro_init() + { + local a = getpid(); + if ( a == 0 ) + exit(1); + } diff --git a/testing/btest/bifs/global_sizes.bro b/testing/btest/bifs/global_sizes.bro new file mode 100644 index 0000000000..4862db318b --- /dev/null +++ b/testing/btest/bifs/global_sizes.bro @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local a = 
global_sizes(); + for ( i in a ) + { + # the table is quite large, so just look for one item we expect + if ( i == "bro_init" ) + print "found bro_init"; + + } + + } diff --git a/testing/btest/bifs/identify_data.bro b/testing/btest/bifs/identify_data.bro new file mode 100644 index 0000000000..11824b5e85 --- /dev/null +++ b/testing/btest/bifs/identify_data.bro @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + # plain text + local a = "This is a test"; + print identify_data(a, F); + print identify_data(a, T); + + # PNG image + local b = "\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"; + print identify_data(b, F); + print identify_data(b, T); + } diff --git a/testing/btest/bifs/resource_usage.bro b/testing/btest/bifs/resource_usage.bro new file mode 100644 index 0000000000..35f5b020d6 --- /dev/null +++ b/testing/btest/bifs/resource_usage.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT + +event bro_init() + { + local a = resource_usage(); + if ( a$version != bro_version() ) + exit(1); + } diff --git a/testing/btest/bifs/strftime.bro b/testing/btest/bifs/strftime.bro new file mode 100644 index 0000000000..31f9538632 --- /dev/null +++ b/testing/btest/bifs/strftime.bro @@ -0,0 +1,17 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + local f1 = "%Y-%m-%d %H:%M:%S"; + local f2 = "%H%M%S %Y%m%d"; + + local a = double_to_time(0); + print strftime(f1, a); + print strftime(f2, a); + + a = double_to_time(123456789); + print strftime(f1, a); + print strftime(f2, a); + } From 18550ab009852059ecacc98b8035fc370a5e8fee Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sat, 4 Aug 2012 22:24:44 -0700 Subject: [PATCH 563/651] small bug in test script. Still worked, because the internal type checking let this through... --- testing/btest/scripts/base/frameworks/input/predicate.bro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/btest/scripts/base/frameworks/input/predicate.bro b/testing/btest/scripts/base/frameworks/input/predicate.bro index 2cda6f5fb9..fcd986c9a6 100644 --- a/testing/btest/scripts/base/frameworks/input/predicate.bro +++ b/testing/btest/scripts/base/frameworks/input/predicate.bro @@ -35,7 +35,7 @@ type Val: record { b: bool; }; -global servers: table[int] of Val = table(); +global servers: table[int] of bool = table(); event bro_init() { From a2b5028b58dee3dfd2759235a65a7c829ca40555 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sat, 4 Aug 2012 22:38:26 -0700 Subject: [PATCH 564/651] fix little sneaky bug in input framework with an edge case. An assertion would trigger in the case when a predicate refuses a new entry and another entry with the same index elements was already in the table. (I thought that code block was unreachable ... did not think of this case). --- src/input/Manager.cc | 4 +- .../out | 3 + .../input/predicaterefusesecondsamerecord.bro | 56 +++++++++++++++++++ 3 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.predicaterefusesecondsamerecord/out create mode 100644 testing/btest/scripts/base/frameworks/input/predicaterefusesecondsamerecord.bro diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 64e54f9333..3c29f14928 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1044,9 +1044,7 @@ int Manager::SendEntryTable(Stream* i, const Value* const *vals) if ( ! updated ) { - // throw away. Hence - we quit. And remove the entry from the current dictionary... 
- // (but why should it be in there? assert this). - assert ( stream->currDict->RemoveEntry(idxhash) == 0 ); + // just quit and delete everything we created. delete idxhash; delete h; return stream->num_val_fields + stream->num_idx_fields; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.predicaterefusesecondsamerecord/out b/testing/btest/Baseline/scripts.base.frameworks.input.predicaterefusesecondsamerecord/out new file mode 100644 index 0000000000..f752ff451a --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.predicaterefusesecondsamerecord/out @@ -0,0 +1,3 @@ +{ +[1.228.83.33] = [asn=9318 HANARO-AS Hanaro Telecom Inc., severity=medium, confidence=95, detecttime=1342569600.0] +} diff --git a/testing/btest/scripts/base/frameworks/input/predicaterefusesecondsamerecord.bro b/testing/btest/scripts/base/frameworks/input/predicaterefusesecondsamerecord.bro new file mode 100644 index 0000000000..d572b30090 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/predicaterefusesecondsamerecord.bro @@ -0,0 +1,56 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff out + +# Ok, this one tests a fun case. +# Input file contains two lines mapping to the same index, but with different values, +# where the predicate accepts the first one and refuses the second one. +# Desired result -> first entry stays. + +@TEST-START-FILE input.log +#fields restriction guid severity confidence detecttime address protocol portlist asn prefix rir cc impact description alternativeid_restriction alternativeid +need-to-know 8c864306-d21a-37b1-8705-746a786719bf medium 65 1342656000 1.0.17.227 - - 2519 VECTANT VECTANT Ltd. 1.0.16.0/23 apnic JP spam infrastructure spamming public http://reputation.alienvault.com/reputation.generic +need-to-know 8c864306-d21a-37b1-8705-746a786719bf medium 95 1342569600 1.228.83.33 6 25 9318 HANARO-AS Hanaro Telecom Inc. 1.224.0.0/13 apnic KR spam infrastructure direct ube sources, spam operations & spam services public http://www.spamhaus.org/query/bl?ip=1.228.83.33 +need-to-know 8c864306-d21a-37b1-8705-746a786719bf medium 65 1342656000 1.228.83.33 - - 9318 HANARO-AS Hanaro Telecom Inc. 1.224.0.0/13 apnic KR spam infrastructure spamming;malware domain public http://reputation.alienvault.com/reputation.generic +@TEST-END-FILE + +@load frameworks/communication/listen + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + address: addr; +}; + +type Val: record { + asn: string; + severity: string; + confidence: count; + detecttime: time; +}; + +global servers: table[addr] of Val = table(); + +event bro_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, + $pred(typ: Input::Event, left: Idx, right: Val) = { if ( right$confidence > 90 ) { return T; } return F; } + ]); + Input::remove("input"); + } + +event Input::update_finished(name: string, source: string) + { + print outfile, servers; + close(outfile); + terminate(); + } From bda8631f32d366128622fb474567573d54184d8f Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 7 Aug 2012 14:10:55 -0500 Subject: [PATCH 565/651] Add more BIF tests --- testing/btest/Baseline/bifs.capture_state_updates/out | 1 + testing/btest/Baseline/bifs.is_local_interface/out | 4 ++++ testing/btest/Baseline/bifs.reading_traces/out1 | 1 + testing/btest/Baseline/bifs.reading_traces/out2 | 1 + testing/btest/bifs/capture_state_updates.bro | 9 +++++++++ testing/btest/bifs/get_matcher_stats.bro | 9 +++++++++ testing/btest/bifs/is_local_interface.bro | 11 +++++++++++ testing/btest/bifs/reading_traces.bro | 10 ++++++++++ 8 files changed, 46 insertions(+) create mode 100644 testing/btest/Baseline/bifs.capture_state_updates/out create mode 100644 testing/btest/Baseline/bifs.is_local_interface/out create mode 100644 testing/btest/Baseline/bifs.reading_traces/out1 create mode 100644 testing/btest/Baseline/bifs.reading_traces/out2 create mode 100644 testing/btest/bifs/capture_state_updates.bro create mode 100644 testing/btest/bifs/get_matcher_stats.bro create mode 100644 testing/btest/bifs/is_local_interface.bro create mode 100644 testing/btest/bifs/reading_traces.bro diff --git a/testing/btest/Baseline/bifs.capture_state_updates/out b/testing/btest/Baseline/bifs.capture_state_updates/out new file mode 100644 index 0000000000..62a6e3c9df --- /dev/null +++ b/testing/btest/Baseline/bifs.capture_state_updates/out @@ -0,0 +1 @@ +T diff --git a/testing/btest/Baseline/bifs.is_local_interface/out b/testing/btest/Baseline/bifs.is_local_interface/out new file mode 100644 index 0000000000..328bff6687 --- /dev/null +++ b/testing/btest/Baseline/bifs.is_local_interface/out @@ -0,0 +1,4 @@ +T +F +F +T diff --git a/testing/btest/Baseline/bifs.reading_traces/out1 b/testing/btest/Baseline/bifs.reading_traces/out1 new file mode 100644 index 0000000000..cf84443e49 --- /dev/null +++ b/testing/btest/Baseline/bifs.reading_traces/out1 @@ -0,0 +1 @@ +F diff --git a/testing/btest/Baseline/bifs.reading_traces/out2 b/testing/btest/Baseline/bifs.reading_traces/out2 new file mode 100644 index 0000000000..62a6e3c9df --- /dev/null +++ b/testing/btest/Baseline/bifs.reading_traces/out2 @@ -0,0 +1 @@ +T diff --git a/testing/btest/bifs/capture_state_updates.bro b/testing/btest/bifs/capture_state_updates.bro new file mode 100644 index 0000000000..3abfdffdc1 --- /dev/null +++ b/testing/btest/bifs/capture_state_updates.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: test -f testfile + +event bro_init() + { + print capture_state_updates("testfile"); + } diff --git a/testing/btest/bifs/get_matcher_stats.bro b/testing/btest/bifs/get_matcher_stats.bro new file mode 100644 index 0000000000..baee49fe1e --- /dev/null +++ b/testing/btest/bifs/get_matcher_stats.bro @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: bro %INPUT + +event bro_init() + { + local a = get_matcher_stats(); + if ( a$matchers == 0 ) + exit(1); + } diff --git a/testing/btest/bifs/is_local_interface.bro b/testing/btest/bifs/is_local_interface.bro new file mode 100644 index 0000000000..8befdca385 --- /dev/null +++ b/testing/btest/bifs/is_local_interface.bro @@ -0,0 
+1,11 @@ +# +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +event bro_init() + { + print is_local_interface(127.0.0.1); + print is_local_interface(1.2.3.4); + print is_local_interface([2607::a:b:c:d]); + print is_local_interface([::1]); + } diff --git a/testing/btest/bifs/reading_traces.bro b/testing/btest/bifs/reading_traces.bro new file mode 100644 index 0000000000..fc83c50ccb --- /dev/null +++ b/testing/btest/bifs/reading_traces.bro @@ -0,0 +1,10 @@ + +# @TEST-EXEC: bro %INPUT >out1 +# @TEST-EXEC: btest-diff out1 +# @TEST-EXEC: bro -r $TRACES/web.trace %INPUT >out2 +# @TEST-EXEC: btest-diff out2 + +event bro_init() + { + print reading_traces(); + } From 7c6b891b633c0c26298803d19b882b02f4a6f526 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 9 Aug 2012 13:46:58 -0400 Subject: [PATCH 566/651] Small improvements for printing reporter messages to STDERR. --- scripts/base/frameworks/reporter/main.bro | 27 +++++++++++++++++------ 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/scripts/base/frameworks/reporter/main.bro b/scripts/base/frameworks/reporter/main.bro index 8b45819442..0248b82d10 100644 --- a/scripts/base/frameworks/reporter/main.bro +++ b/scripts/base/frameworks/reporter/main.bro @@ -37,15 +37,15 @@ export { location: string &log &optional; }; - ## Send reporter error messages to STDERR by default. The option to + ## Tunable for sending reporter warning messages to STDERR. The option to + ## turn it off is presented here in case Bro is being run by some + ## external harness and shouldn't output anything to the console. + const warnings_to_stderr = T &redef; + + ## Tunable for sending reporter error messages to STDERR. The option to ## turn it off is presented here in case Bro is being run by some ## external harness and shouldn't output anything to the console. const errors_to_stderr = T &redef; - - ## Send reporter warning messages to STDERR by default. The option to - ## turn it off is presented here in case Bro is being run by some - ## external harness and shouldn't output anything to the console. - const warnings_to_stderr = T &redef; } global stderr: file; @@ -65,13 +65,26 @@ event reporter_info(t: time, msg: string, location: string) &priority=-5 event reporter_warning(t: time, msg: string, location: string) &priority=-5 { + if ( warnings_to_stderr ) + { + if ( t > double_to_time(0.0) ) + print stderr, fmt("WARNING: %.6f %s (%s)", t, msg, location); + else + print stderr, fmt("WARNING: %s (%s)", msg, location); + } + Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]); } event reporter_error(t: time, msg: string, location: string) &priority=-5 { if ( errors_to_stderr ) - print stderr, fmt("ERROR: %s", msg); + { + if ( t > double_to_time(0.0) ) + print stderr, fmt("ERROR: %.6f %s (%s)", t, msg, location); + else + print stderr, fmt("ERROR: %s (%s)", msg, location); + } Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]); } From cfe1402281eeb5fc935485f5e8c8082395820c29 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 9 Aug 2012 14:48:46 -0400 Subject: [PATCH 567/651] A couple of tests for printing reporter messages to STDERR. 
--- .../.stderr | 0 .../reporter.log | 8 ++++++++ .../scripts.base.frameworks.reporter.stderr/.stderr | 1 + .../reporter.log | 8 ++++++++ .../base/frameworks/reporter/disable-stderr.bro | 13 +++++++++++++ .../scripts/base/frameworks/reporter/stderr.bro | 10 ++++++++++ 6 files changed, 40 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/.stderr create mode 100644 testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log create mode 100644 testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/.stderr create mode 100644 testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log create mode 100644 testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro create mode 100644 testing/btest/scripts/base/frameworks/reporter/stderr.bro diff --git a/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/.stderr b/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/.stderr new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log b/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log new file mode 100644 index 0000000000..5c6e795074 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path reporter +#fields ts level message location +#types time enum string string +0.000000 Reporter::ERROR no such index (test[3]) /blah/testing/btest/.tmp/scripts.base.frameworks.reporter.disable-stderr/disable-stderr.bro, line 12 diff --git a/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/.stderr b/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/.stderr new file mode 100644 index 0000000000..78af1e7a73 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/.stderr @@ -0,0 +1 @@ +ERROR: no such index (test[3]) (/blah/testing/btest/.tmp/scripts.base.frameworks.reporter.stderr/stderr.bro, line 9) diff --git a/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log b/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log new file mode 100644 index 0000000000..4a00bb95ad --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path reporter +#fields ts level message location +#types time enum string string +0.000000 Reporter::ERROR no such index (test[3]) /blah/testing/btest/.tmp/scripts.base.frameworks.reporter.stderr/stderr.bro, line 9 diff --git a/testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro b/testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro new file mode 100644 index 0000000000..438e24d80b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro @@ -0,0 +1,13 @@ +# @TEST-EXEC: bro %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff reporter.log + +redef Reporter::warnings_to_stderr = F; +redef Reporter::errors_to_stderr = F; + +global test: table[count] of string = {}; + +event bro_init() + { + print test[3]; + } \ No newline at end of file diff --git a/testing/btest/scripts/base/frameworks/reporter/stderr.bro 
b/testing/btest/scripts/base/frameworks/reporter/stderr.bro new file mode 100644 index 0000000000..7ea748d94f --- /dev/null +++ b/testing/btest/scripts/base/frameworks/reporter/stderr.bro @@ -0,0 +1,10 @@ +# @TEST-EXEC: bro %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff reporter.log + +global test: table[count] of string = {}; + +event bro_init() + { + print test[3]; + } \ No newline at end of file From 38912c182c0d6d051b3040fb1a206f102b65966e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 10 Aug 2012 12:33:45 -0700 Subject: [PATCH 568/651] Updating submodule(s). [nomail] --- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aux/binpac b/aux/binpac index 22120825f8..99e7a27431 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 22120825f8ad70e051ef4ca42f2199aa195dff40 +Subproject commit 99e7a274319619a94a421eb62537c7a5c184f71b diff --git a/aux/bro-aux b/aux/bro-aux index 941ee753f7..c691c01e9c 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 941ee753f7c71ec08fc29de04f09a8a83aebb69d +Subproject commit c691c01e9cefae5a79bcd4b0f84ca387c8c587a7 diff --git a/aux/broccoli b/aux/broccoli index 5ff3e6a8e8..b3692a02ba 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 5ff3e6a8e8535ed91e1f70d355b815ae8eeacb71 +Subproject commit b3692a02bae9a47d701d2d547e327dd429a86e76 diff --git a/aux/broctl b/aux/broctl index 903108f6b4..84428286a1 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 903108f6b43ad228309713da880026d50add41f4 +Subproject commit 84428286a1980e21cafc4e066d95bf58f82a92b8 From d1c78d030045569fa1205e73a52aafe8483e9409 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 10 Aug 2012 13:10:24 -0700 Subject: [PATCH 569/651] Updating baselines. 
--- .../btest/Baseline/core.reporter-error-in-handler/output | 3 ++- testing/btest/Baseline/core.reporter-runtime-error/output | 2 +- testing/btest/Baseline/core.reporter/output | 7 ++++--- .../reporter.log | 4 +++- .../scripts.base.frameworks.reporter.stderr/reporter.log | 4 +++- .../scripts/base/frameworks/reporter/disable-stderr.bro | 4 ++-- testing/btest/scripts/base/frameworks/reporter/stderr.bro | 4 ++-- 7 files changed, 17 insertions(+), 11 deletions(-) diff --git a/testing/btest/Baseline/core.reporter-error-in-handler/output b/testing/btest/Baseline/core.reporter-error-in-handler/output index 83b310ab61..190631f4d1 100644 --- a/testing/btest/Baseline/core.reporter-error-in-handler/output +++ b/testing/btest/Baseline/core.reporter-error-in-handler/output @@ -1,2 +1,3 @@ -error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.bro, line 22: no such index (a[2]) +ERROR: no such index (a[1]) (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.bro, line 28) + 1st error printed on script level diff --git a/testing/btest/Baseline/core.reporter-runtime-error/output b/testing/btest/Baseline/core.reporter-runtime-error/output index 59bcc3ac9b..94f7860cb4 100644 --- a/testing/btest/Baseline/core.reporter-runtime-error/output +++ b/testing/btest/Baseline/core.reporter-runtime-error/output @@ -1 +1 @@ -error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-runtime-error/reporter-runtime-error.bro, line 12: no such index (a[1]) +ERROR: no such index (a[2]) (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter-runtime-error/reporter-runtime-error.bro, line 9) diff --git a/testing/btest/Baseline/core.reporter/output b/testing/btest/Baseline/core.reporter/output index 2735adc931..b4f89bad2f 100644 --- a/testing/btest/Baseline/core.reporter/output +++ b/testing/btest/Baseline/core.reporter/output @@ -1,3 +1,4 @@ -/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 52: pre test-info -warning in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 53: pre test-warning -error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 54: pre test-error +WARNING: init test-warning (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 9) +ERROR: init test-error (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 10) +WARNING: done test-warning (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 16) +ERROR: done test-error (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 17) diff --git a/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log b/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log index 5c6e795074..144c094b2f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log +++ b/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path reporter +#open 2012-08-10-20-09-16 #fields ts level message location #types time enum string string -0.000000 Reporter::ERROR no such index (test[3]) /blah/testing/btest/.tmp/scripts.base.frameworks.reporter.disable-stderr/disable-stderr.bro, line 12 +0.000000 Reporter::ERROR no such index (test[3]) 
/da/home/robin/bro/master/testing/btest/.tmp/scripts.base.frameworks.reporter.disable-stderr/disable-stderr.bro, line 12 +#close 2012-08-10-20-09-16 diff --git a/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log b/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log index 4a00bb95ad..b314bc45c3 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log +++ b/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log @@ -3,6 +3,8 @@ #empty_field (empty) #unset_field - #path reporter +#open 2012-08-10-20-09-23 #fields ts level message location #types time enum string string -0.000000 Reporter::ERROR no such index (test[3]) /blah/testing/btest/.tmp/scripts.base.frameworks.reporter.stderr/stderr.bro, line 9 +0.000000 Reporter::ERROR no such index (test[3]) /da/home/robin/bro/master/testing/btest/.tmp/scripts.base.frameworks.reporter.stderr/stderr.bro, line 9 +#close 2012-08-10-20-09-23 diff --git a/testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro b/testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro index 438e24d80b..b1afb99b5c 100644 --- a/testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro +++ b/testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro @@ -1,6 +1,6 @@ # @TEST-EXEC: bro %INPUT # @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff reporter.log +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log redef Reporter::warnings_to_stderr = F; redef Reporter::errors_to_stderr = F; @@ -10,4 +10,4 @@ global test: table[count] of string = {}; event bro_init() { print test[3]; - } \ No newline at end of file + } diff --git a/testing/btest/scripts/base/frameworks/reporter/stderr.bro b/testing/btest/scripts/base/frameworks/reporter/stderr.bro index 7ea748d94f..ef01c9fdf9 100644 --- a/testing/btest/scripts/base/frameworks/reporter/stderr.bro +++ b/testing/btest/scripts/base/frameworks/reporter/stderr.bro @@ -1,10 +1,10 @@ # @TEST-EXEC: bro %INPUT # @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff reporter.log +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log global test: table[count] of string = {}; event bro_init() { print test[3]; - } \ No newline at end of file + } From eee4fbf7ad2b8855f5d6a488b5a7b83bd75dfe9b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 10 Aug 2012 13:33:57 -0700 Subject: [PATCH 570/651] Updating submodule(s). 
[nomail] --- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aux/binpac b/aux/binpac index 99e7a27431..22120825f8 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 99e7a274319619a94a421eb62537c7a5c184f71b +Subproject commit 22120825f8ad70e051ef4ca42f2199aa195dff40 diff --git a/aux/bro-aux b/aux/bro-aux index c691c01e9c..941ee753f7 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit c691c01e9cefae5a79bcd4b0f84ca387c8c587a7 +Subproject commit 941ee753f7c71ec08fc29de04f09a8a83aebb69d diff --git a/aux/broccoli b/aux/broccoli index b3692a02ba..5ff3e6a8e8 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit b3692a02bae9a47d701d2d547e327dd429a86e76 +Subproject commit 5ff3e6a8e8535ed91e1f70d355b815ae8eeacb71 diff --git a/aux/broctl b/aux/broctl index 84428286a1..6d0eb6083a 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 84428286a1980e21cafc4e066d95bf58f82a92b8 +Subproject commit 6d0eb6083acdc77e0a912bec0fb23df79b98da63 From 205ad78369701a5e67260b421411a52b28c45440 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Tue, 14 Aug 2012 15:09:38 -0400 Subject: [PATCH 571/651] Fix some problems in logs-to-elasticsearch.bro --- scripts/policy/tuning/logs-to-elasticsearch.bro | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/policy/tuning/logs-to-elasticsearch.bro b/scripts/policy/tuning/logs-to-elasticsearch.bro index 207a9acc04..2a4b70362a 100644 --- a/scripts/policy/tuning/logs-to-elasticsearch.bro +++ b/scripts/policy/tuning/logs-to-elasticsearch.bro @@ -8,13 +8,13 @@ export { ## Optionally ignore any :bro:type:`Log::ID` from being sent to ## ElasticSearch with this script. - const excluded_log_ids: set[string] = set("Communication::LOG") &redef; + const excluded_log_ids: set[Log::ID] &redef; ## If you want to explicitly only send certain :bro:type:`Log::ID` ## streams, add them to this set. If the set remains empty, all will ## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option will remain in ## effect as well. - const send_logs: set[string] = set() &redef; + const send_logs: set[Log::ID] &redef; } event bro_init() &priority=-5 @@ -24,8 +24,8 @@ event bro_init() &priority=-5 for ( stream_id in Log::active_streams ) { - if ( fmt("%s", stream_id) in excluded_log_ids || - (|send_logs| > 0 && fmt("%s", stream_id) !in send_logs) ) + if ( stream_id in excluded_log_ids || + (|send_logs| > 0 && stream_id !in send_logs) ) next; local filter: Log::Filter = [$name = "default-es", From b13196cbf194419836f9b7627aab5cab25c47397 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 16 Aug 2012 09:24:25 -0400 Subject: [PATCH 572/651] Fixed more potential problems with deadlocked ES threads and signals from libcurl. --- src/logging/writers/ElasticSearch.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc index e688686b35..cb3248a044 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/ElasticSearch.cc @@ -371,7 +371,11 @@ bool ElasticSearch::HTTPSend(CURL *handle) // The best (only?) way to disable that is to just use HTTP 1.0 curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); - //curl_easy_setopt(handle, CURLOPT_TIMEOUT_MS, transfer_timeout); + // Some timeout options. These will need more attention later. 
+ curl_easy_setopt(handle, CURLOPT_NOSIGNAL, 1); + curl_easy_setopt(handle, CURLOPT_CONNECTTIMEOUT_MS, transfer_timeout); + curl_easy_setopt(handle, CURLOPT_TIMEOUT_MS, transfer_timeout*2); + curl_easy_setopt(handle, CURLOPT_DNS_CACHE_TIMEOUT, 60*60); CURLcode return_code = curl_easy_perform(handle); From 4da209d3b1fe6fa9c5118e055752843b2fb73a45 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 16 Aug 2012 11:48:56 -0700 Subject: [PATCH 573/651] Installing a handler for running out of memory in "new". Bro will now print an error message in that case rather than abort with an uncaught exception. --- CHANGES | 6 ++++++ VERSION | 2 +- src/main.cc | 4 ++++ src/util.cc | 8 +++++++- 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 08998ab9f4..f0c73ce8d9 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,10 @@ +2.1-beta-21 | 2012-08-16 11:48:56 -0700 + + * Installing a handler for running out of memory in "new". Bro will + now print an error message in that case rather than abort with an + uncaught exception. (Robin Sommer) + 2.1-beta-20 | 2012-08-16 11:43:31 -0700 * Fixed potential problems with ElasticSearch output plugin. (Seth diff --git a/VERSION b/VERSION index c42c76c8ba..5d7a2a2cce 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1-beta-20 +2.1-beta-21 diff --git a/src/main.cc b/src/main.cc index 407f67c9af..5999186240 100644 --- a/src/main.cc +++ b/src/main.cc @@ -337,6 +337,8 @@ void terminate_bro() delete log_mgr; delete thread_mgr; delete reporter; + + reporter = 0; } void termination_signal() @@ -380,6 +382,8 @@ static void bro_new_handler() int main(int argc, char** argv) { + std::set_new_handler(bro_new_handler); + brofiler.ReadStats(); bro_argc = argc; diff --git a/src/util.cc b/src/util.cc index 2d981e952e..3b6fcac76f 100644 --- a/src/util.cc +++ b/src/util.cc @@ -1383,7 +1383,13 @@ void safe_close(int fd) void out_of_memory(const char* where) { - reporter->FatalError("out of memory in %s.\n", where); + fprintf(stderr, "out of memory in %s.\n", where); + + if ( reporter ) + // Guess that might fail here if memory is really tight ... + reporter->FatalError("out of memory in %s.\n", where); + + abort(); } void get_memory_usage(unsigned int* total, unsigned int* malloced) From a6f7fd9c874ffdab31c3c79c9956857617b723d5 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 16 Aug 2012 15:59:26 -0500 Subject: [PATCH 574/651] Fix memory leak of serialized IDs when compiled with --enable-debug. When using --enable-debug, values keep track of the last identifier to which they were bound by storing a ref'd ID pointer. This could lead to some circular dependencies in which an ID is never reclaimed because the Val is bound to the ID and the ID is bound to the Val, with both holding references to each other. There might be more cases where this feature of --enable-debug caused a leak, but it showed up in particular when running the core.leaks.remote unit test due to the internal SendID("peer_description") call during the handshake between remote processes. Other tests showed the send_id() BIF leaked more generally. Tracking the ID last bound to a Val through just the identifier string instead of a ref'd ID pointer fixes the leak. 
--- src/RemoteSerializer.cc | 5 ----- src/Val.cc | 2 +- src/Val.h | 16 +++++++++------- 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index cfd20eba39..564ad2be68 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -2897,11 +2897,6 @@ void RemoteSerializer::GotID(ID* id, Val* val) (desc && *desc) ? desc : "not set"), current_peer); -#ifdef USE_PERFTOOLS_DEBUG - // May still be cached, but we don't care. - heap_checker->IgnoreObject(id); -#endif - Unref(id); return; } diff --git a/src/Val.cc b/src/Val.cc index 8a8c2b18c0..79fa8a0c69 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -64,7 +64,7 @@ Val::~Val() Unref(type); #ifdef DEBUG - Unref(bound_id); + delete [] bound_id; #endif } diff --git a/src/Val.h b/src/Val.h index 2ca18e6131..c3ec5b04fb 100644 --- a/src/Val.h +++ b/src/Val.h @@ -347,13 +347,15 @@ public: #ifdef DEBUG // For debugging, we keep a reference to the global ID to which a // value has been bound *last*. - ID* GetID() const { return bound_id; } + ID* GetID() const + { + return bound_id ? global_scope()->Lookup(bound_id) : 0; + } + void SetID(ID* id) { - if ( bound_id ) - ::Unref(bound_id); - bound_id = id; - ::Ref(bound_id); + delete [] bound_id; + bound_id = id ? copy_string(id->Name()) : 0; } #endif @@ -401,8 +403,8 @@ protected: RecordVal* attribs; #ifdef DEBUG - // For debugging, we keep the ID to which a Val is bound. - ID* bound_id; + // For debugging, we keep the name of the ID to which a Val is bound. + const char* bound_id; #endif }; From 508ac1c7ba1b9fbddc128a109b51bd6376ba4bd9 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 16 Aug 2012 16:33:46 -0500 Subject: [PATCH 575/651] Unit test tweaks/fixes. - Some baselines for tests in "leaks" group were outdated. - Changed a few of the cluster/communication tests to terminate more explicitly instead of relying on btest-bg-wait to kill processes. This makes the tests finish faster in the success case and makes the reason for failing clearer in the failure case.
--- .../manager-1.metrics.log | 8 +++-- .../core.leaks.remote/sender.test.failure.log | 8 +++-- .../core.leaks.remote/sender.test.log | 12 ++++--- .../core.leaks.remote/sender.test.success.log | 6 ++-- testing/btest/core/leaks/basic-cluster.bro | 23 +++++++++++++- testing/btest/core/leaks/remote.bro | 31 ++++++++++++++----- .../base/frameworks/logging/remote.bro | 23 +++++++++++--- .../base/frameworks/metrics/basic-cluster.bro | 23 +++++++++++++- .../metrics/cluster-intermediate-update.bro | 17 +++++++++- 9 files changed, 122 insertions(+), 29 deletions(-) diff --git a/testing/btest/Baseline/core.leaks.basic-cluster/manager-1.metrics.log b/testing/btest/Baseline/core.leaks.basic-cluster/manager-1.metrics.log index 42fcd6a526..cb1bd5af01 100644 --- a/testing/btest/Baseline/core.leaks.basic-cluster/manager-1.metrics.log +++ b/testing/btest/Baseline/core.leaks.basic-cluster/manager-1.metrics.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path metrics +#open 2012-07-20-01-50-41 #fields ts metric_id filter_name index.host index.str index.network value #types time enum string addr string subnet count -1331256494.591966 TEST_METRIC foo-bar 6.5.4.3 - - 4 -1331256494.591966 TEST_METRIC foo-bar 7.2.1.5 - - 2 -1331256494.591966 TEST_METRIC foo-bar 1.2.3.4 - - 6 +1342749041.601712 TEST_METRIC foo-bar 6.5.4.3 - - 4 +1342749041.601712 TEST_METRIC foo-bar 7.2.1.5 - - 2 +1342749041.601712 TEST_METRIC foo-bar 1.2.3.4 - - 6 +#close 2012-07-20-01-50-49 diff --git a/testing/btest/Baseline/core.leaks.remote/sender.test.failure.log b/testing/btest/Baseline/core.leaks.remote/sender.test.failure.log index 5a26f322f4..71e1d18c73 100644 --- a/testing/btest/Baseline/core.leaks.remote/sender.test.failure.log +++ b/testing/btest/Baseline/core.leaks.remote/sender.test.failure.log @@ -3,8 +3,10 @@ #empty_field (empty) #unset_field - #path test.failure +#open 2012-07-20-01-50-18 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure US -1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure UK -1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure MX +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure US +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure UK +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure MX +#close 2012-07-20-01-50-18 diff --git a/testing/btest/Baseline/core.leaks.remote/sender.test.log b/testing/btest/Baseline/core.leaks.remote/sender.test.log index 9d2ba26f48..bc3dac5a1a 100644 --- a/testing/btest/Baseline/core.leaks.remote/sender.test.log +++ b/testing/btest/Baseline/core.leaks.remote/sender.test.log @@ -3,10 +3,12 @@ #empty_field (empty) #unset_field - #path test +#open 2012-07-20-01-50-18 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1331256472.375609 1.2.3.4 1234 2.3.4.5 80 success unknown -1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure US -1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure UK -1331256472.375609 1.2.3.4 1234 2.3.4.5 80 success BR -1331256472.375609 1.2.3.4 1234 2.3.4.5 80 failure MX +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success unknown +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure US +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure UK +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success BR +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 failure MX +#close 2012-07-20-01-50-18 diff --git a/testing/btest/Baseline/core.leaks.remote/sender.test.success.log 
b/testing/btest/Baseline/core.leaks.remote/sender.test.success.log index 1b2ed452a0..f0b26454b4 100644 --- a/testing/btest/Baseline/core.leaks.remote/sender.test.success.log +++ b/testing/btest/Baseline/core.leaks.remote/sender.test.success.log @@ -3,7 +3,9 @@ #empty_field (empty) #unset_field - #path test.success +#open 2012-07-20-01-50-18 #fields t id.orig_h id.orig_p id.resp_h id.resp_p status country #types time addr port addr port string string -1331256472.375609 1.2.3.4 1234 2.3.4.5 80 success unknown -1331256472.375609 1.2.3.4 1234 2.3.4.5 80 success BR +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success unknown +1342749018.970682 1.2.3.4 1234 2.3.4.5 80 success BR +#close 2012-07-20-01-50-18 diff --git a/testing/btest/core/leaks/basic-cluster.bro b/testing/btest/core/leaks/basic-cluster.bro index f5b40c1104..d9d2f97b1e 100644 --- a/testing/btest/core/leaks/basic-cluster.bro +++ b/testing/btest/core/leaks/basic-cluster.bro @@ -9,7 +9,7 @@ # @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT # @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT -# @TEST-EXEC: btest-bg-wait -k 30 +# @TEST-EXEC: btest-bg-wait 40 # @TEST-EXEC: btest-diff manager-1/metrics.log @TEST-START-FILE cluster-layout.bro @@ -40,3 +40,24 @@ event bro_init() &priority=5 Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1); } } + +event remote_connection_closed(p: event_peer) + { + terminate(); + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global n = 0; + +event Metrics::log_metrics(rec: Metrics::Info) + { + n = n + 1; + if ( n == 3 ) + { + terminate_communication(); + terminate(); + } + } + +@endif diff --git a/testing/btest/core/leaks/remote.bro b/testing/btest/core/leaks/remote.bro index f888d8f6ee..8c8dc73364 100644 --- a/testing/btest/core/leaks/remote.bro +++ b/testing/btest/core/leaks/remote.bro @@ -4,17 +4,19 @@ # # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks # -# @TEST-EXEC: btest-bg-run sender HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m --pseudo-realtime %INPUT ../sender.bro +# @TEST-EXEC: btest-bg-run sender HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -b -m --pseudo-realtime %INPUT ../sender.bro # @TEST-EXEC: sleep 1 -# @TEST-EXEC: btest-bg-run receiver HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m --pseudo-realtime %INPUT ../receiver.bro +# @TEST-EXEC: btest-bg-run receiver HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local bro -b -m --pseudo-realtime %INPUT ../receiver.bro # @TEST-EXEC: sleep 1 -# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-bg-wait 30 # @TEST-EXEC: btest-diff sender/test.log # @TEST-EXEC: btest-diff sender/test.failure.log # @TEST-EXEC: btest-diff sender/test.success.log -# @TEST-EXEC: cmp receiver/test.log sender/test.log -# @TEST-EXEC: cmp receiver/test.failure.log sender/test.failure.log -# @TEST-EXEC: cmp receiver/test.success.log sender/test.success.log +# @TEST-EXEC: ( cd sender && for i in *.log; do cat $i | $SCRIPTS/diff-remove-timestamps >c.$i; done ) +# @TEST-EXEC: ( cd receiver && for i in *.log; do cat $i | $SCRIPTS/diff-remove-timestamps >c.$i; done ) +# @TEST-EXEC: cmp receiver/c.test.log sender/c.test.log +# @TEST-EXEC: cmp receiver/c.test.failure.log sender/c.test.failure.log +# @TEST-EXEC: cmp receiver/c.test.success.log sender/c.test.success.log # This is the common part loaded by both sender and receiver. module Test; @@ -43,10 +45,10 @@ event bro_init() @TEST-START-FILE sender.bro -module Test; - @load frameworks/communication/listen +module Test; + function fail(rec: Log): bool { return rec$status != "success"; @@ -68,14 +70,27 @@ event remote_connection_handshake_done(p: event_peer) Log::write(Test::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); disconnect(p); } + +event remote_connection_closed(p: event_peer) + { + terminate(); + } + @TEST-END-FILE @TEST-START-FILE receiver.bro ##### +@load base/frameworks/communication + redef Communication::nodes += { ["foo"] = [$host = 127.0.0.1, $connect=T, $request_logs=T] }; +event remote_connection_closed(p: event_peer) + { + terminate(); + } + @TEST-END-FILE diff --git a/testing/btest/scripts/base/frameworks/logging/remote.bro b/testing/btest/scripts/base/frameworks/logging/remote.bro index 48683148f5..ba577cc92b 100644 --- a/testing/btest/scripts/base/frameworks/logging/remote.bro +++ b/testing/btest/scripts/base/frameworks/logging/remote.bro @@ -1,10 +1,10 @@ # @TEST-SERIALIZE: comm # -# @TEST-EXEC: btest-bg-run sender bro --pseudo-realtime %INPUT ../sender.bro +# @TEST-EXEC: btest-bg-run sender bro -b --pseudo-realtime %INPUT ../sender.bro # @TEST-EXEC: sleep 1 -# @TEST-EXEC: btest-bg-run receiver bro --pseudo-realtime %INPUT ../receiver.bro +# @TEST-EXEC: btest-bg-run receiver bro -b --pseudo-realtime %INPUT ../receiver.bro # @TEST-EXEC: sleep 1 -# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-bg-wait 15 # @TEST-EXEC: btest-diff sender/test.log # @TEST-EXEC: btest-diff sender/test.failure.log # @TEST-EXEC: btest-diff sender/test.success.log @@ -41,10 +41,10 @@ event bro_init() @TEST-START-FILE sender.bro -module Test; - @load frameworks/communication/listen +module Test; + function fail(rec: Log): bool { return rec$status != "success"; @@ -66,14 +66,27 @@ event remote_connection_handshake_done(p: event_peer) Log::write(Test::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); disconnect(p); } + +event remote_connection_closed(p: event_peer) + { + terminate(); + } + @TEST-END-FILE @TEST-START-FILE receiver.bro ##### +@load base/frameworks/communication + redef Communication::nodes += { ["foo"] = [$host = 127.0.0.1, $connect=T, $request_logs=T] }; +event remote_connection_closed(p: event_peer) + { + terminate(); + } + @TEST-END-FILE diff --git a/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro b/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro index 09479b7a2f..4aa1afa96f 100644 --- 
a/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro +++ b/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro @@ -5,7 +5,7 @@ # @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT # @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-bg-wait 20 # @TEST-EXEC: btest-diff manager-1/metrics.log @TEST-START-FILE cluster-layout.bro @@ -36,3 +36,24 @@ event bro_init() &priority=5 Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1); } } + +event remote_connection_closed(p: event_peer) + { + terminate(); + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global n = 0; + +event Metrics::log_metrics(rec: Metrics::Info) + { + n = n + 1; + if ( n == 3 ) + { + terminate_communication(); + terminate(); + } + } + +@endif diff --git a/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro b/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro index 654e42976a..db2c7e9f5d 100644 --- a/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro +++ b/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro @@ -5,7 +5,7 @@ # @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT # @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-bg-wait 20 # @TEST-EXEC: btest-diff manager-1/notice.log @TEST-START-FILE cluster-layout.bro @@ -37,6 +37,21 @@ event bro_init() &priority=5 $log=T]); } +event remote_connection_closed(p: event_peer) + { + terminate(); + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +event Notice::log_notice(rec: Notice::Info) + { + terminate_communication(); + terminate(); + } + +@endif + @if ( Cluster::local_node_type() == Cluster::WORKER ) event do_metrics(i: count) From 907c92e1ccd692023ea305fa9e1acba5f4819aa9 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 17 Aug 2012 15:22:51 -0500 Subject: [PATCH 576/651] Fix mime type diff canonifier to also skip mime_desc columns. In particular, the ftp.log baseline in the new ipv6 test in bro-testing was failing on various platforms because of this. --- testing/scripts/diff-remove-mime-types | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/testing/scripts/diff-remove-mime-types b/testing/scripts/diff-remove-mime-types index fb447a9989..b8cc3d1e6d 100755 --- a/testing/scripts/diff-remove-mime-types +++ b/testing/scripts/diff-remove-mime-types @@ -3,20 +3,27 @@ # A diff canonifier that removes all MIME types because libmagic output # can differ between installations. -BEGIN { FS="\t"; OFS="\t"; column = -1; } +BEGIN { FS="\t"; OFS="\t"; type_col = -1; desc_col = -1 } /^#fields/ { for ( i = 2; i < NF; ++i ) + { if ( $i == "mime_type" ) - column = i-1; + type_col = i-1; + if ( $i == "mime_desc" ) + desc_col = i-1; + } } -column >= 0 { - if ( $column != "-" ) +function remove_mime (n) { + if ( n >= 0 && $n != "-" ) # Mark that it's set, but ignore content. - $column = "+"; + $n = "+" } +remove_mime(type_col) +remove_mime(desc_col) + { print; } From f201a9f1a7f52329f1c8db35ab46dbfa50f0bda4 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 17 Aug 2012 17:27:02 -0500 Subject: [PATCH 577/651] Fix portability of printing to files returned by open("/dev/stderr").
The BroFile ctor now wraps /dev/std{in,out,err} string arguments into the actual FILE* provided by stdio.h because use of the former directly isn't POSIX compliant and led to subtle differences that broke unit tests on certain platforms (e.g. OS X redirection of stderr behavior started differing from Linux). The BroFile (un)serialization methods already did this kind of logic, so adding it in the ctor also should make things more consistent. Some of the reporter-related unit tests looked like they were missing output because of this, and the coverage test for bare-mode errors needed tweaking to branch on whether or not libcurl was available (since the error output differs when elasticsearch isn't there). --- src/File.cc | 14 ++++++++++++-- .../Baseline/core.reporter-error-in-handler/output | 4 ++-- .../Baseline/core.reporter-runtime-error/output | 3 ++- .../btest/Baseline/core.reporter/logger-test.log | 12 ++++++------ testing/btest/Baseline/core.reporter/output | 11 +++++++---- .../unique_errors_no_elasticsearch | 1 + testing/btest/coverage/bare-mode-errors.test | 3 ++- 7 files changed, 32 insertions(+), 16 deletions(-) create mode 100644 testing/btest/Baseline/coverage.bare-mode-errors/unique_errors_no_elasticsearch diff --git a/src/File.cc b/src/File.cc index 20e845c09f..20ab2e1013 100644 --- a/src/File.cc +++ b/src/File.cc @@ -138,11 +138,21 @@ BroFile::BroFile(FILE* arg_f, const char* arg_name, const char* arg_access) BroFile::BroFile(const char* arg_name, const char* arg_access, BroType* arg_t) { Init(); - + f = 0; name = copy_string(arg_name); access = copy_string(arg_access); t = arg_t ? arg_t : base_type(TYPE_STRING); - if ( ! Open() ) + + if ( streq(name, "/dev/stdin") ) + f = stdin; + else if ( streq(name, "/dev/stdout") ) + f = stdout; + else if ( streq(name, "/dev/stderr") ) + f = stderr; + + if ( f ) + is_open = 1; + else if ( ! 
Open() ) { reporter->Error("cannot open %s: %s", name, strerror(errno)); is_open = 0; diff --git a/testing/btest/Baseline/core.reporter-error-in-handler/output b/testing/btest/Baseline/core.reporter-error-in-handler/output index 190631f4d1..b20b1b2292 100644 --- a/testing/btest/Baseline/core.reporter-error-in-handler/output +++ b/testing/btest/Baseline/core.reporter-error-in-handler/output @@ -1,3 +1,3 @@ -ERROR: no such index (a[1]) (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.bro, line 28) - +error in /home/jsiwek/bro/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.bro, line 22: no such index (a[2]) +ERROR: no such index (a[1]) (/home/jsiwek/bro/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.bro, line 28) 1st error printed on script level diff --git a/testing/btest/Baseline/core.reporter-runtime-error/output b/testing/btest/Baseline/core.reporter-runtime-error/output index 94f7860cb4..5a03f5feb2 100644 --- a/testing/btest/Baseline/core.reporter-runtime-error/output +++ b/testing/btest/Baseline/core.reporter-runtime-error/output @@ -1 +1,2 @@ -ERROR: no such index (a[2]) (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter-runtime-error/reporter-runtime-error.bro, line 9) +error in /home/jsiwek/bro/testing/btest/.tmp/core.reporter-runtime-error/reporter-runtime-error.bro, line 12: no such index (a[1]) +ERROR: no such index (a[2]) (/home/jsiwek/bro/testing/btest/.tmp/core.reporter-runtime-error/reporter-runtime-error.bro, line 9) diff --git a/testing/btest/Baseline/core.reporter/logger-test.log b/testing/btest/Baseline/core.reporter/logger-test.log index 6f7ba1d8c7..5afd904b63 100644 --- a/testing/btest/Baseline/core.reporter/logger-test.log +++ b/testing/btest/Baseline/core.reporter/logger-test.log @@ -1,6 +1,6 @@ -reporter_info|init test-info|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 8|0.000000 -reporter_warning|init test-warning|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 9|0.000000 -reporter_error|init test-error|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 10|0.000000 -reporter_info|done test-info|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 15|0.000000 -reporter_warning|done test-warning|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 16|0.000000 -reporter_error|done test-error|/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 17|0.000000 +reporter_info|init test-info|/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 8|0.000000 +reporter_warning|init test-warning|/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 9|0.000000 +reporter_error|init test-error|/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 10|0.000000 +reporter_info|done test-info|/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 15|0.000000 +reporter_warning|done test-warning|/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 16|0.000000 +reporter_error|done test-error|/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 17|0.000000 diff --git a/testing/btest/Baseline/core.reporter/output b/testing/btest/Baseline/core.reporter/output index b4f89bad2f..f2c59259c2 100644 --- a/testing/btest/Baseline/core.reporter/output +++ b/testing/btest/Baseline/core.reporter/output @@ -1,4 +1,7 @@ -WARNING: init 
test-warning (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 9) -ERROR: init test-error (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 10) -WARNING: done test-warning (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 16) -ERROR: done test-error (/da/home/robin/bro/master/testing/btest/.tmp/core.reporter/reporter.bro, line 17) +/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 52: pre test-info +warning in /home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 53: pre test-warning +error in /home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 54: pre test-error +WARNING: init test-warning (/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 9) +ERROR: init test-error (/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 10) +WARNING: done test-warning (/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 16) +ERROR: done test-error (/home/jsiwek/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 17) diff --git a/testing/btest/Baseline/coverage.bare-mode-errors/unique_errors_no_elasticsearch b/testing/btest/Baseline/coverage.bare-mode-errors/unique_errors_no_elasticsearch new file mode 100644 index 0000000000..e95f88e74b --- /dev/null +++ b/testing/btest/Baseline/coverage.bare-mode-errors/unique_errors_no_elasticsearch @@ -0,0 +1 @@ +error: unknown writer type requested diff --git a/testing/btest/coverage/bare-mode-errors.test b/testing/btest/coverage/bare-mode-errors.test index 21e7d4f4a9..7084d74e83 100644 --- a/testing/btest/coverage/bare-mode-errors.test +++ b/testing/btest/coverage/bare-mode-errors.test @@ -10,4 +10,5 @@ # @TEST-EXEC: test -d $DIST/scripts # @TEST-EXEC: for script in `find $DIST/scripts -name \*\.bro -not -path '*/site/*'`; do echo $script; if echo "$script" | egrep -q 'communication/listen|controllee'; then rm -rf load_attempt .bgprocs; btest-bg-run load_attempt bro -b $script; btest-bg-wait -k 2; cat load_attempt/.stderr >>allerrors; else bro -b $script 2>>allerrors; fi done || exit 0 # @TEST-EXEC: cat allerrors | grep -v "received termination signal" | sort | uniq > unique_errors -# @TEST-EXEC: btest-diff unique_errors +# @TEST-EXEC: if [ $(grep -c CURL_INCLUDE_DIR-NOTFOUND $BUILD/CMakeCache.txt) -ne 0 ]; then cp unique_errors unique_errors_no_elasticsearch; fi +# @TEST-EXEC: if [ $(grep -c CURL_INCLUDE_DIR-NOTFOUND $BUILD/CMakeCache.txt) -ne 0 ]; then btest-diff unique_errors_no_elasticsearch; else btest-diff unique_errors; fi From 0dbf2f18fa679a1231f957e474a1bb1bb59e5042 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Mon, 20 Aug 2012 13:26:17 -0400 Subject: [PATCH 578/651] Add the Stream record to Log:active_streams to make more dynamic logging possible. --- scripts/base/frameworks/logging/main.bro | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index ccc65ddf67..bed76a1ae5 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -329,9 +329,9 @@ export { global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool; ## The streams which are currently active and not disabled. - ## This set is not meant to be modified by users! Only use it for + ## This table is not meant to be modified by users! Only use it for ## examining which streams are active. 
- global active_streams: set[ID] = set(); + global active_streams: table[ID] of Stream = table(); } # We keep a script-level copy of all filters so that we can manipulate them. @@ -417,7 +417,7 @@ function create_stream(id: ID, stream: Stream) : bool if ( ! __create_stream(id, stream) ) return F; - add active_streams[id]; + active_streams[id] = stream; return add_default_filter(id); } From 434d6a84d8bb73cef7704799fcd391375bba5862 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 21 Aug 2012 08:32:42 -0700 Subject: [PATCH 579/651] Linking ES docs into logging document. --- CHANGES | 4 ++++ VERSION | 2 +- doc/logging.rst | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 7b381b5c5d..b6225097db 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,8 @@ +2.1-beta-28 | 2012-08-21 08:32:42 -0700 + + * Linking ES docs into logging document. (Robin Sommer) + 2.1-beta-27 | 2012-08-20 20:06:20 -0700 * Add the Stream record to Log:active_streams to make more dynamic diff --git a/VERSION b/VERSION index e82f524ce7..c403b714f8 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1-beta-27 +2.1-beta-28 diff --git a/doc/logging.rst b/doc/logging.rst index cc6cb1e54d..7fb4205b9a 100644 --- a/doc/logging.rst +++ b/doc/logging.rst @@ -383,3 +383,4 @@ Bro supports the following output formats other than ASCII: :maxdepth: 1 logging-dataseries + logging-elasticsearch From 06b7379bc3f112faab220d59663844d449add3a8 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 21 Aug 2012 14:54:57 -0500 Subject: [PATCH 580/651] Ignore small mem leak every rotation interval for dataseries logs. Not sure if more can be done to work around it, but reported to dataseries devs here: https://github.com/dataseries/DataSeries/issues/1 The core/leaks/dataseries-rotate.bro unit test fails without this. --- src/logging/writers/DataSeries.cc | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index 7d3053e341..bc5a82ec54 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -243,8 +243,25 @@ bool DataSeries::OpenLog(string path) log_file->writeExtentLibrary(log_types); for( size_t i = 0; i < schema_list.size(); ++i ) - extents.insert(std::make_pair(schema_list[i].field_name, - GeneralField::create(log_series, schema_list[i].field_name))); + { + string fn = schema_list[i].field_name; + GeneralField* gf = 0; +#ifdef USE_PERFTOOLS_DEBUG + { + // GeneralField isn't cleaning up some results of xml parsing, reported + // here: https://github.com/dataseries/DataSeries/issues/1 + // Ignore for now to make leak tests pass. There's confidence that + // we do clean up the GeneralField* since the ExtentSeries dtor for + // member log_series would trigger an assert if dynamically allocated + // fields aren't deleted beforehand. + HeapLeakChecker::Disabler disabler; +#endif + gf = GeneralField::create(log_series, fn); +#ifdef USE_PERFTOOLS_DEBUG + } +#endif + extents.insert(std::make_pair(fn, gf)); + } if ( ds_extent_size < ROW_MIN ) { From bb4b68946f9530b119a0144191e8e72a27896b9d Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 21 Aug 2012 15:22:54 -0500 Subject: [PATCH 581/651] Tweak to rotate-custom.bro unit test. 
This one would fail intermittently in the cases where log files were opened or closed on a different second of the time of day from each other since the "out" baseline contains only a single "#open" and "#close" tag (indicating all logs opened/closed on same second of time of day). Piping aggregated log output through the timestamp canonifier before `uniq` makes it so "#open" and "#close" tags for different seconds of the time of day are reduced to a single one. --- testing/btest/scripts/base/frameworks/logging/rotate-custom.bro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro b/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro index 07fc8cef7c..c0f0ef8643 100644 --- a/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro +++ b/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro @@ -1,7 +1,7 @@ # # @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT | egrep "test|test2" | sort >out.tmp # @TEST-EXEC: cat out.tmp pp.log | sort >out -# @TEST-EXEC: for i in `ls test*.log | sort`; do printf '> %s\n' $i; cat $i; done | sort | uniq >>out +# @TEST-EXEC: for i in `ls test*.log | sort`; do printf '> %s\n' $i; cat $i; done | sort | $SCRIPTS/diff-remove-timestamps | uniq >>out # @TEST-EXEC: btest-diff out # @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff .stderr From cd67603f49b3e287d5244d702b62373a265ede10 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 21 Aug 2012 21:48:49 -0700 Subject: [PATCH 582/651] add testcase for input of set. Sets can be imported by not specifying $val in the add_table call. This actually was already implemented, I just completely forgot about it. --- .../scripts.base.frameworks.input.set/out | 7 +++ .../scripts/base/frameworks/input/set.bro | 46 +++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.set/out create mode 100644 testing/btest/scripts/base/frameworks/input/set.bro diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.set/out b/testing/btest/Baseline/scripts.base.frameworks.input.set/out new file mode 100644 index 0000000000..998244cf3f --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.set/out @@ -0,0 +1,7 @@ +{ +192.168.17.7, +192.168.17.42, +192.168.17.14, +192.168.17.1, +192.168.17.2 +} diff --git a/testing/btest/scripts/base/frameworks/input/set.bro b/testing/btest/scripts/base/frameworks/input/set.bro new file mode 100644 index 0000000000..5215523ee3 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/set.bro @@ -0,0 +1,46 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#fields ip +#types addr +192.168.17.1 +192.168.17.2 +192.168.17.7 +192.168.17.14 +192.168.17.42 +@TEST-END-FILE + +@load frameworks/communication/listen + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + ip: addr; +}; + +global servers: set[addr] = set(); + +event bro_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $destination=servers]); + Input::remove("ssh"); + } + +event Input::update_finished(name: string, source:string) + { + print outfile, servers; + close(outfile); + terminate(); + } From ec224ada0679d8dcc1c7925969ba44b459145957 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 21 Aug 2012 22:17:28 -0700 Subject: [PATCH 583/651] single-line documentation addition to main input framework script. --- scripts/base/frameworks/input/main.bro | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 55da6ae7ec..758bc94732 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -8,6 +8,7 @@ export { ## The default input reader used. Defaults to `READER_ASCII`. const default_reader = READER_ASCII &redef; + ## The default reader mode used. Defaults to `MANUAL`. const default_mode = MANUAL &redef; ## TableFilter description type used for the `table` method. From b53be217502d6bf143e61e2f5d09bd7cdd23c525 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 21 Aug 2012 23:00:04 -0700 Subject: [PATCH 584/651] add an option to the input framework that allows the user to choose to not die upon encountering files/functions. I am not entirely sure if I like the approach I took for this; it is a bit... hacky. --- scripts/base/frameworks/input/main.bro | 7 ++ src/input.bif | 4 ++ src/input/Manager.cc | 40 ++++++++++-- src/input/Manager.h | 2 +- .../out | 14 ++++ .../frameworks/input/unsupported_types.bro | 64 +++++++++++++++++++ 6 files changed, 125 insertions(+), 6 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.unsupported_types/out create mode 100644 testing/btest/scripts/base/frameworks/input/unsupported_types.bro diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 55da6ae7ec..e8aa67b23b 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -10,6 +10,13 @@ export { const default_mode = MANUAL &redef; + ## Flag that controls if the input framework accepts records + ## that contain types that are not supported (at the moment + ## file and function). If true, the input framework will + ## warn in these cases, but continue. If false, it will + ## abort. Defaults to false (abort) + const accept_unsupported_types = F &redef; + ## TableFilter description type used for the `table` method. type TableDescription: record { ## Common definitions for tables and events diff --git a/src/input.bif b/src/input.bif index f494ef3b2f..199b665fa6 100644 --- a/src/input.bif +++ b/src/input.bif @@ -34,6 +34,10 @@ function Input::__force_update%(id: string%) : bool return new Val(res, TYPE_BOOL); %} +# Options for the input framework + +const accept_unsupported_types: bool; + # Options for Ascii Reader module InputAscii; diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 3c29f14928..4422a9814f 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -388,6 +388,8 @@ bool Manager::CreateEventStream(RecordVal* fval) FuncType* etype = event->FType()->AsFuncType(); + bool allow_file_func = false; + if ( !
etype->IsEvent() ) { reporter->Error("stream event is a function, not an event"); @@ -453,6 +455,8 @@ bool Manager::CreateEventStream(RecordVal* fval) return false; } + allow_file_func = BifConst::Input::accept_unsupported_types; + } else @@ -461,7 +465,7 @@ bool Manager::CreateEventStream(RecordVal* fval) vector fieldsV; // vector, because UnrollRecordType needs it - bool status = !UnrollRecordType(&fieldsV, fields, ""); + bool status = !UnrollRecordType(&fieldsV, fields, "", allow_file_func); if ( status ) { @@ -609,12 +613,12 @@ bool Manager::CreateTableStream(RecordVal* fval) vector fieldsV; // vector, because we don't know the length beforehands - bool status = !UnrollRecordType(&fieldsV, idx, ""); + bool status = !UnrollRecordType(&fieldsV, idx, "", false); int idxfields = fieldsV.size(); if ( val ) // if we are not a set - status = status || !UnrollRecordType(&fieldsV, val, ""); + status = status || !UnrollRecordType(&fieldsV, val, "", BifConst::Input::accept_unsupported_types); int valfields = fieldsV.size() - idxfields; @@ -773,7 +777,7 @@ bool Manager::RemoveStreamContinuation(ReaderFrontend* reader) } bool Manager::UnrollRecordType(vector *fields, - const RecordType *rec, const string& nameprepend) + const RecordType *rec, const string& nameprepend, bool allow_file_func) { for ( int i = 0; i < rec->NumFields(); i++ ) @@ -781,6 +785,23 @@ bool Manager::UnrollRecordType(vector *fields, if ( ! IsCompatibleType(rec->FieldType(i)) ) { + + // if the field is a file or a function type + // and it is optional, we accept it nevertheless. + // This allows importing logfiles containing this + // stuff that we actually cannot read :) + if ( allow_file_func ) + { + if ( ( rec->FieldType(i)->Tag() == TYPE_FILE || + rec->FieldType(i)->Tag() == TYPE_FUNC ) && + rec->FieldDecl(i)->FindAttr(ATTR_OPTIONAL) + ) + { + reporter->Info("Encountered incompatible type \"%s\" in table definition for ReaderFrontend. Ignoring field.", type_name(rec->FieldType(i)->Tag())); + continue; + } + } + reporter->Error("Incompatible type \"%s\" in table definition for ReaderFrontend", type_name(rec->FieldType(i)->Tag())); return false; } @@ -789,7 +810,7 @@ bool Manager::UnrollRecordType(vector *fields, { string prep = nameprepend + rec->FieldName(i) + "."; - if ( !UnrollRecordType(fields, rec->FieldType(i)->AsRecordType(), prep) ) + if ( !UnrollRecordType(fields, rec->FieldType(i)->AsRecordType(), prep, allow_file_func) ) { return false; } @@ -1675,6 +1696,15 @@ RecordVal* Manager::ValueToRecordVal(const Value* const *vals, Val* fieldVal = 0; if ( request_type->FieldType(i)->Tag() == TYPE_RECORD ) fieldVal = ValueToRecordVal(vals, request_type->FieldType(i)->AsRecordType(), position); + else if ( request_type->FieldType(i)->Tag() == TYPE_FILE || + request_type->FieldType(i)->Tag() == TYPE_FUNC ) + { + // If those two unsupported types are encountered here, they have + // been let through by the type checking. + // That means that they are optional & the user agreed to ignore + // them and has been warned by reporter. + // Hence -> assign null to the field, done. + } else { fieldVal = ValueToVal(vals[*position], request_type->FieldType(i)); diff --git a/src/input/Manager.h b/src/input/Manager.h index 1590042183..cc81df38b7 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -158,7 +158,7 @@ private: // Check if a record is made up of compatible types and return a list // of all fields that are in the record in order. 
Recursively unrolls // records - bool UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend); + bool UnrollRecordType(vector *fields, const RecordType *rec, const string& nameprepend, bool allow_file_func); // Send events void SendEvent(EventHandlerPtr ev, const int numvals, ...); diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.unsupported_types/out b/testing/btest/Baseline/scripts.base.frameworks.input.unsupported_types/out new file mode 100644 index 0000000000..7ef82cf368 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.unsupported_types/out @@ -0,0 +1,14 @@ +{ +[-42] = [fi=, b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +} diff --git a/testing/btest/scripts/base/frameworks/input/unsupported_types.bro b/testing/btest/scripts/base/frameworks/input/unsupported_types.bro new file mode 100644 index 0000000000..7affa4065d --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/unsupported_types.bro @@ -0,0 +1,64 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields fi b i e c p sn a d t iv s sc ss se vc ve f +#types file bool int enum count port subnet addr double time interval string table table table vector vector func +whatever T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load base/protocols/ssh +@load frameworks/communication/listen + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; +redef Input::accept_unsupported_types = T; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + fi: file &optional; + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +event bro_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::remove("ssh"); + } + +event Input::update_finished(name: string, source:string) + { + print outfile, servers; + close(outfile); + terminate(); + } From b6bd849018aa41c910a9675bf56d08a7e11b4e29 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 22 Aug 2012 12:12:16 -0400 Subject: [PATCH 585/651] Fixed ack tracking which could overflow quickly in some situations. - Problem presented itself through incorrect results in capture-loss.bro under odd traffic circumstances (exact circumstances unknown). - Changed variables involved in ack tracking to all be uint64 values. 
--- src/Stats.cc | 8 ++++---- src/Stats.h | 8 ++++---- src/TCP_Reassembler.cc | 16 ++++++++-------- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/Stats.cc b/src/Stats.cc index c3035231e9..8d48c47a25 100644 --- a/src/Stats.cc +++ b/src/Stats.cc @@ -12,10 +12,10 @@ int killed_by_inactivity = 0; -uint32 tot_ack_events = 0; -uint32 tot_ack_bytes = 0; -uint32 tot_gap_events = 0; -uint32 tot_gap_bytes = 0; +uint64 tot_ack_events = 0; +uint64 tot_ack_bytes = 0; +uint64 tot_gap_events = 0; +uint64 tot_gap_bytes = 0; class ProfileTimer : public Timer { diff --git a/src/Stats.h b/src/Stats.h index eeebfe2213..a11d66828a 100644 --- a/src/Stats.h +++ b/src/Stats.h @@ -116,10 +116,10 @@ extern SampleLogger* sample_logger; extern int killed_by_inactivity; // Content gap statistics. -extern uint32 tot_ack_events; -extern uint32 tot_ack_bytes; -extern uint32 tot_gap_events; -extern uint32 tot_gap_bytes; +extern uint64 tot_ack_events; +extern uint64 tot_ack_bytes; +extern uint64 tot_gap_events; +extern uint64 tot_gap_bytes; // A TCPStateStats object tracks the distribution of TCP states for diff --git a/src/TCP_Reassembler.cc b/src/TCP_Reassembler.cc index fb67dba7ee..eb2709373c 100644 --- a/src/TCP_Reassembler.cc +++ b/src/TCP_Reassembler.cc @@ -20,10 +20,10 @@ const bool DEBUG_tcp_connection_close = false; const bool DEBUG_tcp_match_undelivered = false; static double last_gap_report = 0.0; -static uint32 last_ack_events = 0; -static uint32 last_ack_bytes = 0; -static uint32 last_gap_events = 0; -static uint32 last_gap_bytes = 0; +static uint64 last_ack_events = 0; +static uint64 last_ack_bytes = 0; +static uint64 last_gap_events = 0; +static uint64 last_gap_bytes = 0; TCP_Reassembler::TCP_Reassembler(Analyzer* arg_dst_analyzer, TCP_Analyzer* arg_tcp_analyzer, @@ -513,10 +513,10 @@ void TCP_Reassembler::AckReceived(int seq) if ( gap_report && gap_report_freq > 0.0 && dt >= gap_report_freq ) { - int devents = tot_ack_events - last_ack_events; - int dbytes = tot_ack_bytes - last_ack_bytes; - int dgaps = tot_gap_events - last_gap_events; - int dgap_bytes = tot_gap_bytes - last_gap_bytes; + uint64 devents = tot_ack_events - last_ack_events; + uint64 dbytes = tot_ack_bytes - last_ack_bytes; + uint64 dgaps = tot_gap_events - last_gap_events; + uint64 dgap_bytes = tot_gap_bytes - last_gap_bytes; RecordVal* r = new RecordVal(gap_info); r->Assign(0, new Val(devents, TYPE_COUNT)); From e66e9e5d321716ecee47d9ab08155b9fe2ee034a Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 22 Aug 2012 11:12:27 -0500 Subject: [PATCH 586/651] Minor tweak to coverage.bare-mode-errors unit test. Adding trailing slash to $DIST/scripts makes the `find` work with a symlinked 'scripts' dir. 
--- testing/btest/coverage/bare-mode-errors.test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/btest/coverage/bare-mode-errors.test b/testing/btest/coverage/bare-mode-errors.test index 7084d74e83..635726841b 100644 --- a/testing/btest/coverage/bare-mode-errors.test +++ b/testing/btest/coverage/bare-mode-errors.test @@ -8,7 +8,7 @@ # @TEST-SERIALIZE: comm # # @TEST-EXEC: test -d $DIST/scripts -# @TEST-EXEC: for script in `find $DIST/scripts -name \*\.bro -not -path '*/site/*'`; do echo $script; if echo "$script" | egrep -q 'communication/listen|controllee'; then rm -rf load_attempt .bgprocs; btest-bg-run load_attempt bro -b $script; btest-bg-wait -k 2; cat load_attempt/.stderr >>allerrors; else bro -b $script 2>>allerrors; fi done || exit 0 +# @TEST-EXEC: for script in `find $DIST/scripts/ -name \*\.bro -not -path '*/site/*'`; do echo $script; if echo "$script" | egrep -q 'communication/listen|controllee'; then rm -rf load_attempt .bgprocs; btest-bg-run load_attempt bro -b $script; btest-bg-wait -k 2; cat load_attempt/.stderr >>allerrors; else bro -b $script 2>>allerrors; fi done || exit 0 # @TEST-EXEC: cat allerrors | grep -v "received termination signal" | sort | uniq > unique_errors # @TEST-EXEC: if [ $(grep -c CURL_INCLUDE_DIR-NOTFOUND $BUILD/CMakeCache.txt) -ne 0 ]; then cp unique_errors unique_errors_no_elasticsearch; fi # @TEST-EXEC: if [ $(grep -c CURL_INCLUDE_DIR-NOTFOUND $BUILD/CMakeCache.txt) -ne 0 ]; then btest-diff unique_errors_no_elasticsearch; else btest-diff unique_errors; fi From 201c4aa43aec4371f67851294036556154664808 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 22 Aug 2012 13:25:22 -0700 Subject: [PATCH 587/651] to be sure - add a small assertion --- src/input/Manager.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 4422a9814f..c3176d9c33 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1704,6 +1704,9 @@ RecordVal* Manager::ValueToRecordVal(const Value* const *vals, // That means that they are optional & the user agreed to ignore // them and has been warned by reporter. // Hence -> assign null to the field, done. + + // better check that it really is optional. you never know. + assert(request_type->FieldDecl(i)->FindAttr(ATTR_OPTIONAL)); } else { From 655a73bc13ff6d9cee18e98f90ad42a90b6a5b29 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 22 Aug 2012 16:46:47 -0500 Subject: [PATCH 588/651] Change to metrics/basic-cluster unit test for reliability. If the metrics break interval happened to occur between first and second worker starting up and getting connected to the cluster, the test would fail because the second worker didn't get a chance to connect and send data. The test now waits for the cluster setup to complete before workers send metrics data. --- testing/btest/core/leaks/basic-cluster.bro | 43 +++++++++++++------ .../base/frameworks/metrics/basic-cluster.bro | 39 ++++++++++++----- 2 files changed, 60 insertions(+), 22 deletions(-) diff --git a/testing/btest/core/leaks/basic-cluster.bro b/testing/btest/core/leaks/basic-cluster.bro index d9d2f97b1e..7fb176b8db 100644 --- a/testing/btest/core/leaks/basic-cluster.bro +++ b/testing/btest/core/leaks/basic-cluster.bro @@ -1,21 +1,21 @@ # Needs perftools support. # # @TEST-GROUP: leaks - +# # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks - +# # @TEST-EXEC: btest-bg-run manager-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. 
CLUSTER_NODE=manager-1 bro -m %INPUT # @TEST-EXEC: btest-bg-run proxy-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro -m %INPUT # @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT # @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT -# @TEST-EXEC: btest-bg-wait 40 +# @TEST-EXEC: btest-bg-wait 60 # @TEST-EXEC: btest-diff manager-1/metrics.log @TEST-START-FILE cluster-layout.bro redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1")], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1")], + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1", "worker-2")], ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"], ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"], }; @@ -32,13 +32,6 @@ event bro_init() &priority=5 Metrics::add_filter(TEST_METRIC, [$name="foo-bar", $break_interval=3secs]); - - if ( Cluster::local_node_type() == Cluster::WORKER ) - { - Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3); - Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2); - Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1); - } } event remote_connection_closed(p: event_peer) @@ -46,9 +39,25 @@ event remote_connection_closed(p: event_peer) terminate(); } +global ready_for_data: event(); + +redef Cluster::manager2worker_events += /ready_for_data/; + +@if ( Cluster::local_node_type() == Cluster::WORKER ) + +event ready_for_data() + { + Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3); + Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2); + Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1); + } + +@endif + @if ( Cluster::local_node_type() == Cluster::MANAGER ) global n = 0; +global peer_count = 0; event Metrics::log_metrics(rec: Metrics::Info) { @@ -60,4 +69,14 @@ event Metrics::log_metrics(rec: Metrics::Info) } } +event remote_connection_handshake_done(p: event_peer) + { + print p; + peer_count = peer_count + 1; + if ( peer_count == 3 ) + { + event ready_for_data(); + } + } + @endif diff --git a/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro b/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro index 4aa1afa96f..89ae5bf79f 100644 --- a/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro +++ b/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro @@ -5,13 +5,13 @@ # @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT # @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: btest-bg-wait 30 # @TEST-EXEC: btest-diff manager-1/metrics.log @TEST-START-FILE cluster-layout.bro redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1")], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1")], + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1", "worker-2")], ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"], ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"], }; @@ -28,13 +28,6 @@ event bro_init() &priority=5 Metrics::add_filter(TEST_METRIC, [$name="foo-bar", $break_interval=3secs]); - - if ( Cluster::local_node_type() == Cluster::WORKER ) - { - Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3); - Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2); - Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1); - } } event remote_connection_closed(p: event_peer) @@ -42,9 +35,25 @@ event remote_connection_closed(p: event_peer) terminate(); } +global ready_for_data: event(); + +redef Cluster::manager2worker_events += /ready_for_data/; + +@if ( Cluster::local_node_type() == Cluster::WORKER ) + +event ready_for_data() + { + Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3); + Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2); + Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1); + } + +@endif + @if ( Cluster::local_node_type() == Cluster::MANAGER ) global n = 0; +global peer_count = 0; event Metrics::log_metrics(rec: Metrics::Info) { @@ -56,4 +65,14 @@ event Metrics::log_metrics(rec: Metrics::Info) } } +event remote_connection_handshake_done(p: event_peer) + { + print p; + peer_count = peer_count + 1; + if ( peer_count == 3 ) + { + event ready_for_data(); + } + } + @endif From 93744c8d9b22888269f466f116559f90f96638d4 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 22 Aug 2012 16:54:00 -0500 Subject: [PATCH 589/651] Add test serialization to "leak" unit tests that use communication. --- testing/btest/core/leaks/basic-cluster.bro | 1 + testing/btest/core/leaks/remote.bro | 1 + 2 files changed, 2 insertions(+) diff --git a/testing/btest/core/leaks/basic-cluster.bro b/testing/btest/core/leaks/basic-cluster.bro index 7fb176b8db..319368bc6e 100644 --- a/testing/btest/core/leaks/basic-cluster.bro +++ b/testing/btest/core/leaks/basic-cluster.bro @@ -1,5 +1,6 @@ # Needs perftools support. # +# @TEST-SERIALIZE: comm # @TEST-GROUP: leaks # # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks diff --git a/testing/btest/core/leaks/remote.bro b/testing/btest/core/leaks/remote.bro index 8c8dc73364..41bbaec076 100644 --- a/testing/btest/core/leaks/remote.bro +++ b/testing/btest/core/leaks/remote.bro @@ -1,5 +1,6 @@ # Needs perftools support. # +# @TEST-SERIALIZE: comm # @TEST-GROUP: leaks # # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks From 95d7055373763787628936431e82ea6562f4d7ba Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 22 Aug 2012 16:17:27 -0700 Subject: [PATCH 590/651] Updating submodule(s). 
[nomail] --- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aux/binpac b/aux/binpac index 22120825f8..a93ef13735 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 22120825f8ad70e051ef4ca42f2199aa195dff40 +Subproject commit a93ef1373512c661ffcd0d0a61bd19b96667e0d5 diff --git a/aux/bro-aux b/aux/bro-aux index 941ee753f7..4bc1a6f6a8 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 941ee753f7c71ec08fc29de04f09a8a83aebb69d +Subproject commit 4bc1a6f6a8816dfacd8288fcf182ba35520e589b diff --git a/aux/broccoli b/aux/broccoli index 5ff3e6a8e8..ebfa4de45a 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 5ff3e6a8e8535ed91e1f70d355b815ae8eeacb71 +Subproject commit ebfa4de45a839e58aec200e7e4bad33eaab4f1ed diff --git a/aux/broctl b/aux/broctl index 6d0eb6083a..5b3f9e5906 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 6d0eb6083acdc77e0a912bec0fb23df79b98da63 +Subproject commit 5b3f9e5906c90b76c5aa1626e112d4c991cb3fd8 From 25ef0a89e752aec2b1506363ffdeea738d1e3f1b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 22 Aug 2012 18:15:55 -0700 Subject: [PATCH 591/651] Updating NEWS. --- NEWS | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 949b51d832..d7018575d3 100644 --- a/NEWS +++ b/NEWS @@ -7,8 +7,8 @@ release. For a complete list of changes, see the ``CHANGES`` file (note that submodules, such as BroControl and Broccoli, come with their own CHANGES.) -Bro 2.1 Beta ------------- +Bro 2.1 +------- New Functionality ~~~~~~~~~~~~~~~~~ @@ -161,6 +161,7 @@ the full set. - The ASCII writers "header_*" options have been renamed to "meta_*" (because there's now also a footer). + Bro 2.0 ------- From bef0ce1c98bc2dfc0e2dddef821878b7eb91f4b7 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 23 Aug 2012 11:52:39 -0500 Subject: [PATCH 592/651] Add type checking for signature 'eval' condition functions. Otherwise functions could be called with a mismatching argument list and cause a crash at run-time. The incorrect function type is now reported at parse-time. 
--- src/RuleCondition.cc | 17 ++++++++++++++ .../signatures.bad-eval-condition/.stderr | 2 ++ .../signatures.eval-condition/conn.log | 14 ++++++++++++ .../output | 0 testing/btest/btest.cfg | 2 +- .../btest/signatures/bad-eval-condition.bro | 22 +++++++++++++++++++ testing/btest/signatures/eval-condition.bro | 20 +++++++++++++++++ .../btest/{core => signatures}/load-sigs.bro | 0 8 files changed, 76 insertions(+), 1 deletion(-) create mode 100644 testing/btest/Baseline/signatures.bad-eval-condition/.stderr create mode 100644 testing/btest/Baseline/signatures.eval-condition/conn.log rename testing/btest/Baseline/{core.load-sigs => signatures.load-sigs}/output (100%) create mode 100644 testing/btest/signatures/bad-eval-condition.bro create mode 100644 testing/btest/signatures/eval-condition.bro rename testing/btest/{core => signatures}/load-sigs.bro (100%) diff --git a/src/RuleCondition.cc b/src/RuleCondition.cc index 8852747cc4..3e64f9ffca 100644 --- a/src/RuleCondition.cc +++ b/src/RuleCondition.cc @@ -126,6 +126,23 @@ RuleConditionEval::RuleConditionEval(const char* func) rules_error("unknown identifier", func); return; } + + if ( id->Type()->Tag() == TYPE_FUNC ) + { + // validate argument quantity and type + FuncType* f = id->Type()->AsFuncType(); + + if ( f->YieldType()->Tag() != TYPE_BOOL ) + rules_error("eval function type must yield a 'bool'", func); + + TypeList tl; + tl.Append(internal_type("signature_state")->Ref()); + tl.Append(base_type(TYPE_STRING)); + + if ( ! f->CheckArgs(tl.Types()) ) + rules_error("eval function parameters must be a 'signature_state' " + "and a 'string' type", func); + } } bool RuleConditionEval::DoMatch(Rule* rule, RuleEndpointState* state, diff --git a/testing/btest/Baseline/signatures.bad-eval-condition/.stderr b/testing/btest/Baseline/signatures.bad-eval-condition/.stderr new file mode 100644 index 0000000000..c4de35ffe9 --- /dev/null +++ b/testing/btest/Baseline/signatures.bad-eval-condition/.stderr @@ -0,0 +1,2 @@ +error: Error in signature (./blah.sig:6): eval function parameters must be a 'signature_state' and a 'string' type (mark_conn) + diff --git a/testing/btest/Baseline/signatures.eval-condition/conn.log b/testing/btest/Baseline/signatures.eval-condition/conn.log new file mode 100644 index 0000000000..a803f74320 --- /dev/null +++ b/testing/btest/Baseline/signatures.eval-condition/conn.log @@ -0,0 +1,14 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open 2012-08-23-16-41-23 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1329843175.736107 arKYeMETxOg 141.142.220.235 37604 199.233.217.249 56666 tcp ftp-data 0.112432 0 342 SF - 0 ShAdfFa 4 216 4 562 (empty) +1329843179.871641 k6kgXLOoSKl 141.142.220.235 59378 199.233.217.249 56667 tcp ftp-data 0.111218 0 77 SF - 0 ShAdfFa 4 216 4 297 (empty) +1329843194.151526 nQcgTWjvg4c 199.233.217.249 61920 141.142.220.235 33582 tcp ftp-data 0.056211 342 0 SF - 0 ShADaFf 5 614 3 164 (empty) +1329843197.783443 j4u32Pc5bif 199.233.217.249 61918 141.142.220.235 37835 tcp ftp-data 0.056005 77 0 SF - 0 ShADaFf 5 349 3 164 (empty) +1329843161.968492 UWkUyAuUGXf 141.142.220.235 50003 199.233.217.249 21 tcp ftp,blah 38.055625 180 3146 SF - 0 ShAdDfFa 38 2164 25 4458 (empty) +#close 
2012-08-23-16-41-23 diff --git a/testing/btest/Baseline/core.load-sigs/output b/testing/btest/Baseline/signatures.load-sigs/output similarity index 100% rename from testing/btest/Baseline/core.load-sigs/output rename to testing/btest/Baseline/signatures.load-sigs/output diff --git a/testing/btest/btest.cfg b/testing/btest/btest.cfg index 4c4074ee24..d86b45d8a9 100644 --- a/testing/btest/btest.cfg +++ b/testing/btest/btest.cfg @@ -1,5 +1,5 @@ [btest] -TestDirs = doc bifs language core scripts istate coverage +TestDirs = doc bifs language core scripts istate coverage signatures TmpDir = %(testbase)s/.tmp BaselineDir = %(testbase)s/Baseline IgnoreDirs = .svn CVS .tmp diff --git a/testing/btest/signatures/bad-eval-condition.bro b/testing/btest/signatures/bad-eval-condition.bro new file mode 100644 index 0000000000..34997b1124 --- /dev/null +++ b/testing/btest/signatures/bad-eval-condition.bro @@ -0,0 +1,22 @@ +# @TEST-EXEC-FAIL: bro -r $TRACES/ftp-ipv4.trace %INPUT +# @TEST-EXEC: btest-diff .stderr + +@load-sigs blah.sig + +@TEST-START-FILE blah.sig +signature blah + { + ip-proto == tcp + src-port == 21 + payload /.*/ + eval mark_conn + } +@TEST-END-FILE + +# wrong function signature for use with signature 'eval' conditions +# needs to be reported +function mark_conn(state: signature_state): bool + { + add state$conn$service["blah"]; + return T; + } diff --git a/testing/btest/signatures/eval-condition.bro b/testing/btest/signatures/eval-condition.bro new file mode 100644 index 0000000000..f3f1171da6 --- /dev/null +++ b/testing/btest/signatures/eval-condition.bro @@ -0,0 +1,20 @@ +# @TEST-EXEC: bro -r $TRACES/ftp-ipv4.trace %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load-sigs blah.sig + +@TEST-START-FILE blah.sig +signature blah + { + ip-proto == tcp + src-port == 21 + payload /.*/ + eval mark_conn + } +@TEST-END-FILE + +function mark_conn(state: signature_state, data: string): bool + { + add state$conn$service["blah"]; + return T; + } diff --git a/testing/btest/core/load-sigs.bro b/testing/btest/signatures/load-sigs.bro similarity index 100% rename from testing/btest/core/load-sigs.bro rename to testing/btest/signatures/load-sigs.bro From ff60b0bb4bf9a1d6da38bd273b0ec34eb2f37f60 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 23 Aug 2012 11:59:51 -0500 Subject: [PATCH 593/651] Remove orphaned unit tests. Looks like they're maybe from 1.5 and not applicable/updateable. --- testing/btest/Baseline/analyzers.conn-size-cc/conn.log | 5 ----- testing/btest/Baseline/analyzers.conn-size/conn.log | 5 ----- testing/btest/analyzers/conn-size-cc.bro | 2 -- testing/btest/analyzers/conn-size.bro | 2 -- 4 files changed, 14 deletions(-) delete mode 100644 testing/btest/Baseline/analyzers.conn-size-cc/conn.log delete mode 100644 testing/btest/Baseline/analyzers.conn-size/conn.log delete mode 100644 testing/btest/analyzers/conn-size-cc.bro delete mode 100644 testing/btest/analyzers/conn-size.bro diff --git a/testing/btest/Baseline/analyzers.conn-size-cc/conn.log b/testing/btest/Baseline/analyzers.conn-size-cc/conn.log deleted file mode 100644 index 2f703cbcd6..0000000000 --- a/testing/btest/Baseline/analyzers.conn-size-cc/conn.log +++ /dev/null @@ -1,5 +0,0 @@ -1128727430.350788 ? 141.42.64.125 125.190.109.199 other 56729 12345 tcp ? ? S0 X 1 60 0 0 cc=1 -1144876538.705610 5.921003 169.229.147.203 239.255.255.253 other 49370 427 udp 147 ? 
S0 X 3 231 0 0 -1144876599.397603 0.815763 192.150.186.169 194.64.249.244 http 53063 80 tcp 377 445 SF X 6 677 5 713 -1144876709.032670 9.000191 169.229.147.43 239.255.255.253 other 49370 427 udp 196 ? S0 X 4 308 0 0 -1144876697.068273 0.000650 192.150.186.169 192.150.186.15 icmp-unreach 3 3 icmp 56 ? OTH X 2 112 0 0 diff --git a/testing/btest/Baseline/analyzers.conn-size/conn.log b/testing/btest/Baseline/analyzers.conn-size/conn.log deleted file mode 100644 index 8129bc37f8..0000000000 --- a/testing/btest/Baseline/analyzers.conn-size/conn.log +++ /dev/null @@ -1,5 +0,0 @@ -1128727430.350788 ? 141.42.64.125 125.190.109.199 other 56729 12345 tcp ? ? S0 X 1 60 0 0 -1144876538.705610 5.921003 169.229.147.203 239.255.255.253 other 49370 427 udp 147 ? S0 X 3 231 0 0 -1144876599.397603 0.815763 192.150.186.169 194.64.249.244 http 53063 80 tcp 377 445 SF X 6 697 5 713 -1144876709.032670 9.000191 169.229.147.43 239.255.255.253 other 49370 427 udp 196 ? S0 X 4 308 0 0 -1144876697.068273 0.000650 192.150.186.169 192.150.186.15 icmp-unreach 3 3 icmp 56 ? OTH X 2 112 0 0 diff --git a/testing/btest/analyzers/conn-size-cc.bro b/testing/btest/analyzers/conn-size-cc.bro deleted file mode 100644 index 0ba7977cf5..0000000000 --- a/testing/btest/analyzers/conn-size-cc.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -C -r ${TRACES}/conn-size.trace tcp udp icmp report_conn_size_analyzer=T -# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/analyzers/conn-size.bro b/testing/btest/analyzers/conn-size.bro deleted file mode 100644 index 0ba7977cf5..0000000000 --- a/testing/btest/analyzers/conn-size.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -C -r ${TRACES}/conn-size.trace tcp udp icmp report_conn_size_analyzer=T -# @TEST-EXEC: btest-diff conn.log From 558ca2867c873073d30522073049a35f5cc52111 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 23 Aug 2012 12:29:42 -0500 Subject: [PATCH 594/651] Doc fixes for signature 'eval' conditions. --- doc/signatures.rst | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/doc/signatures.rst b/doc/signatures.rst index f65215eceb..36099ba40f 100644 --- a/doc/signatures.rst +++ b/doc/signatures.rst @@ -229,20 +229,10 @@ matched. The following context conditions are defined: confirming the match. If false is returned, no signature match is going to be triggered. The function has to be of type ``function cond(state: signature_state, data: string): bool``. Here, - ``content`` may contain the most recent content chunk available at + ``data`` may contain the most recent content chunk available at the time the signature was matched. If no such chunk is available, - ``content`` will be the empty string. ``signature_state`` is - defined as follows: - - .. code:: bro - - type signature_state: record { - id: string; # ID of the signature - conn: connection; # Current connection - is_orig: bool; # True if current endpoint is originator - payload_size: count; # Payload size of the first packet - }; - + ``data`` will be the empty string. See :bro:type:`signature_state` + for its definition. ``payload-size `` Compares the integer to the size of the payload of a packet. For From 5f40e153a87b37e7621809a38545504b696202a0 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 23 Aug 2012 13:55:04 -0400 Subject: [PATCH 595/651] Adding an identifier to the SMTP blocklist notices for duplicate suppression. - Slight addition and revision to inline docs. 
--- scripts/policy/protocols/smtp/blocklists.bro | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/scripts/policy/protocols/smtp/blocklists.bro b/scripts/policy/protocols/smtp/blocklists.bro index a3e75318bb..b1fb0e498d 100644 --- a/scripts/policy/protocols/smtp/blocklists.bro +++ b/scripts/policy/protocols/smtp/blocklists.bro @@ -1,3 +1,4 @@ +##! Watch for various SPAM blocklist URLs in SMTP error messages. @load base/protocols/smtp @@ -5,9 +6,11 @@ module SMTP; export { redef enum Notice::Type += { - ## Indicates that the server sent a reply mentioning an SMTP block list. + ## An SMTP server sent a reply mentioning an SMTP block list. Blocklist_Error_Message, - ## Indicates the client's address is seen in the block list error message. + ## The originator's address is seen in the block list error message. + ## This is useful to detect local hosts sending SPAM with a high + ## positive rate. Blocklist_Blocked_Host, }; @@ -52,7 +55,8 @@ event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string, message = fmt("%s is on an SMTP block list", c$id$orig_h); } - NOTICE([$note=note, $conn=c, $msg=message, $sub=msg]); + NOTICE([$note=note, $conn=c, $msg=message, $sub=msg, + $identifier=cat(c$id$orig_h)]); } } } From c1c9c9e34af571c5f204b4608b849823922c228f Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 23 Aug 2012 13:04:18 -0500 Subject: [PATCH 596/651] Update documentation for builtin types Add missing description of interval "msec" unit. Improved description of pattern by clarifying the issue of operand order and difference between exact and embedded matching. --- doc/scripts/builtins.rst | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/doc/scripts/builtins.rst b/doc/scripts/builtins.rst index 32908f71fd..0501067409 100644 --- a/doc/scripts/builtins.rst +++ b/doc/scripts/builtins.rst @@ -55,8 +55,8 @@ The Bro scripting language supports the following built-in types. A temporal type representing a relative time. An ``interval`` constant can be written as a numeric constant followed by a time - unit where the time unit is one of ``usec``, ``sec``, ``min``, - ``hr``, or ``day`` which respectively represent microseconds, + unit where the time unit is one of ``usec``, ``msec``, ``sec``, ``min``, + ``hr``, or ``day`` which respectively represent microseconds, milliseconds, seconds, minutes, hours, and days. Whitespace between the numeric constant and time unit is optional. Appending the letter "s" to the time unit in order to pluralize it is also optional (to no semantic @@ -95,14 +95,14 @@ The Bro scripting language supports the following built-in types. and embedded. In exact matching the ``==`` equality relational operator is used - with one :bro:type:`string` operand and one :bro:type:`pattern` - operand to check whether the full string exactly matches the - pattern. In this case, the ``^`` beginning-of-line and ``$`` - end-of-line anchors are redundant since pattern is implicitly - anchored to the beginning and end of the line to facilitate an exact - match. For example:: + with one :bro:type:`pattern` operand and one :bro:type:`string` + operand (order of operands does not matter) to check whether the full + string exactly matches the pattern. In exact matching, the ``^`` + beginning-of-line and ``$`` end-of-line anchors are redundant since + the pattern is implicitly anchored to the beginning and end of the + line to facilitate an exact match. 
For example:: - "foo" == /foo|bar/ + /foo|bar/ == "foo" yields true, while:: @@ -110,9 +110,9 @@ The Bro scripting language supports the following built-in types. yields false. The ``!=`` operator would yield the negation of ``==``. - In embedded matching the ``in`` operator is again used with one - :bro:type:`string` operand and one :bro:type:`pattern` operand - (which must be on the left-hand side), but tests whether the pattern + In embedded matching the ``in`` operator is used with one + :bro:type:`pattern` operand (which must be on the left-hand side) and + one :bro:type:`string` operand, but tests whether the pattern appears anywhere within the given string. For example:: /foo|bar/ in "foobar" From 90281a2423230671c8a022cac4dcd509aeb233cd Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 24 Aug 2012 11:32:49 -0500 Subject: [PATCH 597/651] Add tests of the Bro scripting language Added tests of all built-in Bro data types (including different representations of constant values, and max./min. values), keywords, and operators (including special properties of certain operators, such as short-circuit evaluation and associativity). --- testing/btest/Baseline/language.addr/out | 13 ++ testing/btest/Baseline/language.bool/out | 7 + testing/btest/Baseline/language.count/out | 16 +++ testing/btest/Baseline/language.double/out | 25 ++++ testing/btest/Baseline/language.enum/out | 4 + testing/btest/Baseline/language.event/out | 4 + testing/btest/Baseline/language.file/out1 | 2 + testing/btest/Baseline/language.file/out2 | 1 + testing/btest/Baseline/language.for/out | 3 + testing/btest/Baseline/language.function/out | 11 ++ testing/btest/Baseline/language.if/out | 12 ++ testing/btest/Baseline/language.int/out | 21 +++ testing/btest/Baseline/language.interval/out | 23 +++ .../Baseline/language.null-statement/out | 1 + testing/btest/Baseline/language.pattern/out | 6 + testing/btest/Baseline/language.port/out | 8 ++ testing/btest/Baseline/language.set/out | 36 +++++ .../btest/Baseline/language.short-circuit/out | 4 + testing/btest/Baseline/language.string/out | 24 ++++ testing/btest/Baseline/language.subnet/out | 10 ++ testing/btest/Baseline/language.table/out | 37 +++++ testing/btest/Baseline/language.time/out | 7 + testing/btest/Baseline/language.timeout/out | 1 + testing/btest/Baseline/language.vector/out | 31 +++++ testing/btest/Baseline/language.when/out | 2 + testing/btest/language/addr.bro | 46 ++++++ testing/btest/language/bool.bro | 28 ++++ testing/btest/language/count.bro | 42 ++++++ testing/btest/language/double.bro | 66 +++++++++ testing/btest/language/enum.bro | 32 +++++ testing/btest/language/event.bro | 49 +++++++ testing/btest/language/file.bro | 19 +++ testing/btest/language/for.bro | 44 ++++++ testing/btest/language/function.bro | 73 ++++++++++ testing/btest/language/if.bro | 71 ++++++++++ testing/btest/language/int.bro | 54 ++++++++ testing/btest/language/interval.bro | 77 ++++++++++ testing/btest/language/null-statement.bro | 34 +++++ testing/btest/language/pattern.bro | 28 ++++ testing/btest/language/port.bro | 35 +++++ testing/btest/language/set.bro | 121 ++++++++++++++++ testing/btest/language/short-circuit.bro | 48 +++++++ testing/btest/language/string.bro | 59 ++++++++ testing/btest/language/subnet.bro | 48 +++++++ testing/btest/language/table.bro | 131 ++++++++++++++++++ testing/btest/language/time.bro | 28 ++++ testing/btest/language/timeout.bro | 19 +++ testing/btest/language/vector.bro | 104 ++++++++++++++ testing/btest/language/when.bro | 15 ++ 49 files changed, 
1580 insertions(+) create mode 100644 testing/btest/Baseline/language.addr/out create mode 100644 testing/btest/Baseline/language.bool/out create mode 100644 testing/btest/Baseline/language.count/out create mode 100644 testing/btest/Baseline/language.double/out create mode 100644 testing/btest/Baseline/language.enum/out create mode 100644 testing/btest/Baseline/language.event/out create mode 100644 testing/btest/Baseline/language.file/out1 create mode 100644 testing/btest/Baseline/language.file/out2 create mode 100644 testing/btest/Baseline/language.for/out create mode 100644 testing/btest/Baseline/language.function/out create mode 100644 testing/btest/Baseline/language.if/out create mode 100644 testing/btest/Baseline/language.int/out create mode 100644 testing/btest/Baseline/language.interval/out create mode 100644 testing/btest/Baseline/language.null-statement/out create mode 100644 testing/btest/Baseline/language.pattern/out create mode 100644 testing/btest/Baseline/language.port/out create mode 100644 testing/btest/Baseline/language.set/out create mode 100644 testing/btest/Baseline/language.short-circuit/out create mode 100644 testing/btest/Baseline/language.string/out create mode 100644 testing/btest/Baseline/language.subnet/out create mode 100644 testing/btest/Baseline/language.table/out create mode 100644 testing/btest/Baseline/language.time/out create mode 100644 testing/btest/Baseline/language.timeout/out create mode 100644 testing/btest/Baseline/language.vector/out create mode 100644 testing/btest/Baseline/language.when/out create mode 100644 testing/btest/language/addr.bro create mode 100644 testing/btest/language/bool.bro create mode 100644 testing/btest/language/count.bro create mode 100644 testing/btest/language/double.bro create mode 100644 testing/btest/language/enum.bro create mode 100644 testing/btest/language/event.bro create mode 100644 testing/btest/language/file.bro create mode 100644 testing/btest/language/for.bro create mode 100644 testing/btest/language/function.bro create mode 100644 testing/btest/language/if.bro create mode 100644 testing/btest/language/int.bro create mode 100644 testing/btest/language/interval.bro create mode 100644 testing/btest/language/null-statement.bro create mode 100644 testing/btest/language/pattern.bro create mode 100644 testing/btest/language/port.bro create mode 100644 testing/btest/language/set.bro create mode 100644 testing/btest/language/short-circuit.bro create mode 100644 testing/btest/language/string.bro create mode 100644 testing/btest/language/subnet.bro create mode 100644 testing/btest/language/table.bro create mode 100644 testing/btest/language/time.bro create mode 100644 testing/btest/language/timeout.bro create mode 100644 testing/btest/language/vector.bro create mode 100644 testing/btest/language/when.bro diff --git a/testing/btest/Baseline/language.addr/out b/testing/btest/Baseline/language.addr/out new file mode 100644 index 0000000000..79a88d6dcb --- /dev/null +++ b/testing/btest/Baseline/language.addr/out @@ -0,0 +1,13 @@ +IPv4 address inequality (PASS) +IPv4 address equality (PASS) +IPv4 address comparison (PASS) +IPv4 address comparison (PASS) +size of IPv4 address (PASS) +IPv6 address inequality (PASS) +IPv6 address equality (PASS) +IPv6 address equality (PASS) +IPv6 address comparison (PASS) +IPv6 address comparison (PASS) +IPv6 address not case-sensitive (PASS) +size of IPv6 address (PASS) +IPv4 and IPv6 address inequality (PASS) diff --git a/testing/btest/Baseline/language.bool/out 
b/testing/btest/Baseline/language.bool/out new file mode 100644 index 0000000000..177c6795ef --- /dev/null +++ b/testing/btest/Baseline/language.bool/out @@ -0,0 +1,7 @@ +equality operator (PASS) +inequality operator (PASS) +logical or operator (PASS) +logical and operator (PASS) +negation operator (PASS) +absolute value (PASS) +absolute value (PASS) diff --git a/testing/btest/Baseline/language.count/out b/testing/btest/Baseline/language.count/out new file mode 100644 index 0000000000..7dba9ea24c --- /dev/null +++ b/testing/btest/Baseline/language.count/out @@ -0,0 +1,16 @@ +inequality operator (PASS) +relational operator (PASS) +relational operator (PASS) +relational operator (PASS) +relational operator (PASS) +hexadecimal (PASS) +counter alias (PASS) +absolute value (PASS) +absolute value (PASS) +pre-increment operator (PASS) +pre-decrement operator (PASS) +modulus operator (PASS) +division operator (PASS) +assignment operator (PASS) +assignment operator (PASS) +max count value = 4294967295 (PASS) diff --git a/testing/btest/Baseline/language.double/out b/testing/btest/Baseline/language.double/out new file mode 100644 index 0000000000..01e3047743 --- /dev/null +++ b/testing/btest/Baseline/language.double/out @@ -0,0 +1,25 @@ +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +double representations (PASS) +inequality operator (PASS) +absolute value (PASS) +assignment operator (PASS) +assignment operator (PASS) +relational operator (PASS) +relational operator (PASS) +relational operator (PASS) +relational operator (PASS) +division operator (PASS) +max double value = 1.7e+308 (PASS) diff --git a/testing/btest/Baseline/language.enum/out b/testing/btest/Baseline/language.enum/out new file mode 100644 index 0000000000..1bafdd73b0 --- /dev/null +++ b/testing/btest/Baseline/language.enum/out @@ -0,0 +1,4 @@ +enum equality comparison (PASS) +enum equality comparison (PASS) +enum equality comparison (PASS) +type inference (PASS) diff --git a/testing/btest/Baseline/language.event/out b/testing/btest/Baseline/language.event/out new file mode 100644 index 0000000000..d5a22b3745 --- /dev/null +++ b/testing/btest/Baseline/language.event/out @@ -0,0 +1,4 @@ +event statement +event part1 +event part2 +schedule statement diff --git a/testing/btest/Baseline/language.file/out1 b/testing/btest/Baseline/language.file/out1 new file mode 100644 index 0000000000..5ff4194027 --- /dev/null +++ b/testing/btest/Baseline/language.file/out1 @@ -0,0 +1,2 @@ +20 +12 diff --git a/testing/btest/Baseline/language.file/out2 b/testing/btest/Baseline/language.file/out2 new file mode 100644 index 0000000000..12be2d6723 --- /dev/null +++ b/testing/btest/Baseline/language.file/out2 @@ -0,0 +1 @@ +test, 123, 456 diff --git a/testing/btest/Baseline/language.for/out b/testing/btest/Baseline/language.for/out new file mode 100644 index 0000000000..dccc00ce3e --- /dev/null +++ b/testing/btest/Baseline/language.for/out @@ -0,0 +1,3 @@ +for loop (PASS) +for loop with break (PASS) +for loop with next (PASS) diff --git a/testing/btest/Baseline/language.function/out b/testing/btest/Baseline/language.function/out new file mode 100644 index 0000000000..f530024370 
--- /dev/null +++ b/testing/btest/Baseline/language.function/out @@ -0,0 +1,11 @@ +no args without return value (PASS) +no args no return value, empty return (PASS) +no args with return value (PASS) +args without return value (PASS) +args with return value (PASS) +multiple args with return value (PASS) +anonymous function without args or return value (PASS) +anonymous function with return value (PASS) +anonymous function with args and return value (PASS) +assign function variable (PASS) +reassign function variable (PASS) diff --git a/testing/btest/Baseline/language.if/out b/testing/btest/Baseline/language.if/out new file mode 100644 index 0000000000..510b66b0cf --- /dev/null +++ b/testing/btest/Baseline/language.if/out @@ -0,0 +1,12 @@ +if T (PASS) +if T else (PASS) +if F else (PASS) +if T else if F (PASS) +if F else if T (PASS) +if T else if T (PASS) +if T else if F else (PASS) +if F else if T else (PASS) +if T else if T else (PASS) +if F else if F else (PASS) +if F else if F else if T else (PASS) +if F else if F else if F else (PASS) diff --git a/testing/btest/Baseline/language.int/out b/testing/btest/Baseline/language.int/out new file mode 100644 index 0000000000..a50887999a --- /dev/null +++ b/testing/btest/Baseline/language.int/out @@ -0,0 +1,21 @@ +optional '+' sign (PASS) +negative vs. positive (PASS) +negative vs. positive (PASS) +hexadecimal (PASS) +hexadecimal (PASS) +hexadecimal (PASS) +relational operator (PASS) +relational operator (PASS) +relational operator (PASS) +relational operator (PASS) +absolute value (PASS) +absolute value (PASS) +pre-increment operator (PASS) +pre-decrement operator (PASS) +modulus operator (PASS) +division operator (PASS) +assignment operator (PASS) +assignment operator (PASS) +max int value = 4294967295 (PASS) +min int value = -4294967295 (PASS) +type inference (PASS) diff --git a/testing/btest/Baseline/language.interval/out b/testing/btest/Baseline/language.interval/out new file mode 100644 index 0000000000..3eb135de52 --- /dev/null +++ b/testing/btest/Baseline/language.interval/out @@ -0,0 +1,23 @@ +optional space (PASS) +different units with same numeric value (PASS) +plural/singular interval are same (PASS) +compare different time units (PASS) +compare different time units (PASS) +compare different time units (PASS) +compare different time units (PASS) +compare different time units (PASS) +compare different time units (PASS) +compare different time units (PASS) +add different time units (PASS) +subtract different time units (PASS) +absolute value (PASS) +absolute value (PASS) +assignment operator (PASS) +multiplication operator (PASS) +division operator (PASS) +division operator (PASS) +relative size of units (PASS) +relative size of units (PASS) +relative size of units (PASS) +relative size of units (PASS) +relative size of units (PASS) diff --git a/testing/btest/Baseline/language.null-statement/out b/testing/btest/Baseline/language.null-statement/out new file mode 100644 index 0000000000..19f86f493a --- /dev/null +++ b/testing/btest/Baseline/language.null-statement/out @@ -0,0 +1 @@ +done diff --git a/testing/btest/Baseline/language.pattern/out b/testing/btest/Baseline/language.pattern/out new file mode 100644 index 0000000000..5a31e4eacb --- /dev/null +++ b/testing/btest/Baseline/language.pattern/out @@ -0,0 +1,6 @@ +equality operator (PASS) +equality operator (order of operands) (PASS) +inequality operator (PASS) +in operator (PASS) +in operator (PASS) +!in operator (PASS) diff --git a/testing/btest/Baseline/language.port/out 
b/testing/btest/Baseline/language.port/out new file mode 100644 index 0000000000..9dd7ba03c2 --- /dev/null +++ b/testing/btest/Baseline/language.port/out @@ -0,0 +1,8 @@ +protocol ordering (PASS) +protocol ordering (PASS) +protocol ordering (PASS) +protocol ordering (PASS) +protocol ordering (PASS) +different protocol but same numeric value (PASS) +different protocol but same numeric value (PASS) +equality operator (PASS) diff --git a/testing/btest/Baseline/language.set/out b/testing/btest/Baseline/language.set/out new file mode 100644 index 0000000000..b4801ac799 --- /dev/null +++ b/testing/btest/Baseline/language.set/out @@ -0,0 +1,36 @@ +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +iterate over set (PASS) +iterate over set (PASS) +iterate over set (PASS) +iterate over set (PASS) +add element (PASS) +in operator (PASS) +add element (PASS) +add element (PASS) +in operator (PASS) +in operator (PASS) +add element (PASS) +in operator (PASS) +add element (PASS) +in operator (PASS) +add element (PASS) +in operator (PASS) +remove element (PASS) +!in operator (PASS) +remove element (PASS) +!in operator (PASS) +remove element (PASS) +!in operator (PASS) +remove element (PASS) +!in operator (PASS) +remove element (PASS) +!in operator (PASS) diff --git a/testing/btest/Baseline/language.short-circuit/out b/testing/btest/Baseline/language.short-circuit/out new file mode 100644 index 0000000000..c92995ea7c --- /dev/null +++ b/testing/btest/Baseline/language.short-circuit/out @@ -0,0 +1,4 @@ +&& operator (eval. both operands) (PASS) +&& operator (eval. 1st operand) (PASS) +|| operator (eval. 1st operand) (PASS) +|| operator (eval. 
both operands) (PASS) diff --git a/testing/btest/Baseline/language.string/out b/testing/btest/Baseline/language.string/out new file mode 100644 index 0000000000..623d1cd3ba --- /dev/null +++ b/testing/btest/Baseline/language.string/out @@ -0,0 +1,24 @@ +empty string (PASS) +nonempty string (PASS) +string comparison (PASS) +string comparison (PASS) +string comparison (PASS) +string comparison (PASS) +null escape sequence (PASS) +tab escape sequence (PASS) +newline escape sequence (PASS) +hex escape sequence (PASS) +hex escape sequence (PASS) +hex escape sequence (PASS) +octal escape sequence (PASS) +quote escape sequence (PASS) +backslash escape sequence (PASS) +null escape sequence (PASS) +newline escape sequence (PASS) +tab escape sequence (PASS) +string concatenation (PASS) +string concatenation (PASS) +long string initialization (PASS) +in operator (PASS) +!in operator (PASS) +type inference (PASS) diff --git a/testing/btest/Baseline/language.subnet/out b/testing/btest/Baseline/language.subnet/out new file mode 100644 index 0000000000..f753d65c68 --- /dev/null +++ b/testing/btest/Baseline/language.subnet/out @@ -0,0 +1,10 @@ +IPv4 subnet equality (PASS) +IPv4 subnet inequality (PASS) +IPv4 subnet in operator (PASS) +IPv4 subnet !in operator (PASS) +IPv6 subnet equality (PASS) +IPv6 subnet inequality (PASS) +IPv6 subnet in operator (PASS) +IPv6 subnet !in operator (PASS) +IPv4 and IPv6 subnet inequality (PASS) +IPv4 address and IPv6 subnet (PASS) diff --git a/testing/btest/Baseline/language.table/out b/testing/btest/Baseline/language.table/out new file mode 100644 index 0000000000..8a45707e2d --- /dev/null +++ b/testing/btest/Baseline/language.table/out @@ -0,0 +1,37 @@ +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +iterate over table (PASS) +iterate over table (PASS) +iterate over table (PASS) +iterate over table (PASS) +iterate over table (PASS) +add element (PASS) +in operator (PASS) +add element (PASS) +add element (PASS) +in operator (PASS) +in operator (PASS) +add element (PASS) +in operator (PASS) +add element (PASS) +in operator (PASS) +add element (PASS) +in operator (PASS) +remove element (PASS) +!in operator (PASS) +remove element (PASS) +!in operator (PASS) +remove element (PASS) +!in operator (PASS) +remove element (PASS) +!in operator (PASS) +remove element (PASS) +!in operator (PASS) diff --git a/testing/btest/Baseline/language.time/out b/testing/btest/Baseline/language.time/out new file mode 100644 index 0000000000..3615a17c53 --- /dev/null +++ b/testing/btest/Baseline/language.time/out @@ -0,0 +1,7 @@ +add interval (PASS) +subtract interval (PASS) +inequality (PASS) +equality (PASS) +subtract time (PASS) +size operator (PASS) +type inference (PASS) diff --git a/testing/btest/Baseline/language.timeout/out b/testing/btest/Baseline/language.timeout/out new file mode 100644 index 0000000000..790851a6bb --- /dev/null +++ b/testing/btest/Baseline/language.timeout/out @@ -0,0 +1 @@ +timeout diff --git a/testing/btest/Baseline/language.vector/out b/testing/btest/Baseline/language.vector/out new file mode 100644 index 0000000000..4196b36141 --- /dev/null +++ b/testing/btest/Baseline/language.vector/out @@ -0,0 +1,31 @@ +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +iterate over vector (PASS) +iterate over vector (PASS) +iterate over vector (PASS) +add element (PASS) +access 
element (PASS) +add element (PASS) +add element (PASS) +access element (PASS) +access element (PASS) +add element (PASS) +access element (PASS) +add element (PASS) +access element (PASS) +add element (PASS) +access element (PASS) +overwrite element (PASS) +access element (PASS) +overwrite element (PASS) +access element (PASS) +access element (PASS) +overwrite element (PASS) +access element (PASS) +overwrite element (PASS) +access element (PASS) +overwrite element (PASS) +access element (PASS) diff --git a/testing/btest/Baseline/language.when/out b/testing/btest/Baseline/language.when/out new file mode 100644 index 0000000000..3a052217ab --- /dev/null +++ b/testing/btest/Baseline/language.when/out @@ -0,0 +1,2 @@ +done +lookup successful diff --git a/testing/btest/language/addr.bro b/testing/btest/language/addr.bro new file mode 100644 index 0000000000..b97710ce22 --- /dev/null +++ b/testing/btest/language/addr.bro @@ -0,0 +1,46 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event bro_init() +{ + # IPv4 addresses + local a1: addr = 0.0.0.0; + local a2: addr = 10.0.0.11; + local a3: addr = 255.255.255.255; + + test_case( "IPv4 address inequality", a1 != a2 ); + test_case( "IPv4 address equality", a1 == 0.0.0.0 ); + test_case( "IPv4 address comparison", a1 < a2 ); + test_case( "IPv4 address comparison", a3 > a2 ); + test_case( "size of IPv4 address", |a1| == 32 ); + + # IPv6 addresses + local b1: addr = [::]; + local b2: addr = [::255.255.255.255]; + local b3: addr = [::ffff:ffff]; + local b4: addr = [ffff::ffff]; + local b5: addr = [0000:0000:0000:0000:0000:0000:0000:0000]; + local b6: addr = [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]; + local b7: addr = [AAAA:BBBB:CCCC:DDDD:EEEE:FFFF:1111:2222]; + + test_case( "IPv6 address inequality", b1 != b2 ); + test_case( "IPv6 address equality", b1 == b5 ); + test_case( "IPv6 address equality", b2 == b3 ); + test_case( "IPv6 address comparison", b1 < b2 ); + test_case( "IPv6 address comparison", b4 > b2 ); + test_case( "IPv6 address not case-sensitive", b6 == b7 ); + test_case( "size of IPv6 address", |b1| == 128 ); + + test_case( "IPv4 and IPv6 address inequality", a1 != b1 ); + + # type inference + local x = 192.1.2.3; + local y = [a::b]; +} + diff --git a/testing/btest/language/bool.bro b/testing/btest/language/bool.bro new file mode 100644 index 0000000000..09614b516e --- /dev/null +++ b/testing/btest/language/bool.bro @@ -0,0 +1,28 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event bro_init() +{ + local b1: bool = T; + local b2: bool = F; + local b3: bool = T; + + test_case( "equality operator", b1 == b3 ); + test_case( "inequality operator", b1 != b2 ); + test_case( "logical or operator", b1 || b2 ); + test_case( "logical and operator", b1 && b3 ); + test_case( "negation operator", !b2 ); + test_case( "absolute value", |b1| == 1 ); + test_case( "absolute value", |b2| == 0 ); + + # type inference + local x = T; + local y = F; +} + diff --git a/testing/btest/language/count.bro b/testing/btest/language/count.bro new file mode 100644 index 0000000000..f2c248eae9 --- /dev/null +++ b/testing/btest/language/count.bro @@ -0,0 +1,42 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +event bro_init() +{ + local c1: count = 0; + local c2: count = 5; + local c3: count = 0xff; + local c4: count = 255; + local c5: count = 4294967295; # maximum allowed value + local c6: counter = 5; + + test_case( "inequality operator", c1 != c2 ); + test_case( "relational operator", c1 < c2 ); + test_case( "relational operator", c1 <= c2 ); + test_case( "relational operator", c2 > c1 ); + test_case( "relational operator", c2 >= c1 ); + test_case( "hexadecimal", c3 == c4 ); + test_case( "counter alias", c2 == c6 ); + test_case( "absolute value", |c1| == 0 ); + test_case( "absolute value", |c2| == 5 ); + test_case( "pre-increment operator", ++c2 == 6 ); + test_case( "pre-decrement operator", --c2 == 5 ); + test_case( "modulus operator", c2%2 == 1 ); + test_case( "division operator", c2/2 == 2 ); + c2 += 3; + test_case( "assignment operator", c2 == 8 ); + c2 -= 2; + test_case( "assignment operator", c2 == 6 ); + local str1 = fmt("max count value = %d", c5); + test_case( str1, str1 == "max count value = 4294967295" ); + + # type inference + local x = 1; +} + diff --git a/testing/btest/language/double.bro b/testing/btest/language/double.bro new file mode 100644 index 0000000000..bee7e41a94 --- /dev/null +++ b/testing/btest/language/double.bro @@ -0,0 +1,66 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event bro_init() +{ + local d1: double = 3; + local d2: double = +3; + local d3: double = 3.; + local d4: double = 3.0; + local d5: double = +3.0; + local d6: double = 3e0; + local d7: double = 3E0; + local d8: double = 3e+0; + local d9: double = 3e-0; + local d10: double = 3.0e0; + local d11: double = +3.0e0; + local d12: double = +3.0e+0; + local d13: double = +3.0E+0; + local d14: double = +3.0E-0; + local d15: double = .03E+2; + local d16: double = .03E2; + local d17: double = 3.0001; + local d18: double = -3.0001; + local d19: double = 1.7e308; # almost maximum allowed value + + test_case( "double representations", d1 == d2 ); + test_case( "double representations", d1 == d3 ); + test_case( "double representations", d1 == d4 ); + test_case( "double representations", d1 == d5 ); + test_case( "double representations", d1 == d6 ); + test_case( "double representations", d1 == d7 ); + test_case( "double representations", d1 == d8 ); + test_case( "double representations", d1 == d9 ); + test_case( "double representations", d1 == d10 ); + test_case( "double representations", d1 == d11 ); + test_case( "double representations", d1 == d12 ); + test_case( "double representations", d1 == d13 ); + test_case( "double representations", d1 == d14 ); + test_case( "double representations", d1 == d15 ); + test_case( "double representations", d1 == d16 ); + test_case( "inequality operator", d18 != d17 ); + test_case( "absolute value", |d18| == d17 ); + d4 += 2; + test_case( "assignment operator", d4 == 5.0 ); + d4 -= 3; + test_case( "assignment operator", d4 == 2.0 ); + test_case( "relational operator", d4 <= d3 ); + test_case( "relational operator", d4 < d3 ); + test_case( "relational operator", d17 >= d3 ); + test_case( "relational operator", d17 > d3 ); + test_case( "division operator", d3/2 == 1.5 ); + local str1 = fmt("max double value = %.1e", d19); + test_case( str1, str1 == "max double value = 1.7e+308" ); + + # type inference + local x = 7.0; + local y = 7e0; + local z = 7e+1; +} + diff --git a/testing/btest/language/enum.bro 
b/testing/btest/language/enum.bro new file mode 100644 index 0000000000..5cafb323a6 --- /dev/null +++ b/testing/btest/language/enum.bro @@ -0,0 +1,32 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +# enum with optional comma at end of definition +type color: enum { Red, White, Blue, }; + +# enum without optional comma +type city: enum { Rome, Paris }; + + +event bro_init() +{ + local e1: color = Blue; + local e2: color = White; + local e3: color = Blue; + local e4: city = Rome; + + test_case( "enum equality comparison", e1 != e2 ); + test_case( "enum equality comparison", e1 == e3 ); + test_case( "enum equality comparison", e1 != e4 ); + + # type inference + local x = Blue; + test_case( "type inference", x == e1 ); +} + diff --git a/testing/btest/language/event.bro b/testing/btest/language/event.bro new file mode 100644 index 0000000000..1ea5c7b6d8 --- /dev/null +++ b/testing/btest/language/event.bro @@ -0,0 +1,49 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + + +event e1() + { + print "event statement"; + return; + print "Error: this should not happen"; + } + +event e2() + { + print "schedule statement"; + } + +event e3(test: string) + { + print "event part1"; + } + +event e4(num: count) + { + print "assign event variable"; + } + +# Note: the name of this event is intentionally the same as one above +event e3(test: string) + { + print "event part2"; + } + +event bro_init() +{ + # Test calling an event with "event" statement + event e1(); + + # Test calling an event with "schedule" statement + schedule 1 sec { e2() }; + + # Test calling an event that has two separate definitions + event e3("foo"); + + # Test assigning an event variable to an event + local e5: event(num: count); + e5 = e4; + event e5(6); # TODO: this does not do anything +} + diff --git a/testing/btest/language/file.bro b/testing/btest/language/file.bro new file mode 100644 index 0000000000..77650a6082 --- /dev/null +++ b/testing/btest/language/file.bro @@ -0,0 +1,19 @@ +# @TEST-EXEC: bro %INPUT +# @TEST-EXEC: btest-diff out1 +# @TEST-EXEC: btest-diff out2 + + +event bro_init() +{ + # Test using "print" statement to output directly to a file + local f1: file = open( "out1" ); + print f1, 20; + print f1, 12; + close(f1); + + # Test again, but without explicitly using the type name in declaration + local f2 = open( "out2" ); + print f2, "test", 123, 456; + close(f2); +} + diff --git a/testing/btest/language/for.bro b/testing/btest/language/for.bro new file mode 100644 index 0000000000..f10ef0eb1b --- /dev/null +++ b/testing/btest/language/for.bro @@ -0,0 +1,44 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + + +event bro_init() +{ + local vv: vector of string = vector( "a", "b", "c" ); + local ct: count = 0; + + # Test a "for" loop without "break" or "next" + + ct = 0; + for ( i in vv ) ++ct; + test_case("for loop", ct == 3 ); + + # Test the "break" statement + + ct = 0; + for ( i in vv ) + { + ++ct; + break; + test_case("Error: this should not happen", F); + } + test_case("for loop with break", ct == 1 ); + + # Test the "next" statement + + ct = 0; + for ( i in vv ) + { + ++ct; + next; + test_case("Error: this should not happen", F); + } + test_case("for loop with next", ct == 3 ); +} + diff --git a/testing/btest/language/function.bro b/testing/btest/language/function.bro new file mode 100644 index 0000000000..13efbb91f8 --- /dev/null +++ b/testing/btest/language/function.bro @@ -0,0 +1,73 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +function f1() + { + test_case("no args without return value", T ); + } + +function f2() + { + test_case("no args no return value, empty return", T ); + return; + } + +function f3(): bool + { + return T; + } + +function f4(test: string) + { + test_case("args without return value", T ); + } + +function f5(test: string): bool + { + return T; + } + +function f6(test: string, num: count): bool + { + local val: int = -num; + if ( test == "bar" && num == 3 && val < 0 ) return T; + return F; + } + +function f7(test: string): bool + { + return F; + } + +event bro_init() +{ + f1(); + f2(); + test_case("no args with return value", f3() ); + f4("foo"); + test_case("args with return value", f5("foo") ); + test_case("multiple args with return value", f6("bar", 3) ); + + local f10 = function() { test_case("anonymous function without args or return value", T ); }; + f10(); + + local f11 = function(): bool { return T; }; + test_case("anonymous function with return value", f11() ); + + local f12 = function(val: int): bool { if (val > 0) return T; else return F; }; + test_case("anonymous function with args and return value", f12(2) ); + + # Test that a function variable can later be assigned to a function + local f13: function(test: string): bool; + f13 = f5; + test_case("assign function variable", f13("foo") ); + f13 = f7; + test_case("reassign function variable", !f13("bar") ); +} + diff --git a/testing/btest/language/if.bro b/testing/btest/language/if.bro new file mode 100644 index 0000000000..e9acea865f --- /dev/null +++ b/testing/btest/language/if.bro @@ -0,0 +1,71 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + + +event bro_init() +{ + # Test "if" without "else" + + if ( T ) test_case( "if T", T); + + if ( F ) test_case( "Error: this should not happen", F); + + # Test "if" with only an "else" + + if ( T ) test_case( "if T else", T); + else test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else test_case( "if F else", T); + + # Test "if" with only an "else if" + + if ( T ) test_case( "if T else if F", T); + else if ( F ) test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else if ( T ) test_case( "if F else if T", T); + + if ( T ) test_case( "if T else if T", T); + else if ( T ) test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else if ( F ) test_case( "Error: this should not happen", F); + + # Test "if" with both "else if" and "else" + + if ( T ) test_case( "if T else if F else", T); + else if ( F ) test_case( "Error: this should not happen", F); + else test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else if ( T ) test_case( "if F else if T else", T); + else test_case( "Error: this should not happen", F); + + if ( T ) test_case( "if T else if T else", T); + else if ( T ) test_case( "Error: this should not happen", F); + else test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else if ( F ) test_case( "Error: this should not happen", F); + else test_case( "if F else if F else", T); + + # Test "if" with multiple "else if" and an "else" + + if ( F ) test_case( "Error: this should not happen", F); + else if ( F ) test_case( "Error: this should not happen", F); + else if ( T ) test_case( "if F else if F else if T else", T); + else test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else if ( F ) test_case( "Error: this should not happen", F); + else if ( F ) test_case( "Error: this should not happen", F); + else test_case( "if F else if F else if F else", T); +} + diff --git a/testing/btest/language/int.bro b/testing/btest/language/int.bro new file mode 100644 index 0000000000..0c11b94235 --- /dev/null +++ b/testing/btest/language/int.bro @@ -0,0 +1,54 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event bro_init() +{ + local i1: int = 3; + local i2: int = +3; + local i3: int = -3; + local i4: int = +0; + local i5: int = -0; + local i6: int = 12; + local i7: int = 0xc; + local i8: int = 0xC; + local i9: int = -0xC; + local i10: int = -12; + local i11: int = 4294967295; + local i12: int = -4294967295; + + test_case( "optional '+' sign", i1 == i2 ); + test_case( "negative vs. positive", i1 != i3 ); + test_case( "negative vs. 
positive", i4 == i5 ); + test_case( "hexadecimal", i6 == i7 ); + test_case( "hexadecimal", i6 == i8 ); + test_case( "hexadecimal", i9 == i10 ); + test_case( "relational operator", i2 > i3 ); + test_case( "relational operator", i2 >= i3 ); + test_case( "relational operator", i3 < i2 ); + test_case( "relational operator", i3 <= i2 ); + test_case( "absolute value", |i4| == 0 ); + test_case( "absolute value", |i3| == 3 ); + test_case( "pre-increment operator", ++i2 == 4 ); + test_case( "pre-decrement operator", --i2 == 3 ); + test_case( "modulus operator", i2%2 == 1 ); + test_case( "division operator", i2/2 == 1 ); + i2 += 4; + test_case( "assignment operator", i2 == 7 ); + i2 -= 2; + test_case( "assignment operator", i2 == 5 ); + local str1 = fmt("max int value = %d", i11); + test_case( str1, str1 == "max int value = 4294967295" ); + local str2 = fmt("min int value = %d", i12); + test_case( str2, str2 == "min int value = -4294967295" ); + + # type inference + local x = +3; + test_case( "type inference", type_name(x) == "int" ); +} + diff --git a/testing/btest/language/interval.bro b/testing/btest/language/interval.bro new file mode 100644 index 0000000000..9467db9397 --- /dev/null +++ b/testing/btest/language/interval.bro @@ -0,0 +1,77 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +function approx_equal(x: double, y: double): bool + { + # return T if x and y are approximately equal, and F otherwise + return |(x - y)/x| < 1e-6 ? T : F; + } + +event bro_init() +{ + # constants without space and no letter "s" + local in11: interval = 2usec; + local in12: interval = 2msec; + local in13: interval = 120sec; + local in14: interval = 2min; + local in15: interval = -2hr; + # TODO: this one causes bro to fail + #local in16: interval = 2.5day; + + # constants with space and no letter "s" + local in21: interval = 2 usec; + local in22: interval = 2 msec; + local in23: interval = 120 sec; + local in24: interval = 2 min; + local in25: interval = -2 hr; + local in26: interval = 2.5 day; + + # constants with space and letter "s" + local in31: interval = 2 usecs; + local in32: interval = 2 msecs; + local in33: interval = 120 secs; + local in34: interval = 2 mins; + local in35: interval = -2 hrs; + local in36: interval = 2.5 days; + + test_case( "optional space", in11 == in21 ); + test_case( "different units with same numeric value", in11 != in12 ); + test_case( "plural/singular interval are same", in11 == in31 ); + test_case( "compare different time units", in13 == in34 ); + test_case( "compare different time units", in13 <= in34 ); + test_case( "compare different time units", in13 >= in34 ); + test_case( "compare different time units", in13 < in36 ); + test_case( "compare different time units", in13 <= in36 ); + test_case( "compare different time units", in13 > in35 ); + test_case( "compare different time units", in13 >= in35 ); + test_case( "add different time units", in13 + in14 == 4min ); + test_case( "subtract different time units", in24 - in23 == 0sec ); + test_case( "absolute value", |in25| == 2.0*3600 ); + test_case( "absolute value", |in36| == 2.5*86400 ); + in34 += 2hr; + test_case( "assignment operator", in34 == 122min ); + # TODO: this should work (subtraction works) + #in34 -= 2hr; + #test_case( "assignment operator", in34 == 2min ); + test_case( "multiplication operator", in33*2 == 4min ); + test_case( "division operator", in35/2 == -1hr ); + test_case( "division 
operator", approx_equal(in32/in31, 1e3) ); + + test_case( "relative size of units", approx_equal(1msec/1usec, 1000) ); + test_case( "relative size of units", approx_equal(1sec/1msec, 1000) ); + test_case( "relative size of units", approx_equal(1min/1sec, 60) ); + test_case( "relative size of units", approx_equal(1hr/1min, 60) ); + test_case( "relative size of units", approx_equal(1day/1hr, 24) ); + + # type inference + local x = 2 usec; + # TODO: this one causes bro to fail + #local y = 2.1usec; + local z = 3usecs; +} + diff --git a/testing/btest/language/null-statement.bro b/testing/btest/language/null-statement.bro new file mode 100644 index 0000000000..420ebd8a6c --- /dev/null +++ b/testing/btest/language/null-statement.bro @@ -0,0 +1,34 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + + +function f1(test: string) + { + ; # null statement in function + } + +event bro_init() +{ + local s1: set[string] = set( "this", "test" ); + + ; # null statement in event + + for ( i in s1 ) + ; # null statement in for loop + + if ( |s1| > 0 ) ; # null statement in if statement + + f1("foo"); + + { ; } # null compound statement + + if ( |s1| == 0 ) + { + print "Error: this should not happen"; + } + else + ; # null statement in else + + print "done"; +} + diff --git a/testing/btest/language/pattern.bro b/testing/btest/language/pattern.bro new file mode 100644 index 0000000000..de33e4d2b6 --- /dev/null +++ b/testing/btest/language/pattern.bro @@ -0,0 +1,28 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event bro_init() +{ + local p1: pattern = /foo|bar/; + local p2: pattern = /oob/; + local p3: pattern = /^oob/; + + test_case( "equality operator", "foo" == p1 ); + test_case( "equality operator (order of operands)", p1 == "foo" ); + test_case( "inequality operator", "foobar" != p1 ); + test_case( "in operator", p1 in "foobar" ); + test_case( "in operator", p2 in "foobar" ); + test_case( "!in operator", p3 !in "foobar" ); + + # type inference + local x = /foo|bar/; + local y = /foo/; + local z = /^foo/; +} + diff --git a/testing/btest/language/port.bro b/testing/btest/language/port.bro new file mode 100644 index 0000000000..b45401da7a --- /dev/null +++ b/testing/btest/language/port.bro @@ -0,0 +1,35 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +event bro_init() +{ + local p1: port = 1/icmp; + local p2: port = 2/udp; + local p3: port = 3/tcp; + local p4: port = 4/unknown; + + # maximum allowed values for each port type + local p5: port = 255/icmp; + local p6: port = 65535/udp; + local p7: port = 65535/tcp; + local p8: port = 255/unknown; + + test_case( "protocol ordering", p1 > p2 ); + test_case( "protocol ordering", p2 > p3 ); + test_case( "protocol ordering", p3 > p4 ); + test_case( "protocol ordering", p7 < p6 ); + test_case( "protocol ordering", p8 < p5 ); + test_case( "different protocol but same numeric value", p6 != p7 ); + test_case( "different protocol but same numeric value", p5 != p8 ); + test_case( "equality operator", 65535/tcp == p7 ); + + # type inference + local x = 123/tcp; +} + diff --git a/testing/btest/language/set.bro b/testing/btest/language/set.bro new file mode 100644 index 0000000000..66b2ebc3af --- /dev/null +++ b/testing/btest/language/set.bro @@ -0,0 +1,121 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +# Note: only global sets can be initialized with curly braces +global s10: set[string] = { "curly", "braces" }; +global s11: set[port, string, bool] = { [10/udp, "curly", F], + [11/udp, "braces", T] }; + +event bro_init() +{ + local s1: set[string] = set( "test", "example" ); + local s2: set[string] = set(); + local s3: set[string]; + local s4 = set( "type inference" ); + local s5: set[port, string, bool] = set( [1/tcp, "test", T], + [2/tcp, "example", F] ); + local s6: set[port, string, bool] = set(); + local s7: set[port, string, bool]; + local s8 = set( [8/tcp, "type inference", T] ); + + # Test the size of each set + test_case( "cardinality", |s1| == 2 ); + test_case( "cardinality", |s2| == 0 ); + test_case( "cardinality", |s3| == 0 ); + test_case( "cardinality", |s4| == 1 ); + test_case( "cardinality", |s5| == 2 ); + test_case( "cardinality", |s6| == 0 ); + test_case( "cardinality", |s7| == 0 ); + test_case( "cardinality", |s8| == 1 ); + test_case( "cardinality", |s10| == 2 ); + test_case( "cardinality", |s11| == 2 ); + + # Test iterating over each set + local ct: count; + ct = 0; + for ( c in s1 ) + { + if ( type_name(c) != "string" ) + print "Error: wrong set element type"; + ++ct; + } + test_case( "iterate over set", ct == 2 ); + + ct = 0; + for ( c in s2 ) + { + ++ct; + } + test_case( "iterate over set", ct == 0 ); + + ct = 0; + for ( [c1,c2,c3] in s5 ) + { + ++ct; + } + test_case( "iterate over set", ct == 2 ); + + ct = 0; + for ( [c1,c2,c3] in s11 ) + { + ++ct; + } + test_case( "iterate over set", ct == 2 ); + + # Test adding elements to each set + add s1["added"]; + test_case( "add element", |s1| == 3 ); + test_case( "in operator", "added" in s1 ); + + add s2["another"]; + test_case( "add element", |s2| == 1 ); + add s2["test"]; + test_case( "add element", |s2| == 2 ); + test_case( "in operator", "another" in s2 ); + test_case( "in operator", "test" in s2 ); + + add s3["foo"]; + test_case( "add element", |s3| == 1 ); + test_case( "in operator", "foo" in s3 ); + + add s4["local"]; + test_case( "add element", |s4| == 2 ); + test_case( "in operator", "local" in s4 ); + + # Note: cannot add elements to sets of multiple types + + add s10["global"]; + test_case( "add element", |s10| == 3 ); + test_case( "in operator", "global" in s10 ); + + # Test removing elements from each set + delete s1["test"]; + delete s1["foobar"]; # element does not 
exist + test_case( "remove element", |s1| == 2 ); + test_case( "!in operator", "test" !in s1 ); + + delete s2["test"]; + test_case( "remove element", |s2| == 1 ); + test_case( "!in operator", "test" !in s2 ); + + delete s3["foo"]; + test_case( "remove element", |s3| == 0 ); + test_case( "!in operator", "foo" !in s3 ); + + delete s4["type inference"]; + test_case( "remove element", |s4| == 1 ); + test_case( "!in operator", "type inference" !in s4 ); + + # Note: cannot remove elements from sets of multiple types + + delete s10["braces"]; + test_case( "remove element", |s10| == 2 ); + test_case( "!in operator", "braces" !in s10 ); +} + diff --git a/testing/btest/language/short-circuit.bro b/testing/btest/language/short-circuit.bro new file mode 100644 index 0000000000..f0ba585cea --- /dev/null +++ b/testing/btest/language/short-circuit.bro @@ -0,0 +1,48 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +global ct: count; + +function t_func(): bool + { + ct += 1; + return T; + } + +function f_func(): bool + { + ct += 2; + return F; + } + + +event bro_init() +{ + local res: bool; + + # both functions should be called + ct = 0; + res = t_func() && f_func(); + test_case("&& operator (eval. both operands)", res == F && ct == 3 ); + + # only first function should be called + ct = 0; + res = f_func() && t_func(); + test_case("&& operator (eval. 1st operand)", res == F && ct == 2 ); + + # only first function should be called + ct = 0; + res = t_func() || f_func(); + test_case("|| operator (eval. 1st operand)", res == T && ct == 1 ); + + # both functions should be called + ct = 0; + res = f_func() || t_func(); + test_case("|| operator (eval. both operands)", res == T && ct == 3 ); +} + diff --git a/testing/btest/language/string.bro b/testing/btest/language/string.bro new file mode 100644 index 0000000000..b9a17e3645 --- /dev/null +++ b/testing/btest/language/string.bro @@ -0,0 +1,59 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +event bro_init() +{ + local s1: string = ""; # empty string + local s2: string = "x"; # no escape sequences + local s3: string = "a\0b"; # null character + local s4: string = "a\tb"; # tab + local s5: string = "a\nb"; # newline + local s6: string = "a\xffb"; # hex value + local s7: string = "a\x00b"; # hex value + local s8: string = "a\x0ab"; # hex value + local s9: string = "a\011b"; # octal value + local s10: string = "a\"b"; # double quote + local s11: string = "a\\b"; # backslash + local s12: string = s2 + s3; # string concatenation + local s13: string = "test"; + local s14: string = "this is a very long string" + + "which continues on the next line" + + "the end"; + local s15: string = "on"; + + test_case( "empty string", |s1| == 0 ); + test_case( "nonempty string", |s2| == 1 ); + test_case( "string comparison", s2 > s3 ); + test_case( "string comparison", s2 >= s3 ); + test_case( "string comparison", s3 < s2 ); + test_case( "string comparison", s3 <= s2 ); + test_case( "null escape sequence", |s3| == 3 ); + test_case( "tab escape sequence", |s4| == 3 ); + test_case( "newline escape sequence", |s5| == 3 ); + test_case( "hex escape sequence", |s6| == 3 ); + test_case( "hex escape sequence", |s7| == 3 ); + test_case( "hex escape sequence", |s8| == 3 ); + test_case( "octal escape sequence", |s9| == 3 ); + test_case( "quote escape sequence", |s10| == 3 ); + test_case( "backslash escape sequence", |s11| == 3 ); + test_case( "null escape sequence", s3 == s7 ); + test_case( "newline escape sequence", s5 == s8 ); + test_case( "tab escape sequence", s4 == s9 ); + test_case( "string concatenation", |s12| == 4 ); + s13 += s2; + test_case( "string concatenation", s13 == "testx" ); + test_case( "long string initialization", |s14| == 65 ); + test_case( "in operator", s15 in s14 ); + test_case( "!in operator", s15 !in s13 ); + + # type inference + local x = "x"; + test_case( "type inference", x == s2 ); +} + diff --git a/testing/btest/language/subnet.bro b/testing/btest/language/subnet.bro new file mode 100644 index 0000000000..63d09f916b --- /dev/null +++ b/testing/btest/language/subnet.bro @@ -0,0 +1,48 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +# TODO: "subnet inequality" tests (i.e., tests with "!=") always fail + +event bro_init() +{ + # IPv4 addr + local a1: addr = 192.1.2.3; + + # IPv4 subnets + local s1: subnet = 0.0.0.0/0; + local s2: subnet = 192.0.0.0/8; + local s3: subnet = 255.255.255.255/32; + + test_case( "IPv4 subnet equality", a1/8 == s2 ); + test_case( "IPv4 subnet inequality", a1/4 != s2 ); + test_case( "IPv4 subnet in operator", a1 in s2 ); + test_case( "IPv4 subnet !in operator", a1 !in s3 ); + + # IPv6 addr + local b1: addr = [ffff::]; + local b2: addr = [ffff::1]; + local b3: addr = [ffff:1::1]; + + # IPv6 subnets + local t1: subnet = [::]/0; + local t2: subnet = [ffff::]/64; + + test_case( "IPv6 subnet equality", b1/64 == t2 ); + test_case( "IPv6 subnet inequality", b3/64 != t2 ); + test_case( "IPv6 subnet in operator", b2 in t2 ); + test_case( "IPv6 subnet !in operator", b3 !in t2 ); + + test_case( "IPv4 and IPv6 subnet inequality", s1 != t1 ); + test_case( "IPv4 address and IPv6 subnet", a1 !in t2 ); + + # type inference + local x = 10.0.0.0/16; + local y = [a::]/32; +} + diff --git a/testing/btest/language/table.bro b/testing/btest/language/table.bro new file mode 100644 index 0000000000..d7fc677a6d --- /dev/null +++ b/testing/btest/language/table.bro @@ -0,0 +1,131 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event bro_init() +{ + local t1: table[count] of string = table( [5] = "test", [0] = "example" ); + local t2: table[count] of string = table(); + local t3: table[count] of string; + local t4 = table( [1] = "type inference" ); + local t5: table[count] of string = { [1] = "curly", [3] = "braces" }; + local t6: table[port, string, bool] of string = table( + [1/tcp, "test", T] = "test1", + [2/tcp, "example", F] = "test2" ); + local t7: table[port, string, bool] of string = table(); + local t8: table[port, string, bool] of string; + local t9 = table( [8/tcp, "type inference", T] = "this" ); + local t10: table[port, string, bool] of string = { + [10/udp, "curly", F] = "first", + [11/udp, "braces", T] = "second" }; + + # Test the size of each table + test_case( "cardinality", |t1| == 2 ); + test_case( "cardinality", |t2| == 0 ); + test_case( "cardinality", |t3| == 0 ); + test_case( "cardinality", |t4| == 1 ); + test_case( "cardinality", |t5| == 2 ); + test_case( "cardinality", |t6| == 2 ); + test_case( "cardinality", |t7| == 0 ); + test_case( "cardinality", |t8| == 0 ); + test_case( "cardinality", |t9| == 1 ); + test_case( "cardinality", |t10| == 2 ); + + # Test iterating over each table + local ct: count; + ct = 0; + for ( c in t1 ) + { + if ( type_name(c) != "count" ) + print "Error: wrong index type"; + if ( type_name(t1[c]) != "string" ) + print "Error: wrong table type"; + ++ct; + } + test_case( "iterate over table", ct == 2 ); + + ct = 0; + for ( c in t2 ) + { + ++ct; + } + test_case( "iterate over table", ct == 0 ); + + ct = 0; + for ( c in t3 ) + { + ++ct; + } + test_case( "iterate over table", ct == 0 ); + + ct = 0; + for ( [c1, c2, c3] in t6 ) + { + ++ct; + } + test_case( "iterate over table", ct == 2 ); + + ct = 0; + for ( [c1, c2, c3] in t7 ) + { + ++ct; + } + test_case( "iterate over table", ct == 0 ); + + # Test adding elements to each table + t1[1] = "added"; + test_case( "add element", |t1| == 3 ); + test_case( "in operator", 1 in t1 ); + + t2[11] = "another"; + test_case( "add element", |t2| == 1 ); + t2[0] = "test"; + test_case( 
"add element", |t2| == 2 ); + test_case( "in operator", 11 in t2 ); + test_case( "in operator", 0 in t2 ); + + t3[3] = "foo"; + test_case( "add element", |t3| == 1 ); + test_case( "in operator", 3 in t3 ); + + t4[4] = "local"; + test_case( "add element", |t4| == 2 ); + test_case( "in operator", 4 in t4 ); + + t5[10] = "local2"; + test_case( "add element", |t5| == 3 ); + test_case( "in operator", 10 in t5 ); + + # Note: cannot add elements to tables of multiple types + + # Test removing elements from each table + delete t1[0]; + delete t1[17]; # element does not exist + test_case( "remove element", |t1| == 2 ); + test_case( "!in operator", 0 !in t1 ); + + delete t2[0]; + test_case( "remove element", |t2| == 1 ); + test_case( "!in operator", 0 !in t2 ); + + delete t3[3]; + test_case( "remove element", |t3| == 0 ); + test_case( "!in operator", 3 !in t3 ); + + delete t4[1]; + test_case( "remove element", |t4| == 1 ); + test_case( "!in operator", 1 !in t4 ); + + delete t5[1]; + test_case( "remove element", |t5| == 2 ); + test_case( "!in operator", 1 !in t5 ); + + # Note: cannot remove elements from tables of multiple types + +} + diff --git a/testing/btest/language/time.bro b/testing/btest/language/time.bro new file mode 100644 index 0000000000..588cbf8887 --- /dev/null +++ b/testing/btest/language/time.bro @@ -0,0 +1,28 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event bro_init() +{ + local t1: time = current_time(); + local t2: time = t1 + 3 sec; + local t3: time = t2 - 10 sec; + local t4: time = t1; + local t5: interval = t2 - t1; + + test_case( "add interval", t1 < t2 ); + test_case( "subtract interval", t1 > t3 ); + test_case( "inequality", t1 != t3 ); + test_case( "equality", t1 == t4 ); + test_case( "subtract time", t5 == 3sec); + test_case( "size operator", |t1| > 1.0); + + local x = current_time(); + test_case( "type inference", x > t1 ); +} + diff --git a/testing/btest/language/timeout.bro b/testing/btest/language/timeout.bro new file mode 100644 index 0000000000..6bc0419b2f --- /dev/null +++ b/testing/btest/language/timeout.bro @@ -0,0 +1,19 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + + +event bro_init() +{ + local h1: addr = 1.2.3.4; + + when ( local h1name = lookup_addr(h1) ) + { + print "lookup successful"; + } + timeout 3 secs + { + print "timeout"; + } + +} + diff --git a/testing/btest/language/vector.bro b/testing/btest/language/vector.bro new file mode 100644 index 0000000000..320736238e --- /dev/null +++ b/testing/btest/language/vector.bro @@ -0,0 +1,104 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +# Note: only global vectors can be initialized with curly braces +global v5: vector of string = { "curly", "braces" }; + +event bro_init() +{ + local v1: vector of string = vector( "test", "example" ); + local v2: vector of string = vector(); + local v3: vector of string; + local v4 = vector( "type inference" ); + + # Test the size of each vector + + test_case( "cardinality", |v1| == 2 ); + test_case( "cardinality", |v2| == 0 ); + test_case( "cardinality", |v3| == 0 ); + test_case( "cardinality", |v4| == 1 ); + test_case( "cardinality", |v5| == 2 ); + + # Test iterating over each vector + + local ct: count; + ct = 0; + for ( c in v1 ) + { + if ( type_name(c) != "int" ) + print "Error: wrong index type"; + if ( type_name(v1[c]) != "string" ) + print "Error: wrong vector type"; + ++ct; + } + test_case( "iterate over vector", ct == 2 ); + + ct = 0; + for ( c in v2 ) + { + ++ct; + } + test_case( "iterate over vector", ct == 0 ); + + ct = 0; + for ( c in v5 ) + { + ++ct; + } + test_case( "iterate over vector", ct == 2 ); + + # Test adding elements to each vector + + v1[2] = "added"; + test_case( "add element", |v1| == 3 ); + test_case( "access element", v1[2] == "added" ); + + v2[0] = "another"; + test_case( "add element", |v2| == 1 ); + v2[1] = "test"; + test_case( "add element", |v2| == 2 ); + test_case( "access element", v2[0] == "another" ); + test_case( "access element", v2[1] == "test" ); + + v3[0] = "foo"; + test_case( "add element", |v3| == 1 ); + test_case( "access element", v3[0] == "foo" ); + + v4[1] = "local"; + test_case( "add element", |v4| == 2 ); + test_case( "access element", v4[1] == "local" ); + + v5[2] = "global"; + test_case( "add element", |v5| == 3 ); + test_case( "access element", v5[2] == "global" ); + + # Test overwriting elements of each vector + + v1[0] = "new1"; + test_case( "overwrite element", |v1| == 3 ); + test_case( "access element", v1[0] == "new1" ); + + v2[1] = "new2"; + test_case( "overwrite element", |v2| == 2 ); + test_case( "access element", v2[0] == "another" ); + test_case( "access element", v2[1] == "new2" ); + + v3[0] = "new3"; + test_case( "overwrite element", |v3| == 1 ); + test_case( "access element", v3[0] == "new3" ); + + v4[0] = "new4"; + test_case( "overwrite element", |v4| == 2 ); + test_case( "access element", v4[0] == "new4" ); + + v5[1] = "new5"; + test_case( "overwrite element", |v5| == 3 ); + test_case( "access element", v5[1] == "new5" ); +} + diff --git a/testing/btest/language/when.bro b/testing/btest/language/when.bro new file mode 100644 index 0000000000..9ad45ab49b --- /dev/null +++ b/testing/btest/language/when.bro @@ -0,0 +1,15 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + + +event bro_init() +{ + local h1: addr = 1.2.3.4; + + when ( local h1name = lookup_addr(h1) ) + { + print "lookup successful"; + } + print "done"; +} + From 70f1403f1420b738d559c0675bd94703cd5af9aa Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 24 Aug 2012 13:18:51 -0700 Subject: [PATCH 598/651] Updating submodule(s). [nomail] --- CHANGES | 2 +- VERSION | 2 +- aux/broctl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index 066ee784a8..87da7378b0 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,5 @@ -2.1-beta-54 | 2012-08-23 11:58:50 -0700 +2.1 | 2012-08-24 13:18:51 -0700 * Update documentation for builtin types. 
(Daniel Thayer) diff --git a/VERSION b/VERSION index fd6e9996db..879b416e60 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1-beta-54 +2.1 diff --git a/aux/broctl b/aux/broctl index 5b3f9e5906..6b24757768 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 5b3f9e5906c90b76c5aa1626e112d4c991cb3fd8 +Subproject commit 6b24757768cd9aa742fd678d6864235519740ee8 From b5c694518904a5f122bc643c02f0518e11c3dade Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 24 Aug 2012 15:11:49 -0700 Subject: [PATCH 599/651] Updating submodule(s). [nomail] --- CHANGES | 2 +- aux/bro-aux | 2 +- aux/broctl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index 87da7378b0..1c6e9dfafe 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,5 @@ -2.1 | 2012-08-24 13:18:51 -0700 +2.1 | 2012-08-24 15:11:49 -0700 * Update documentation for builtin types. (Daniel Thayer) diff --git a/aux/bro-aux b/aux/bro-aux index 4bc1a6f6a8..6748ec3a96 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 4bc1a6f6a8816dfacd8288fcf182ba35520e589b +Subproject commit 6748ec3a96d582a977cd9114ef19c76fe75c57ff diff --git a/aux/broctl b/aux/broctl index 6b24757768..2fb9ff62bf 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 6b24757768cd9aa742fd678d6864235519740ee8 +Subproject commit 2fb9ff62bf08f78071753016863640022fbfe338 From 124c985d7af91a98eb8a7aff8f66b0300849e854 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 26 Aug 2012 14:49:37 -0700 Subject: [PATCH 600/651] Bug found by Keith & Seth: input framework was not handling counts and ints out of 32-bit range correctly. Note - another bugfix will be coming later (problem reading sets containing zero-length-strings & un-escaping bug in sets) --- src/input/readers/Ascii.cc | 6 +-- .../out | 3 ++ .../base/frameworks/input/bignumber.bro | 44 +++++++++++++++++++ 3 files changed, 50 insertions(+), 3 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.bignumber/out create mode 100644 testing/btest/scripts/base/frameworks/input/bignumber.bro diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index fd936b07b6..28b1ed29c9 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -238,7 +238,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) break; case TYPE_INT: - val->val.int_val = atoi(s.c_str()); + val->val.int_val = strtoll(s.c_str(), (char**) NULL, 10); break; case TYPE_DOUBLE: @@ -249,7 +249,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) case TYPE_COUNT: case TYPE_COUNTER: - val->val.uint_val = atoi(s.c_str()); + val->val.uint_val = strtoull(s.c_str(),(char**) NULL, 10); break; case TYPE_PORT: @@ -344,7 +344,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) if ( pos != length ) { - Error("Internal error while parsing set: did not find all elements"); + Error(Fmt("Internal error while parsing set: did not find all elements: %s", s.c_str())); return 0; } diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.bignumber/out b/testing/btest/Baseline/scripts.base.frameworks.input.bignumber/out new file mode 100644 index 0000000000..ab095ca36c --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.bignumber/out @@ -0,0 +1,3 @@ +{ +[9223372036854775800] = [c=18446744073709551612] +} diff --git a/testing/btest/scripts/base/frameworks/input/bignumber.bro b/testing/btest/scripts/base/frameworks/input/bignumber.bro new file mode 100644 index 0000000000..519992be05 --- 
/dev/null +++ b/testing/btest/scripts/base/frameworks/input/bignumber.bro @@ -0,0 +1,44 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#fields i c +#types int count +9223372036854775800 18446744073709551612 +@TEST-END-FILE + +@load frameworks/communication/listen + +global outfile: file; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + c: count; +}; + +global servers: table[int] of Val = table(); + +event bro_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::remove("ssh"); + } + +event Input::update_finished(name: string, source:string) + { + print outfile, servers; + close(outfile); + terminate(); + } From 977c1d7c5adbf1b3bb2be55a99c4bd018e78a524 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 26 Aug 2012 17:52:07 -0700 Subject: [PATCH 601/651] make set_separators different from , work for input framework. 1-line-patch + test. --- .../out | 10 ++++ .../base/frameworks/input/setseparator.bro | 46 +++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.setseparator/out create mode 100644 testing/btest/scripts/base/frameworks/input/setseparator.bro diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.setseparator/out b/testing/btest/Baseline/scripts.base.frameworks.input.setseparator/out new file mode 100644 index 0000000000..d0e0f53310 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.setseparator/out @@ -0,0 +1,10 @@ +{ +[1] = [s={ +b, +e, +d, +c, +f, +a +}, ss=[1, 2, 3, 4, 5, 6]] +} diff --git a/testing/btest/scripts/base/frameworks/input/setseparator.bro b/testing/btest/scripts/base/frameworks/input/setseparator.bro new file mode 100644 index 0000000000..44b9d08d54 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/setseparator.bro @@ -0,0 +1,46 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#fields i s ss +1 a|b|c|d|e|f 1|2|3|4|5|6 +@TEST-END-FILE + +redef InputAscii::set_separator = "|"; + +@load frameworks/communication/listen + +global outfile: file; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + s: set[string]; + ss:vector of count; +}; + +global servers: table[int] of Val = table(); + +event bro_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::remove("ssh"); + } + +event Input::update_finished(name: string, source:string) + { + print outfile, servers; + close(outfile); + terminate(); + } From 6bf733ce513a39804ba73b1e281adba5322f2de6 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 26 Aug 2012 17:53:34 -0700 Subject: [PATCH 602/651] sorry. the patch for the set_separator. 
--- src/input/readers/Ascii.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 28b1ed29c9..e0be235700 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -288,7 +288,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) // how many entries do we have... unsigned int length = 1; for ( unsigned int i = 0; i < s.size(); i++ ) - if ( s[i] == ',' ) length++; + if ( s[i] == set_separator[0] ) length++; unsigned int pos = 0; From a9e6d9ae8154eecb415f86ca9f786f21886fff94 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 26 Aug 2012 19:17:21 -0700 Subject: [PATCH 603/651] Fix two little bugs: Escaped ,'s in sets and vectors were unescaped before tokenization. Handling of zero-length-strings as last element in a set was broken (sets ending with a ,). Hashing of lines just containing zero-length-strings was broken (now a \0 is appended to each string before it is hashed - giving us a hash of something for a line just consisting of \0s. This also allows us to differentiate between vectors with varying numbers of zero-length-strings). --- src/input/Manager.cc | 6 ++- src/input/readers/Ascii.cc | 18 ++++++- .../out | 20 ++++++++ .../base/frameworks/input/setspecialcases.bro | 49 +++++++++++++++++++ 4 files changed, 90 insertions(+), 3 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.setspecialcases/out create mode 100644 testing/btest/scripts/base/frameworks/input/setspecialcases.bro diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 3c29f14928..07ce5b20fc 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1718,7 +1718,7 @@ int Manager::GetValueLength(const Value* val) { case TYPE_STRING: case TYPE_ENUM: { - length += val->val.string_val.length; + length += val->val.string_val.length+1; break; } @@ -1818,7 +1818,9 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) case TYPE_ENUM: { memcpy(data+startpos, val->val.string_val.data, val->val.string_val.length); - return val->val.string_val.length; + // and add a \0 to the end. To be able to hash zero-length strings and differentiate from !present + memset(data+startpos+val->val.string_val.length, 0, 1); + return val->val.string_val.length+1; } case TYPE_ADDR: diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index e0be235700..4bf82c6a13 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -220,6 +220,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) switch ( field.type ) { case TYPE_ENUM: case TYPE_STRING: + s = get_unescaped_string(s); val->val.string_val.length = s.size(); val->val.string_val.data = copy_string(s.c_str()); break; @@ -259,6 +260,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) case TYPE_SUBNET: { + s = get_unescaped_string(s); size_t pos = s.find("/"); if ( pos == s.npos ) { @@ -275,6 +277,7 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) } case TYPE_ADDR: + s = get_unescaped_string(s); val->val.addr_val = StringToAddr(s); break; @@ -342,6 +345,20 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) pos++; } + // test if the string ends with a set_separator... if it does we have to push a zero-length
+ if ( *s.rbegin() == set_separator[0] ) + { + lvals[pos] = EntryToVal("", field.subType()); + if ( lvals[pos] == 0 ) + { + Error("Error while trying to add empty set element"); + return 0; + } + + pos++; + } + if ( pos != length ) { Error(Fmt("Internal error while parsing set: did not find all elements: %s", s.c_str())); @@ -438,7 +455,6 @@ bool Ascii::DoUpdate() if ( ! getline(splitstream, s, separator[0]) ) break; - s = get_unescaped_string(s); stringfields[pos] = s; pos++; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.setspecialcases/out b/testing/btest/Baseline/scripts.base.frameworks.input.setspecialcases/out new file mode 100644 index 0000000000..28d1e025bf --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.setspecialcases/out @@ -0,0 +1,20 @@ +{ +[2] = [s={ +, +testing +}, s=[testing, , testing]], +[4] = [s={ +, +testing +}, s=[testing, ]], +[1] = [s={ +testing,testing,testing, +}, s=[testing,testing,testing,]], +[5] = [s={ + +}, s=[, , , ]], +[3] = [s={ +, +testing +}, s=[, testing]] +} diff --git a/testing/btest/scripts/base/frameworks/input/setspecialcases.bro b/testing/btest/scripts/base/frameworks/input/setspecialcases.bro new file mode 100644 index 0000000000..29819a795f --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/setspecialcases.bro @@ -0,0 +1,49 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#fields i s ss +1 testing\x2ctesting\x2ctesting\x2c testing\x2ctesting\x2ctesting\x2c +2 testing,,testing testing,,testing +3 ,testing ,testing +4 testing, testing, +5 ,,, ,,, +@TEST-END-FILE + + +@load frameworks/communication/listen + +global outfile: file; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + s: set[string]; + s: vector of string; +}; + +global servers: table[int] of Val = table(); + +event bro_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::remove("ssh"); + } + +event Input::update_finished(name: string, source:string) + { + print outfile, servers; + close(outfile); + terminate(); + } From fbe464ffa348c59b980584ad321e206d9a794ac2 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 26 Aug 2012 20:26:08 -0700 Subject: [PATCH 604/651] another small bug found while searching for something else... ...one of the change events got the wrong parameters. This actually is a bit embarassing... --- src/input/Manager.cc | 2 +- .../scripts.base.frameworks.input.reread/out | 240 ++++++++++++++++-- 2 files changed, 223 insertions(+), 19 deletions(-) diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 07ce5b20fc..44d7140485 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1210,7 +1210,7 @@ void Manager::EndCurrentSend(ReaderFrontend* reader) Ref(predidx); Ref(val); Ref(ev); - SendEvent(stream->event, 3, ev, predidx, val); + SendEvent(stream->event, 4, stream->description->Ref(), ev, predidx, val); } if ( predidx ) // if we have a stream or an event... 
diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out index 8b55ced2ac..acc9bfe846 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out @@ -1174,10 +1174,45 @@ BB }, vc=[10, 20, 30], ve=[]] ============EVENT============ Description -Input::EVENT_REMOVED +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] Type +Input::EVENT_REMOVED +Left [i=-43] -Left +Right [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -1190,13 +1225,47 @@ BB }, se={ }, vc=[10, 20, 30], ve=[]] -Right ============EVENT============ Description -Input::EVENT_REMOVED +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] Type +Input::EVENT_REMOVED +Left [i=-46] -Left +Right [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -1209,13 +1278,47 @@ BB }, se={ }, vc=[10, 20, 30], ve=[]] -Right ============EVENT============ Description -Input::EVENT_REMOVED +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] Type +Input::EVENT_REMOVED +Left 
[i=-44] -Left +Right [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -1228,13 +1331,47 @@ BB }, se={ }, vc=[10, 20, 30], ve=[]] -Right ============EVENT============ Description -Input::EVENT_REMOVED +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] Type +Input::EVENT_REMOVED +Left [i=-47] -Left +Right [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -1247,13 +1384,47 @@ BB }, se={ }, vc=[10, 20, 30], ve=[]] -Right ============EVENT============ Description -Input::EVENT_REMOVED +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] Type +Input::EVENT_REMOVED +Left [i=-45] -Left +Right [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -1266,13 +1437,47 @@ BB }, se={ }, vc=[10, 20, 30], ve=[]] -Right ============EVENT============ Description -Input::EVENT_REMOVED +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] Type -[i=-42] +Input::EVENT_REMOVED Left +[i=-42] +Right [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -1285,7 
+1490,6 @@ BB }, se={ }, vc=[10, 20, 30], ve=[]] -Right ==========SERVERS============ { [-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ From 7e46936728f08b1214a6610e194793eb145a1f37 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 26 Aug 2012 20:49:21 -0700 Subject: [PATCH 605/651] Ok, this one is not really necessary for 2.1 and more of a nice-to-have Before this patch, empty values were not hashed at all. Which had the unfortunate side-effect that e.g. the lines TEST - and - TEST have the same hash values. On re-reads that means that the change will be ignored. This is probably pretty academic, but this patch changes it and adds a testcase. Output of the reread test changes due to re-ordering of the output (probably due to the fact that the internal hash values are changed and thus transferred in a different order) --- src/input/Manager.cc | 17 +- .../out | 155 +++++++++++ .../scripts.base.frameworks.input.reread/out | 248 +++++++++--------- .../frameworks/input/empty-values-hashing.bro | 89 +++++++ 4 files changed, 382 insertions(+), 127 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.empty-values-hashing/out create mode 100644 testing/btest/scripts/base/frameworks/input/empty-values-hashing.bro diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 44d7140485..e230c0e489 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -1911,11 +1911,16 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) const Value* val = vals[i]; if ( val->present ) length += GetValueLength(val); - } - if ( length == 0 ) + // and in any case add 1 for the end-of-field-identifier + length++; + } + + + assert ( length >= num_elements ); + + if ( length == num_elements ) { - reporter->Error("Input reader sent line where all elements are null values. Ignoring line"); return NULL; } @@ -1929,6 +1934,12 @@ HashKey* Manager::HashValues(const int num_elements, const Value* const *vals) const Value* val = vals[i]; if ( val->present ) position += CopyValue(data, position, val); + + memset(data+position, 1, 1); // add end-of-field-marker. does not really matter which value it is, + // it just has to be... 
something + + position++; + } HashKey *key = new HashKey(data, length); diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.empty-values-hashing/out b/testing/btest/Baseline/scripts.base.frameworks.input.empty-values-hashing/out new file mode 100644 index 0000000000..474ef45cc2 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.empty-values-hashing/out @@ -0,0 +1,155 @@ +============PREDICATE============ +Input::EVENT_NEW +[i=1] +[s=, ss=TEST] +============PREDICATE============ +Input::EVENT_NEW +[i=2] +[s=, ss=] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[2] = [s=, ss=], +[1] = [s=, ss=TEST] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] +Type +Input::EVENT_NEW +Left +[i=1] +Right +[s=, ss=TEST] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[2] = [s=, ss=], +[1] = [s=, ss=TEST] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] +Type +Input::EVENT_NEW +Left +[i=2] +Right +[s=, ss=] +==========SERVERS============ +{ +[2] = [s=, ss=], +[1] = [s=, ss=TEST] +} +============PREDICATE============ +Input::EVENT_CHANGED +[i=1] +[s=TEST, ss=] +============PREDICATE============ +Input::EVENT_CHANGED +[i=2] +[s=TEST, ss=TEST] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[2] = [s=TEST, ss=TEST], +[1] = [s=TEST, ss=] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] +Type +Input::EVENT_CHANGED +Left +[i=1] +Right +[s=, ss=TEST] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[2] = [s=TEST, ss=TEST], +[1] = [s=TEST, ss=] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print 
A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] +Type +Input::EVENT_CHANGED +Left +[i=2] +Right +[s=, ss=] +==========SERVERS============ +{ +[2] = [s=TEST, ss=TEST], +[1] = [s=TEST, ss=] +} +done diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out index acc9bfe846..538a6dec18 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.reread/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.reread/out @@ -1084,7 +1084,7 @@ BB } ============PREDICATE============ Input::EVENT_REMOVED -[i=-43] +[i=-44] [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -1096,6 +1096,21 @@ AA, BB }, se={ +}, vc=[10, 20, 30], ve=[]] +============PREDICATE============ +Input::EVENT_REMOVED +[i=-42] +[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + }, vc=[10, 20, 30], ve=[]] ============PREDICATE============ Input::EVENT_REMOVED @@ -1111,21 +1126,6 @@ AA, BB }, se={ -}, vc=[10, 20, 30], ve=[]] -============PREDICATE============ -Input::EVENT_REMOVED -[i=-44] -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - }, vc=[10, 20, 30], ve=[]] ============PREDICATE============ Input::EVENT_REMOVED @@ -1159,7 +1159,113 @@ BB }, vc=[10, 20, 30], ve=[]] ============PREDICATE============ Input::EVENT_REMOVED +[i=-43] +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] +Type +Input::EVENT_REMOVED +Left +[i=-44] +Right +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +============EVENT============ +Description +[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ +[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +2, +4, +1, +3 +}, ss={ +CC, +AA, +BB +}, se={ + +}, vc=[10, 20, 30], ve=[]] +}, idx=, val=, want_record=T, ev=line +{ +print A::outfile, ============EVENT============; +print 
A::outfile, Description; +print A::outfile, A::description; +print A::outfile, Type; +print A::outfile, A::tpe; +print A::outfile, Left; +print A::outfile, A::left; +print A::outfile, Right; +print A::outfile, A::right; +}, pred=anonymous-function +{ +print A::outfile, ============PREDICATE============; +print A::outfile, A::typ; +print A::outfile, A::left; +print A::outfile, A::right; +return (T); +}, config={ + +}] +Type +Input::EVENT_REMOVED +Left [i=-42] +Right [b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, @@ -1207,59 +1313,6 @@ print A::outfile, A::right; return (T); }, config={ -}] -Type -Input::EVENT_REMOVED -Left -[i=-43] -Right -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ -[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -}, idx=, val=, want_record=T, ev=line -{ -print A::outfile, ============EVENT============; -print A::outfile, Description; -print A::outfile, A::description; -print A::outfile, Type; -print A::outfile, A::tpe; -print A::outfile, Left; -print A::outfile, A::left; -print A::outfile, Right; -print A::outfile, A::right; -}, pred=anonymous-function -{ -print A::outfile, ============PREDICATE============; -print A::outfile, A::typ; -print A::outfile, A::left; -print A::outfile, A::right; -return (T); -}, config={ - }] Type Input::EVENT_REMOVED @@ -1313,59 +1366,6 @@ print A::outfile, A::right; return (T); }, config={ -}] -Type -Input::EVENT_REMOVED -Left -[i=-44] -Right -[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -============EVENT============ -Description -[source=../input.log, reader=Input::READER_ASCII, mode=Input::REREAD, name=ssh, destination={ -[-48] = [b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ -2, -4, -1, -3 -}, ss={ -CC, -AA, -BB -}, se={ - -}, vc=[10, 20, 30], ve=[]] -}, idx=, val=, want_record=T, ev=line -{ -print A::outfile, ============EVENT============; -print A::outfile, Description; -print A::outfile, A::description; -print A::outfile, Type; -print A::outfile, A::tpe; -print A::outfile, Left; -print A::outfile, A::left; -print A::outfile, Right; -print A::outfile, A::right; -}, pred=anonymous-function -{ -print A::outfile, ============PREDICATE============; -print A::outfile, A::typ; -print A::outfile, A::left; -print A::outfile, A::right; -return (T); -}, config={ - }] Type Input::EVENT_REMOVED @@ -1476,9 +1476,9 @@ return (T); Type Input::EVENT_REMOVED Left -[i=-42] +[i=-43] Right -[b=T, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ +[b=F, e=SSH::LOG, c=21, p=123/unknown, sn=10.0.0.0/24, a=1.2.3.4, d=3.14, t=1315801931.273616, iv=100.0, s=hurz, sc={ 2, 4, 1, diff --git a/testing/btest/scripts/base/frameworks/input/empty-values-hashing.bro b/testing/btest/scripts/base/frameworks/input/empty-values-hashing.bro new file mode 100644 index 
0000000000..b66febba82 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/empty-values-hashing.bro @@ -0,0 +1,89 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). +# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: sleep 2 +# @TEST-EXEC: cp input2.log input.log +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input1.log +#separator \x09 +#fields i s ss +#types int sting string +1 - TEST +2 - - +@TEST-END-FILE +@TEST-START-FILE input2.log +#separator \x09 +#fields i s ss +#types int sting string +1 TEST - +2 TEST TEST +@TEST-END-FILE + +@load frameworks/communication/listen + + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + s: string; + ss: string; +}; + +global servers: table[int] of Val = table(); + +global outfile: file; + +global try: count; + +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) + { + print outfile, "============EVENT============"; + print outfile, "Description"; + print outfile, description; + print outfile, "Type"; + print outfile, tpe; + print outfile, "Left"; + print outfile, left; + print outfile, "Right"; + print outfile, right; + } + +event bro_init() + { + outfile = open("../out"); + try = 0; + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, + $pred(typ: Input::Event, left: Idx, right: Val) = { + print outfile, "============PREDICATE============"; + print outfile, typ; + print outfile, left; + print outfile, right; + return T; + } + ]); + } + + +event Input::update_finished(name: string, source: string) + { + print outfile, "==========SERVERS============"; + print outfile, servers; + + try = try + 1; + if ( try == 2 ) + { + print outfile, "done"; + close(outfile); + Input::remove("input"); + terminate(); + } + } From f133e8808a0f8b199f47141f497cb33ed6a6955f Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Sun, 26 Aug 2012 22:00:37 -0700 Subject: [PATCH 606/651] ok, this one might really be a bit too big for 2.1 Give all kinds of errors when encountering invalid numbers (like out-of-range-warnings, etc). 
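The checks added below follow the standard strtoll()/strtoull()/strtod() idiom: clear errno, pass an end pointer, and inspect both afterwards to distinguish "no digits at all", "trailing garbage" and "out of range". A minimal stand-alone sketch of that pattern is shown here; the helper name and messages are illustrative only, not the reader's actual API.

    #include <cerrno>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    // Illustrative helper mirroring the error classes the ASCII reader
    // distinguishes: empty field, no parseable digits, trailing characters,
    // and out-of-range values.
    static bool parse_int64(const std::string& s, int64_t& out)
        {
        if ( s.empty() )
            {
            fprintf(stderr, "error: got empty string for number field\n");
            return false;
            }

        errno = 0;
        char* end = 0;
        long long v = strtoll(s.c_str(), &end, 10);

        if ( end == s.c_str() )
            {
            fprintf(stderr, "error: '%s' contained no parseable number\n", s.c_str());
            return false;
            }

        if ( errno == ERANGE )
            {
            fprintf(stderr, "error: '%s' out of supported range\n", s.c_str());
            return false;
            }

        if ( *end != '\0' )
            fprintf(stderr, "warning: ignoring trailing characters '%s'\n", end);

        out = v;
        return true;
        }

    int main()
        {
        int64_t v;
        parse_int64("9223372036854775801TEXTHERE", v); // warning, keeps the numeric prefix
        parse_int64("12129223372036854775800", v);     // error: out of range
        parse_int64("Justtext", v);                    // error: no parseable number
        return 0;
        }

The end-pointer/errno distinction is also what later allows a single malformed line to be reported and skipped instead of aborting the whole file read (see the follow-up error-handling patch below).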
--- src/input/readers/Ascii.cc | 57 ++++++++++++++++--- src/input/readers/Ascii.h | 1 + .../out | 3 +- .../.stderrwithoutfirstline | 8 +++ .../out | 5 ++ .../base/frameworks/input/bignumber.bro | 1 + .../base/frameworks/input/invalidnumbers.bro | 55 ++++++++++++++++++ 7 files changed, 122 insertions(+), 8 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline create mode 100644 testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/out create mode 100644 testing/btest/scripts/base/frameworks/input/invalidnumbers.bro diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 4bf82c6a13..1923532103 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -11,6 +11,7 @@ #include #include #include +#include using namespace input::reader; using threading::Value; @@ -209,6 +210,34 @@ bool Ascii::GetLine(string& str) return false; } +bool Ascii::CheckNumberError(const string & s, const char * end) + { + + if ( s.length() == 0 ) + { + Error("Got empty string for number field"); + return true; + } + + if ( end == s.c_str() ) { + Error(Fmt("String '%s' contained no parseable number", s.c_str())); + return true; + } + + if ( *end != '\0' ) + Error(Fmt("Number '%s' contained non-numeric trailing characters. Ignored trailing characters '%s'", s.c_str(), end)); + + if ( errno == EINVAL ) + { + Error(Fmt("String '%s' could not be converted to a number", s.c_str())); + return true; + } + else if ( errno == ERANGE ) + Error(Fmt("Number '%s' out of supported range. Number was truncated", s.c_str())); + + return false; + } + Value* Ascii::EntryToVal(string s, FieldMapping field) { @@ -216,6 +245,8 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) return new Value(field.type, false); Value* val = new Value(field.type, true); + char* end; + errno = 0; switch ( field.type ) { case TYPE_ENUM: @@ -239,22 +270,31 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) break; case TYPE_INT: - val->val.int_val = strtoll(s.c_str(), (char**) NULL, 10); + val->val.int_val = strtoll(s.c_str(), &end, 10); + if ( CheckNumberError(s, end) ) + return 0; break; case TYPE_DOUBLE: case TYPE_TIME: case TYPE_INTERVAL: - val->val.double_val = atof(s.c_str()); + val->val.double_val = strtod(s.c_str(), &end); + if ( CheckNumberError(s, end) ) + return 0; break; case TYPE_COUNT: case TYPE_COUNTER: - val->val.uint_val = strtoull(s.c_str(),(char**) NULL, 10); + val->val.uint_val = strtoull(s.c_str(), &end, 10); + if ( CheckNumberError(s, end) ) + return 0; break; - + case TYPE_PORT: - val->val.port_val.port = atoi(s.c_str()); + val->val.port_val.port = strtoull(s.c_str(), &end, 10); + if ( CheckNumberError(s, end) ) + return 0; + val->val.port_val.proto = TRANSPORT_UNKNOWN; break; @@ -268,8 +308,11 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) return 0; } - int width = atoi(s.substr(pos+1).c_str()); + uint8_t width = (uint8_t) strtol(s.substr(pos+1).c_str(), &end, 10); string addr = s.substr(0, pos); + + if ( CheckNumberError(s, end) ) + return 0; val->val.subnet_val.prefix = StringToAddr(addr); val->val.subnet_val.length = width; @@ -490,7 +533,7 @@ bool Ascii::DoUpdate() Value* val = EntryToVal(stringfields[(*fit).position], *fit); if ( val == 0 ) { - Error("Could not convert String value to Val"); + Error(Fmt("Could not convert line '%s' to Val. 
Aborting file read.", line.c_str())); return false; } diff --git a/src/input/readers/Ascii.h b/src/input/readers/Ascii.h index e1506cbe82..2228e491b0 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/Ascii.h @@ -48,6 +48,7 @@ private: bool ReadHeader(bool useCached); bool GetLine(string& str); threading::Value* EntryToVal(string s, FieldMapping type); + bool CheckNumberError(const string & s, const char * end); ifstream* file; time_t mtime; diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.bignumber/out b/testing/btest/Baseline/scripts.base.frameworks.input.bignumber/out index ab095ca36c..8b95ed8b19 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.bignumber/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.bignumber/out @@ -1,3 +1,4 @@ { -[9223372036854775800] = [c=18446744073709551612] +[9223372036854775800] = [c=18446744073709551612], +[-9223372036854775800] = [c=18446744073709551612] } diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline b/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline new file mode 100644 index 0000000000..bd32495a6f --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline @@ -0,0 +1,8 @@ +error: ../input.log/Input::READER_ASCII: Number '12129223372036854775800' out of supported range. Number was truncated +error: ../input.log/Input::READER_ASCII: Number '121218446744073709551612' out of supported range. Number was truncated +error: ../input.log/Input::READER_ASCII: Number '9223372036854775801TEXTHERE' contained non-numeric trailing characters. Ignored trailing characters 'TEXTHERE' +error: ../input.log/Input::READER_ASCII: Number '1Justtext' contained non-numeric trailing characters. Ignored trailing characters 'Justtext' +error: ../input2.log/Input::READER_ASCII: String 'Justtext' contained no parseable number +error: ../input2.log/Input::READER_ASCII: Could not convert line 'Justtext 1' to Val. Aborting file read. +received termination signal +>>> diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/out b/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/out new file mode 100644 index 0000000000..9be82c13a9 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/out @@ -0,0 +1,5 @@ +{ +[9223372036854775807] = [c=18446744073709551615], +[9223372036854775800] = [c=4], +[9223372036854775801] = [c=1] +} diff --git a/testing/btest/scripts/base/frameworks/input/bignumber.bro b/testing/btest/scripts/base/frameworks/input/bignumber.bro index 519992be05..250f84bbb2 100644 --- a/testing/btest/scripts/base/frameworks/input/bignumber.bro +++ b/testing/btest/scripts/base/frameworks/input/bignumber.bro @@ -10,6 +10,7 @@ #fields i c #types int count 9223372036854775800 18446744073709551612 +-9223372036854775800 18446744073709551612 @TEST-END-FILE @load frameworks/communication/listen diff --git a/testing/btest/scripts/base/frameworks/input/invalidnumbers.bro b/testing/btest/scripts/base/frameworks/input/invalidnumbers.bro new file mode 100644 index 0000000000..7914b53d94 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/invalidnumbers.bro @@ -0,0 +1,55 @@ +# (uses listen.bro just to ensure input sources are more reliably fully-read). 
+# @TEST-SERIALIZE: comm +# +# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: sed 1d .stderr > .stderrwithoutfirstline +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderrwithoutfirstline + +@TEST-START-FILE input.log +#separator \x09 +#fields i c +#types int count +12129223372036854775800 121218446744073709551612 +9223372036854775801TEXTHERE 1Justtext +9223372036854775800 -18446744073709551612 +@TEST-END-FILE + +@TEST-START-FILE input2.log +#separator \x09 +#fields i c +#types int count +Justtext 1 +@TEST-END-FILE + + +@load frameworks/communication/listen + +global outfile: file; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + c: count; +}; + +global servers: table[int] of Val = table(); + +event bro_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::remove("ssh"); + } + +event Input::update_finished(name: string, source:string) + { + print outfile, servers; + Input::add_table([$source="../input2.log", $name="ssh2", $idx=Idx, $val=Val, $destination=servers]); + } From a4ca5b0d829fa61a706913848620d85f2b125dd6 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 27 Aug 2012 09:49:57 -0700 Subject: [PATCH 607/651] fix handline of sets only containing a zero-length string. Thank you Robin... --- src/input/readers/Ascii.cc | 7 ++++--- .../scripts.base.frameworks.input.setspecialcases/out | 3 +++ .../scripts/base/frameworks/input/setspecialcases.bro | 1 + 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 4bf82c6a13..f1664a555a 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -345,9 +345,10 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) pos++; } - // test if the string ends with a set_separator... if it does we have to push an zero-lenght - // val on top of it. - if ( *s.rbegin() == set_separator[0] ) + // test if the string ends with a set_separator... or if the complete string is + // empty. + // In either of these cases we have to push an empty val on top of it. + if ( s.empty() || *s.rbegin() == set_separator[0] ) { lvals[pos] = EntryToVal("", field.subType()); if ( lvals[pos] == 0 ) diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.setspecialcases/out b/testing/btest/Baseline/scripts.base.frameworks.input.setspecialcases/out index 28d1e025bf..62229f7f37 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.setspecialcases/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.setspecialcases/out @@ -7,6 +7,9 @@ testing , testing }, s=[testing, ]], +[6] = [s={ + +}, s=[]], [1] = [s={ testing,testing,testing, }, s=[testing,testing,testing,]], diff --git a/testing/btest/scripts/base/frameworks/input/setspecialcases.bro b/testing/btest/scripts/base/frameworks/input/setspecialcases.bro index 29819a795f..239bdfe7e7 100644 --- a/testing/btest/scripts/base/frameworks/input/setspecialcases.bro +++ b/testing/btest/scripts/base/frameworks/input/setspecialcases.bro @@ -13,6 +13,7 @@ 3 ,testing ,testing 4 testing, testing, 5 ,,, ,,, +6 @TEST-END-FILE From 5c486dae7e82ce308a6553a5dc53afb2fcae9ed8 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 27 Aug 2012 10:54:33 -0700 Subject: [PATCH 608/651] Ok, this one was a little bit sneaky. 
If I understand things correctly, calling other string functions on an stl string may alter the contents of the buffer to which earlier .c_str()-calls pointed. Kind of makes sense when thinking about it. Basically moving around a few lines should fix this. (And thank you again Robin) --- src/input/readers/Ascii.cc | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 1923532103..276391ef84 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -213,6 +213,9 @@ bool Ascii::GetLine(string& str) bool Ascii::CheckNumberError(const string & s, const char * end) { + bool endnotnull = (*end != '\0'); // do this check first, before executing s.c_str() or similar. + // otherwise the value to which *end is pointing at the moment might be gone... + if ( s.length() == 0 ) { Error("Got empty string for number field"); @@ -224,7 +227,7 @@ bool Ascii::CheckNumberError(const string & s, const char * end) return true; } - if ( *end != '\0' ) + if ( endnotnull ) Error(Fmt("Number '%s' contained non-numeric trailing characters. Ignored trailing characters '%s'", s.c_str(), end)); if ( errno == EINVAL ) @@ -309,11 +312,12 @@ Value* Ascii::EntryToVal(string s, FieldMapping field) } uint8_t width = (uint8_t) strtol(s.substr(pos+1).c_str(), &end, 10); - string addr = s.substr(0, pos); - + if ( CheckNumberError(s, end) ) return 0; + string addr = s.substr(0, pos); + val->val.subnet_val.prefix = StringToAddr(addr); val->val.subnet_val.length = width; break; From 56fa56ffa946581d7b4806b494821fe79f9974dc Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 27 Aug 2012 11:38:20 -0700 Subject: [PATCH 609/651] ...and another small change to error handling -> now errors in single lines do not kill processing, but simply ignore the line, log it, and continue. --- src/input/readers/Ascii.cc | 28 ++++++++++++++++--- .../.stderrwithoutfirstline | 12 ++++---- .../out | 1 - .../base/frameworks/input/invalidnumbers.bro | 11 ++------ 4 files changed, 32 insertions(+), 20 deletions(-) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 276391ef84..9c25953864 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -228,7 +228,7 @@ bool Ascii::CheckNumberError(const string & s, const char * end) } if ( endnotnull ) - Error(Fmt("Number '%s' contained non-numeric trailing characters. Ignored trailing characters '%s'", s.c_str(), end)); + Warning(Fmt("Number '%s' contained non-numeric trailing characters. Ignored trailing characters '%s'", s.c_str(), end)); if ( errno == EINVAL ) { @@ -236,7 +236,10 @@ bool Ascii::CheckNumberError(const string & s, const char * end) return true; } else if ( errno == ERANGE ) - Error(Fmt("Number '%s' out of supported range. Number was truncated", s.c_str())); + { + Error(Fmt("Number '%s' out of supported range.", s.c_str())); + return true; + } return false; } @@ -492,6 +495,7 @@ bool Ascii::DoUpdate() while ( GetLine(line ) ) { // split on tabs + bool error = false; istringstream splitstream(line); map stringfields; @@ -537,8 +541,9 @@ bool Ascii::DoUpdate() Value* val = EntryToVal(stringfields[(*fit).position], *fit); if ( val == 0 ) { - Error(Fmt("Could not convert line '%s' to Val. Aborting file read.", line.c_str())); - return false; + Error(Fmt("Could not convert line '%s' to Val. 
Ignoring line.", line.c_str())); + error = true; + break; } if ( (*fit).secondary_position != -1 ) @@ -555,6 +560,21 @@ bool Ascii::DoUpdate() fpos++; } + + if ( error ) + { + // encountered non-fatal error. ignoring line. + // first - delete all successfully read fields and the array structure. + + for ( int i = 0; i < fpos; i++ ) + delete fields[fpos]; + + delete[] fields; + continue; + } + + + //printf("fpos: %d, second.num_fields: %d\n", fpos, (*it).second.num_fields); assert ( fpos == NumFields() ); diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline b/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline index bd32495a6f..3ef51e40f2 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline +++ b/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline @@ -1,8 +1,8 @@ -error: ../input.log/Input::READER_ASCII: Number '12129223372036854775800' out of supported range. Number was truncated -error: ../input.log/Input::READER_ASCII: Number '121218446744073709551612' out of supported range. Number was truncated -error: ../input.log/Input::READER_ASCII: Number '9223372036854775801TEXTHERE' contained non-numeric trailing characters. Ignored trailing characters 'TEXTHERE' -error: ../input.log/Input::READER_ASCII: Number '1Justtext' contained non-numeric trailing characters. Ignored trailing characters 'Justtext' -error: ../input2.log/Input::READER_ASCII: String 'Justtext' contained no parseable number -error: ../input2.log/Input::READER_ASCII: Could not convert line 'Justtext 1' to Val. Aborting file read. +error: ../input.log/Input::READER_ASCII: Number '12129223372036854775800' out of supported range. +error: ../input.log/Input::READER_ASCII: Could not convert line '12129223372036854775800 121218446744073709551612' to Val. Ignoring line. +warning: ../input.log/Input::READER_ASCII: Number '9223372036854775801TEXTHERE' contained non-numeric trailing characters. Ignored trailing characters 'TEXTHERE' +warning: ../input.log/Input::READER_ASCII: Number '1Justtext' contained non-numeric trailing characters. Ignored trailing characters 'Justtext' +error: ../input.log/Input::READER_ASCII: String 'Justtext' contained no parseable number +error: ../input.log/Input::READER_ASCII: Could not convert line 'Justtext 1' to Val. Ignoring line. 
received termination signal >>> diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/out b/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/out index 9be82c13a9..56b2736006 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/out @@ -1,5 +1,4 @@ { -[9223372036854775807] = [c=18446744073709551615], [9223372036854775800] = [c=4], [9223372036854775801] = [c=1] } diff --git a/testing/btest/scripts/base/frameworks/input/invalidnumbers.bro b/testing/btest/scripts/base/frameworks/input/invalidnumbers.bro index 7914b53d94..3c755f1d08 100644 --- a/testing/btest/scripts/base/frameworks/input/invalidnumbers.bro +++ b/testing/btest/scripts/base/frameworks/input/invalidnumbers.bro @@ -13,17 +13,10 @@ #types int count 12129223372036854775800 121218446744073709551612 9223372036854775801TEXTHERE 1Justtext +Justtext 1 9223372036854775800 -18446744073709551612 @TEST-END-FILE -@TEST-START-FILE input2.log -#separator \x09 -#fields i c -#types int count -Justtext 1 -@TEST-END-FILE - - @load frameworks/communication/listen global outfile: file; @@ -51,5 +44,5 @@ event bro_init() event Input::update_finished(name: string, source:string) { print outfile, servers; - Input::add_table([$source="../input2.log", $name="ssh2", $idx=Idx, $val=Val, $destination=servers]); + terminate(); } From 26f5aee7f6376d65031517efa78a1a6e7cbf1b46 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 28 Aug 2012 00:44:39 -0700 Subject: [PATCH 610/651] on 32-bit machines only unsigned long longs are 64-bits long. Not just unsigned longs... Note that this means that up to now all outputs (including logs) of counts > 32 bits were broken on 32-bit systems. --- src/modp_numtoa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/modp_numtoa.c b/src/modp_numtoa.c index 6deb8a70ed..6fa49b460f 100644 --- a/src/modp_numtoa.c +++ b/src/modp_numtoa.c @@ -56,7 +56,7 @@ void modp_uitoa10(uint32_t value, char* str) void modp_litoa10(int64_t value, char* str) { char* wstr=str; - unsigned long uvalue = (value < 0) ? -value : value; + unsigned long long uvalue = (value < 0) ? -value : value; // Conversion. Number is reversed. do *wstr++ = (char)(48 + (uvalue % 10)); while(uvalue /= 10); From 03f5795095642f89e11265ed36fda17f97a97ea9 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 28 Aug 2012 07:33:05 -0700 Subject: [PATCH 611/651] parse 64-bit consts correctly. --- src/scan.l | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/scan.l b/src/scan.l index 645ce659cd..3148ba58ad 100644 --- a/src/scan.l +++ b/src/scan.l @@ -439,7 +439,7 @@ F RET_CONST(new Val(false, TYPE_BOOL)) {D} { // TODO: check if we can use strtoull instead of atol, // and similarly for {HEX}. 
- RET_CONST(new Val(static_cast(atol(yytext)), + RET_CONST(new Val(static_cast(strtoll(yytext, (char**) NULL, 10)), TYPE_COUNT)) } {FLOAT} RET_CONST(new Val(atof(yytext), TYPE_DOUBLE)) @@ -483,7 +483,7 @@ F RET_CONST(new Val(false, TYPE_BOOL)) ({D}"."){3}{D} RET_CONST(new AddrVal(yytext)) -"0x"{HEX}+ RET_CONST(new Val(static_cast(strtol(yytext, 0, 16)), TYPE_COUNT)) +"0x"{HEX}+ RET_CONST(new Val(static_cast(strtoull(yytext, 0, 16)), TYPE_COUNT)) {H}("."{H})+ RET_CONST(dns_mgr->LookupHost(yytext)) From b815b7ca5c133960102409d32bb492080112dde0 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 28 Aug 2012 10:57:21 -0500 Subject: [PATCH 612/651] Fix uninitialized value for 'is_partial' in TCP analyzer. This led to non-deterministic behavior in cases where the first packet analyzed wasn't from the originator side (see the conditionals in TCP_Analyzer::CheckFlagCombos()). The 'short' test in private test suite showed this behavior most often. --- src/TCP.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/TCP.cc b/src/TCP.cc index 57e4449bf8..555adf1b57 100644 --- a/src/TCP.cc +++ b/src/TCP.cc @@ -46,6 +46,7 @@ TCP_Analyzer::TCP_Analyzer(Connection* conn) finished = 0; reassembling = 0; first_packet_seen = 0; + is_partial = 0; orig = new TCP_Endpoint(this, 1); resp = new TCP_Endpoint(this, 0); From 393ded1efe378a3f2109ccf49623e5050c12e048 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 28 Aug 2012 09:19:33 -0700 Subject: [PATCH 613/651] Set VERSION to 2.1-rc3 so that we don't get confused. --- CHANGES | 2 +- VERSION | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 02d7d74046..7df00f352c 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,5 @@ -2.1 | 2012-08-24 15:11:49 -0700 +2.1-rc3 | 2012-08-24 15:11:49 -0700 * Input framework fixes, including: (Bernhard Amann) diff --git a/VERSION b/VERSION index 879b416e60..1537f322a9 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1 +2.1-rc3 From cc49193f93ba8c60b65b61047a0874982ad93db3 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 28 Aug 2012 13:11:12 -0500 Subject: [PATCH 614/651] Remove automatic use of gperftools on non-Linux systems. --enable-perftools must now explicity be supplied to ./configure on non-Linux systems to link against the tcmalloc library that a gperftools installation provides. Linux systems still automatically link it if it's found. The rationale is that gperftools was developed and most throroughly tested on Linux so it's safer there. There especially seems to be potential problems with gperftools on OS X (e.g. see http://code.google.com/p/gperftools/issues/detail?id=413), and Bro currently doesn't work with gpertools there using clang or gcc. --- CMakeLists.txt | 29 ++++++++++++++++++----------- configure | 7 +++++++ 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f667c0cfe0..2c8a726a1a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -88,24 +88,30 @@ if (LIBGEOIP_FOUND) list(APPEND OPTLIBS ${LibGeoIP_LIBRARY}) endif () -set(USE_PERFTOOLS false) +set(HAVE_PERFTOOLS false) set(USE_PERFTOOLS_DEBUG false) +set(USE_PERFTOOLS_TCMALLOC false) if (NOT DISABLE_PERFTOOLS) find_package(GooglePerftools) endif () if (GOOGLEPERFTOOLS_FOUND) - include_directories(BEFORE ${GooglePerftools_INCLUDE_DIR}) - set(USE_PERFTOOLS true) + set(HAVE_PERFTOOLS true) + # Non-Linux systems may not be well-supported by gperftools, so + # require explicit request from user to enable it in that case. 
+ if (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ENABLE_PERFTOOLS) + set(USE_PERFTOOLS_TCMALLOC true) - if (ENABLE_PERFTOOLS_DEBUG) - # Enable heap debugging with perftools. - set(USE_PERFTOOLS_DEBUG true) - list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES_DEBUG}) - else () - # Link in tcmalloc for better performance. - list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES}) + if (ENABLE_PERFTOOLS_DEBUG) + # Enable heap debugging with perftools. + set(USE_PERFTOOLS_DEBUG true) + include_directories(BEFORE ${GooglePerftools_INCLUDE_DIR}) + list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES_DEBUG}) + else () + # Link in tcmalloc for better performance. + list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES}) + endif () endif () endif () @@ -224,7 +230,8 @@ message( "\nAux. Tools: ${INSTALL_AUX_TOOLS}" "\n" "\nGeoIP: ${USE_GEOIP}" - "\nGoogle perftools: ${USE_PERFTOOLS}" + "\ngperftools found: ${HAVE_PERFTOOLS}" + "\n tcmalloc: ${USE_PERFTOOLS_TCMALLOC}" "\n debugging: ${USE_PERFTOOLS_DEBUG}" "\ncURL: ${USE_CURL}" "\n" diff --git a/configure b/configure index b4ca606103..8e4aaa8425 100755 --- a/configure +++ b/configure @@ -29,6 +29,8 @@ Usage: $0 [OPTION]... [VAR=VALUE]... Optional Features: --enable-debug compile in debugging mode --enable-mobile-ipv6 analyze mobile IPv6 features defined by RFC 6275 + --enable-perftools force use of Google perftools on non-Linux systems + (automatically on when perftools is present on Linux) --enable-perftools-debug use Google's perftools for debugging --disable-broccoli don't build or install the Broccoli library --disable-broctl don't install Broctl @@ -98,6 +100,7 @@ append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/broctl append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc append_cache_entry ENABLE_DEBUG BOOL false +append_cache_entry ENABLE_PERFTOOLS BOOL false append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false append_cache_entry BinPAC_SKIP_INSTALL BOOL true append_cache_entry BUILD_SHARED_LIBS BOOL true @@ -146,7 +149,11 @@ while [ $# -ne 0 ]; do --enable-mobile-ipv6) append_cache_entry ENABLE_MOBILE_IPV6 BOOL true ;; + --enable-perftools) + append_cache_entry ENABLE_PERFTOOLS BOOL true + ;; --enable-perftools-debug) + append_cache_entry ENABLE_PERFTOOLS BOOL true append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true ;; --disable-broccoli) From e4b7ffa8ac0718ace6d37371c8283efc50502c4f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 28 Aug 2012 16:44:30 -0700 Subject: [PATCH 615/651] Updating CHANGES and VERSION. --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 2097bb1d94..9459d4ba2a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.1-rc3-5 +1.1 From b915db86d5c7b30c7d50d8b5ddfbbbdadd32107d Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 28 Aug 2012 16:46:37 -0700 Subject: [PATCH 616/651] Updating CHANGES and VERSION. --- CHANGES | 2 +- VERSION | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 232e2faa19..516c36974e 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,5 @@ -1.1 | 2012-08-28 16:29:30 -0700 +2.1 | 2012-08-28 16:46:42 -0700 * Remove automatic use of gperftools on non-Linux systems. 
--enable-perftools must now explicity be supplied to ./configure diff --git a/VERSION b/VERSION index 9459d4ba2a..879b416e60 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.1 +2.1 From 22cf75dae553dc2aa2a103bf7721cd466b764d64 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 29 Aug 2012 08:09:44 -0700 Subject: [PATCH 617/651] Two fixes. - Typo in recent scanner fix. - Make bif.identify_magic robust against FreeBSD's libmagic config. --- CHANGES | 3 +++ src/scan.l | 4 +--- testing/btest/Baseline/bifs.identify_data/out | 2 +- testing/btest/bifs/identify_data.bro | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGES b/CHANGES index 516c36974e..f8e4444f1d 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ 2.1 | 2012-08-28 16:46:42 -0700 + * Make bif.identify_magic robust against FreeBSD's libmagic config. + (Robin Sommer) + * Remove automatic use of gperftools on non-Linux systems. --enable-perftools must now explicity be supplied to ./configure on non-Linux systems to link against the tcmalloc library. diff --git a/src/scan.l b/src/scan.l index 3148ba58ad..1b3d09f879 100644 --- a/src/scan.l +++ b/src/scan.l @@ -437,9 +437,7 @@ F RET_CONST(new Val(false, TYPE_BOOL)) } {D} { - // TODO: check if we can use strtoull instead of atol, - // and similarly for {HEX}. - RET_CONST(new Val(static_cast(strtoll(yytext, (char**) NULL, 10)), + RET_CONST(new Val(static_cast(strtoul(yytext, (char**) NULL, 10)), TYPE_COUNT)) } {FLOAT} RET_CONST(new Val(atof(yytext), TYPE_DOUBLE)) diff --git a/testing/btest/Baseline/bifs.identify_data/out b/testing/btest/Baseline/bifs.identify_data/out index a2872877f9..1cadefbf6e 100644 --- a/testing/btest/Baseline/bifs.identify_data/out +++ b/testing/btest/Baseline/bifs.identify_data/out @@ -1,4 +1,4 @@ ASCII text, with no line terminators text/plain; charset=us-ascii -PNG image data +PNG image image/png; charset=binary diff --git a/testing/btest/bifs/identify_data.bro b/testing/btest/bifs/identify_data.bro index 11824b5e85..39f289d40b 100644 --- a/testing/btest/bifs/identify_data.bro +++ b/testing/btest/bifs/identify_data.bro @@ -1,5 +1,5 @@ # -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: bro %INPUT | sed 's/PNG image data/PNG image/g' >out # @TEST-EXEC: btest-diff out event bro_init() From 621a90d24821f5dafd4939e6b67248d0c1e98a8c Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 29 Aug 2012 17:14:03 -0500 Subject: [PATCH 618/651] Add more language tests Added tests for the conditional operator, operator precedence, modules ("module" and "export" keywords, and the "::" operator), and for the "copy" keyword. Also improved tests of max/min values of int, count, and double constants. 
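The boundary values exercised by these tests are simply the 64-bit integer limits plus DBL_MAX: as the baselines show, Bro's int behaves as a signed 64-bit value, count as an unsigned 64-bit one, and double as an IEEE-754 double. A quick stand-alone C++ check of the limits the int and count tests converge on (after the follow-up adjustment in the next patch) and of the double maximum; purely illustrative, standard library only.

    #include <cfloat>
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main()
        {
        // 64-bit limits and DBL_MAX, matching the values asserted in the
        // int/count/double language tests:
        printf("int max    = %" PRId64 "\n", INT64_MAX);  // 9223372036854775807
        printf("int min    = %" PRId64 "\n", INT64_MIN);  // -9223372036854775808
        printf("count max  = %" PRIu64 "\n", UINT64_MAX); // 18446744073709551615
        printf("double max = %.16e\n", DBL_MAX);          // 1.7976931348623157e+308
        return 0;
        }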
--- .../language.conditional-expression/out | 7 ++ testing/btest/Baseline/language.copy/out | 2 + testing/btest/Baseline/language.count/out | 1 + testing/btest/Baseline/language.double/out | 2 +- testing/btest/Baseline/language.int/out | 2 + testing/btest/Baseline/language.module/out | 4 + .../btest/Baseline/language.precedence/out | 31 +++++ .../btest/language/conditional-expression.bro | 66 +++++++++++ testing/btest/language/copy.bro | 30 +++++ testing/btest/language/count.bro | 9 +- testing/btest/language/double.bro | 6 +- testing/btest/language/int.bro | 10 +- testing/btest/language/module.bro | 41 +++++++ testing/btest/language/precedence.bro | 110 ++++++++++++++++++ 14 files changed, 312 insertions(+), 9 deletions(-) create mode 100644 testing/btest/Baseline/language.conditional-expression/out create mode 100644 testing/btest/Baseline/language.copy/out create mode 100644 testing/btest/Baseline/language.module/out create mode 100644 testing/btest/Baseline/language.precedence/out create mode 100644 testing/btest/language/conditional-expression.bro create mode 100644 testing/btest/language/copy.bro create mode 100644 testing/btest/language/module.bro create mode 100644 testing/btest/language/precedence.bro diff --git a/testing/btest/Baseline/language.conditional-expression/out b/testing/btest/Baseline/language.conditional-expression/out new file mode 100644 index 0000000000..0dcbdbd7c7 --- /dev/null +++ b/testing/btest/Baseline/language.conditional-expression/out @@ -0,0 +1,7 @@ +true condition (PASS) +false condition (PASS) +true condition (PASS) +false condition (PASS) +associativity (PASS) +associativity (PASS) +associativity (PASS) diff --git a/testing/btest/Baseline/language.copy/out b/testing/btest/Baseline/language.copy/out new file mode 100644 index 0000000000..675d38aa5d --- /dev/null +++ b/testing/btest/Baseline/language.copy/out @@ -0,0 +1,2 @@ +direct assignment (PASS) +using copy (PASS) diff --git a/testing/btest/Baseline/language.count/out b/testing/btest/Baseline/language.count/out index 7dba9ea24c..a5de991e22 100644 --- a/testing/btest/Baseline/language.count/out +++ b/testing/btest/Baseline/language.count/out @@ -14,3 +14,4 @@ division operator (PASS) assignment operator (PASS) assignment operator (PASS) max count value = 4294967295 (PASS) +max count value = 9223372036854775807 (PASS) diff --git a/testing/btest/Baseline/language.double/out b/testing/btest/Baseline/language.double/out index 01e3047743..9711e70d9b 100644 --- a/testing/btest/Baseline/language.double/out +++ b/testing/btest/Baseline/language.double/out @@ -22,4 +22,4 @@ relational operator (PASS) relational operator (PASS) relational operator (PASS) division operator (PASS) -max double value = 1.7e+308 (PASS) +max double value = 1.7976931348623157e+308 (PASS) diff --git a/testing/btest/Baseline/language.int/out b/testing/btest/Baseline/language.int/out index a50887999a..223d520e25 100644 --- a/testing/btest/Baseline/language.int/out +++ b/testing/btest/Baseline/language.int/out @@ -18,4 +18,6 @@ assignment operator (PASS) assignment operator (PASS) max int value = 4294967295 (PASS) min int value = -4294967295 (PASS) +max int value = 9223372036854775807 (PASS) +min int value = -9223372036854775807 (PASS) type inference (PASS) diff --git a/testing/btest/Baseline/language.module/out b/testing/btest/Baseline/language.module/out new file mode 100644 index 0000000000..5b011543b5 --- /dev/null +++ b/testing/btest/Baseline/language.module/out @@ -0,0 +1,4 @@ +function (PASS) +global variable (PASS) +const (PASS) 
+event (PASS) diff --git a/testing/btest/Baseline/language.precedence/out b/testing/btest/Baseline/language.precedence/out new file mode 100644 index 0000000000..263ca83529 --- /dev/null +++ b/testing/btest/Baseline/language.precedence/out @@ -0,0 +1,31 @@ +++ and * (PASS) +++ and * (PASS) +* and ++ (PASS) +* and % (PASS) +* and % (PASS) +* and % (PASS) +% and * (PASS) +% and * (PASS) +% and * (PASS) ++ and * (PASS) ++ and * (PASS) ++ and * (PASS) +< and + (PASS) +< and + (PASS) ++ and < (PASS) ++ and < (PASS) ++= and + (PASS) ++= and + (PASS) ++= and + (PASS) +&& and || (PASS) +&& and || (PASS) +&& and || (PASS) +|| and && (PASS) +|| and && (PASS) +|| and && (PASS) +|| and conditional operator (PASS) +|| and conditional operator (PASS) +|| and conditional operator (PASS) +conditional operator and || (PASS) +conditional operator and || (PASS) +conditional operator and || (PASS) diff --git a/testing/btest/language/conditional-expression.bro b/testing/btest/language/conditional-expression.bro new file mode 100644 index 0000000000..74648b6ce8 --- /dev/null +++ b/testing/btest/language/conditional-expression.bro @@ -0,0 +1,66 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +global ct: count; + +function f1(): bool + { + ct += 1; + return T; + } + +function f2(): bool + { + ct += 4; + return F; + } + + +event bro_init() +{ + local a: count; + local b: count; + local res: count; + local res2: bool; + + # Test that the correct operand is evaluated + + a = b = 0; + res = T ? ++a : ++b; + test_case( "true condition", a == 1 && b == 0 && res == 1); + + a = b = 0; + res = F ? ++a : ++b; + test_case( "false condition", a == 0 && b == 1 && res == 1); + + # Test again using function calls as operands + + ct = 0; + res2 = ct == 0 ? f1() : f2(); + test_case( "true condition", ct == 1 && res2 == T); + + ct = 0; + res2 = ct != 0 ? f1() : f2(); + test_case( "false condition", ct == 4 && res2 == F); + + # Test that the conditional operator is right-associative + + ct = 0; + T ? f1() : T ? f1() : f2(); + test_case( "associativity", ct == 1 ); + + ct = 0; + T ? f1() : (T ? f1() : f2()); + test_case( "associativity", ct == 1 ); + + ct = 0; + (T ? f1() : T) ? f1() : f2(); + test_case( "associativity", ct == 2 ); + +} + diff --git a/testing/btest/language/copy.bro b/testing/btest/language/copy.bro new file mode 100644 index 0000000000..6740a080c7 --- /dev/null +++ b/testing/btest/language/copy.bro @@ -0,0 +1,30 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + + +event bro_init() +{ + # "b" is not a copy of "a" + local a: set[string] = set("this", "test"); + local b: set[string] = a; + + delete a["this"]; + + test_case( "direct assignment", |b| == 1 && "this" !in b ); + + # "d" is a copy of "c" + local c: set[string] = set("this", "test"); + local d: set[string] = copy(c); + + delete c["this"]; + + test_case( "using copy", |d| == 2 && "this" in d); + +} + diff --git a/testing/btest/language/count.bro b/testing/btest/language/count.bro index f2c248eae9..97fb13ce51 100644 --- a/testing/btest/language/count.bro +++ b/testing/btest/language/count.bro @@ -11,10 +11,11 @@ event bro_init() { local c1: count = 0; local c2: count = 5; - local c3: count = 0xff; + local c3: count = 0xFF; local c4: count = 255; local c5: count = 4294967295; # maximum allowed value - local c6: counter = 5; + local c6: count = 0x7fffffffffffffff; # maximum allowed value + local c7: counter = 5; test_case( "inequality operator", c1 != c2 ); test_case( "relational operator", c1 < c2 ); @@ -22,7 +23,7 @@ event bro_init() test_case( "relational operator", c2 > c1 ); test_case( "relational operator", c2 >= c1 ); test_case( "hexadecimal", c3 == c4 ); - test_case( "counter alias", c2 == c6 ); + test_case( "counter alias", c2 == c7 ); test_case( "absolute value", |c1| == 0 ); test_case( "absolute value", |c2| == 5 ); test_case( "pre-increment operator", ++c2 == 6 ); @@ -35,6 +36,8 @@ event bro_init() test_case( "assignment operator", c2 == 6 ); local str1 = fmt("max count value = %d", c5); test_case( str1, str1 == "max count value = 4294967295" ); + local str2 = fmt("max count value = %d", c6); + test_case( str2, str2 == "max count value = 9223372036854775807" ); # type inference local x = 1; diff --git a/testing/btest/language/double.bro b/testing/btest/language/double.bro index bee7e41a94..f56d291631 100644 --- a/testing/btest/language/double.bro +++ b/testing/btest/language/double.bro @@ -27,7 +27,7 @@ event bro_init() local d16: double = .03E2; local d17: double = 3.0001; local d18: double = -3.0001; - local d19: double = 1.7e308; # almost maximum allowed value + local d19: double = 1.7976931348623157e308; # maximum allowed value test_case( "double representations", d1 == d2 ); test_case( "double representations", d1 == d3 ); @@ -55,8 +55,8 @@ event bro_init() test_case( "relational operator", d17 >= d3 ); test_case( "relational operator", d17 > d3 ); test_case( "division operator", d3/2 == 1.5 ); - local str1 = fmt("max double value = %.1e", d19); - test_case( str1, str1 == "max double value = 1.7e+308" ); + local str1 = fmt("max double value = %.16e", d19); + test_case( str1, str1 == "max double value = 1.7976931348623157e+308" ); # type inference local x = 7.0; diff --git a/testing/btest/language/int.bro b/testing/btest/language/int.bro index 0c11b94235..7cc91dd9d8 100644 --- a/testing/btest/language/int.bro +++ b/testing/btest/language/int.bro @@ -19,8 +19,10 @@ event bro_init() local i8: int = 0xC; local i9: int = -0xC; local i10: int = -12; - local i11: int = 4294967295; - local i12: int = -4294967295; + local i11: int = 4294967295; # max. allowed value + local i12: int = -4294967295; # min. allowed value + local i13: int = 0x7fffffffffffffff; # max. allowed value + local i14: int = -0x7fffffffffffffff; # min. allowed value test_case( "optional '+' sign", i1 == i2 ); test_case( "negative vs. 
positive", i1 != i3 ); @@ -46,6 +48,10 @@ event bro_init() test_case( str1, str1 == "max int value = 4294967295" ); local str2 = fmt("min int value = %d", i12); test_case( str2, str2 == "min int value = -4294967295" ); + local str3 = fmt("max int value = %d", i13); + test_case( str3, str3 == "max int value = 9223372036854775807" ); + local str4 = fmt("min int value = %d", i14); + test_case( str4, str4 == "min int value = -9223372036854775807" ); # type inference local x = +3; diff --git a/testing/btest/language/module.bro b/testing/btest/language/module.bro new file mode 100644 index 0000000000..4c70546406 --- /dev/null +++ b/testing/btest/language/module.bro @@ -0,0 +1,41 @@ +# @TEST-EXEC: bro %INPUT secondtestfile >out +# @TEST-EXEC: btest-diff out + +# In this source file, we define a module and export some objects + +module thisisatest; + +export { + global test_case: function(msg: string, expect: bool); + + global testevent: event(msg: string); + + global num: count = 123; + + const daysperyear: count = 365; +} + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +event testevent(msg: string) + { + test_case( "event", T ); + } + + +# @TEST-START-FILE secondtestfile + +# In this source file, we try to access each exported object from the module + +event bro_init() +{ + thisisatest::test_case( "function", T ); + thisisatest::test_case( "global variable", thisisatest::num == 123 ); + thisisatest::test_case( "const", thisisatest::daysperyear == 365 ); + event thisisatest::testevent( "foo" ); +} + +# @TEST-END-FILE diff --git a/testing/btest/language/precedence.bro b/testing/btest/language/precedence.bro new file mode 100644 index 0000000000..da8fef311c --- /dev/null +++ b/testing/btest/language/precedence.bro @@ -0,0 +1,110 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +# This is an incomplete set of tests to demonstrate the order of precedence +# of bro script operators + +event bro_init() +{ + local n1: int; + local n2: int; + local n3: int; + + # Tests that show "++" has higher precedence than "*" + + n1 = n2 = 5; + n1 = ++n1 * 3; + n2 = (++n2) * 3; + test_case( "++ and *", n1 == 18 ); + test_case( "++ and *", n2 == 18 ); + + n1 = 5; + n1 = 3 * ++n1; + test_case( "* and ++", n1 == 18 ); + + # Tests that show "*" has same precedence as "%" + + n1 = 3 * 5 % 2; + n2 = (3 * 5) % 2; + n3 = 3 * (5 % 2); + test_case( "* and %", n1 == 1 ); + test_case( "* and %", n2 == 1 ); + test_case( "* and %", n3 == 3 ); + + n1 = 7 % 3 * 2; + n2 = (7 % 3) * 2; + n3 = 7 % (3 * 2); + test_case( "% and *", n1 == 2 ); + test_case( "% and *", n2 == 2 ); + test_case( "% and *", n3 == 1 ); + + # Tests that show "*" has higher precedence than "+" + + n1 = 1 + 2 * 3; + n2 = 1 + (2 * 3); + n3 = (1 + 2) * 3; + test_case( "+ and *", n1 == 7 ); + test_case( "+ and *", n2 == 7 ); + test_case( "+ and *", n3 == 9 ); + + # Tests that show "+" has higher precedence than "<" + + test_case( "< and +", 5 < 3 + 7 ); + test_case( "< and +", 5 < (3 + 7) ); + + test_case( "+ and <", 7 + 3 > 5 ); + test_case( "+ and <", (7 + 3) > 5 ); + + # Tests that show "+" has higher precedence than "+=" + + n1 = n2 = n3 = 0; + n1 += 1 + 2; + n2 += (1 + 2); + (n3 += 1) + 2; + test_case( "+= and +", n1 == 3 ); + test_case( "+= and +", n2 == 3 ); + test_case( "+= and +", n3 == 1 ); + + local r1: bool; + local r2: bool; + local r3: bool; + + # Tests that show "&&" has higher precedence than "||" + + r1 = F && F || T; + r2 = (F && F) || T; + r3 = F && (F || T); + test_case( "&& and ||", r1 ); + test_case( "&& and ||", r2 ); + test_case( "&& and ||", !r3 ); + + r1 = T || F && F; + r2 = T || (F && F); + r3 = (T || F) && F; + test_case( "|| and &&", r1 ); + test_case( "|| and &&", r2 ); + test_case( "|| and &&", !r3 ); + + # Tests that show "||" has higher precedence than conditional operator + + r1 = T || T ? F : F; + r2 = (T || T) ? F : F; + r3 = T || (T ? F : F); + test_case( "|| and conditional operator", !r1 ); + test_case( "|| and conditional operator", !r2 ); + test_case( "|| and conditional operator", r3 ); + + r1 = T ? F : F || T; + r2 = T ? F : (F || T); + r3 = (T ? F : F) || T; + test_case( "conditional operator and ||", !r1 ); + test_case( "conditional operator and ||", !r2 ); + test_case( "conditional operator and ||", r3 ); + +} + From 44c6ed5e8cb216028377c071902956b68ba48f9e Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 29 Aug 2012 17:53:37 -0500 Subject: [PATCH 619/651] Update language tests Updated the int and count max/min constant value tests based on latest fixes in master. 
--- testing/btest/Baseline/language.count/out | 4 ++-- testing/btest/Baseline/language.int/out | 6 +++--- testing/btest/language/count.bro | 8 ++++---- testing/btest/language/int.bro | 12 ++++++------ 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/testing/btest/Baseline/language.count/out b/testing/btest/Baseline/language.count/out index a5de991e22..cab1ff90df 100644 --- a/testing/btest/Baseline/language.count/out +++ b/testing/btest/Baseline/language.count/out @@ -13,5 +13,5 @@ modulus operator (PASS) division operator (PASS) assignment operator (PASS) assignment operator (PASS) -max count value = 4294967295 (PASS) -max count value = 9223372036854775807 (PASS) +max count value = 18446744073709551615 (PASS) +max count value = 18446744073709551615 (PASS) diff --git a/testing/btest/Baseline/language.int/out b/testing/btest/Baseline/language.int/out index 223d520e25..6defb35b20 100644 --- a/testing/btest/Baseline/language.int/out +++ b/testing/btest/Baseline/language.int/out @@ -16,8 +16,8 @@ modulus operator (PASS) division operator (PASS) assignment operator (PASS) assignment operator (PASS) -max int value = 4294967295 (PASS) -min int value = -4294967295 (PASS) max int value = 9223372036854775807 (PASS) -min int value = -9223372036854775807 (PASS) +min int value = -9223372036854775808 (PASS) +max int value = 9223372036854775807 (PASS) +min int value = -9223372036854775808 (PASS) type inference (PASS) diff --git a/testing/btest/language/count.bro b/testing/btest/language/count.bro index 97fb13ce51..e58fb47b54 100644 --- a/testing/btest/language/count.bro +++ b/testing/btest/language/count.bro @@ -13,8 +13,8 @@ event bro_init() local c2: count = 5; local c3: count = 0xFF; local c4: count = 255; - local c5: count = 4294967295; # maximum allowed value - local c6: count = 0x7fffffffffffffff; # maximum allowed value + local c5: count = 18446744073709551615; # maximum allowed value + local c6: count = 0xffffffffffffffff; # maximum allowed value local c7: counter = 5; test_case( "inequality operator", c1 != c2 ); @@ -35,9 +35,9 @@ event bro_init() c2 -= 2; test_case( "assignment operator", c2 == 6 ); local str1 = fmt("max count value = %d", c5); - test_case( str1, str1 == "max count value = 4294967295" ); + test_case( str1, str1 == "max count value = 18446744073709551615" ); local str2 = fmt("max count value = %d", c6); - test_case( str2, str2 == "max count value = 9223372036854775807" ); + test_case( str2, str2 == "max count value = 18446744073709551615" ); # type inference local x = 1; diff --git a/testing/btest/language/int.bro b/testing/btest/language/int.bro index 7cc91dd9d8..03dd52b404 100644 --- a/testing/btest/language/int.bro +++ b/testing/btest/language/int.bro @@ -19,10 +19,10 @@ event bro_init() local i8: int = 0xC; local i9: int = -0xC; local i10: int = -12; - local i11: int = 4294967295; # max. allowed value - local i12: int = -4294967295; # min. allowed value + local i11: int = 9223372036854775807; # max. allowed value + local i12: int = -9223372036854775808; # min. allowed value local i13: int = 0x7fffffffffffffff; # max. allowed value - local i14: int = -0x7fffffffffffffff; # min. allowed value + local i14: int = -0x8000000000000000; # min. allowed value test_case( "optional '+' sign", i1 == i2 ); test_case( "negative vs. 
positive", i1 != i3 ); @@ -45,13 +45,13 @@ event bro_init() i2 -= 2; test_case( "assignment operator", i2 == 5 ); local str1 = fmt("max int value = %d", i11); - test_case( str1, str1 == "max int value = 4294967295" ); + test_case( str1, str1 == "max int value = 9223372036854775807" ); local str2 = fmt("min int value = %d", i12); - test_case( str2, str2 == "min int value = -4294967295" ); + test_case( str2, str2 == "min int value = -9223372036854775808" ); local str3 = fmt("max int value = %d", i13); test_case( str3, str3 == "max int value = 9223372036854775807" ); local str4 = fmt("min int value = %d", i14); - test_case( str4, str4 == "min int value = -9223372036854775807" ); + test_case( str4, str4 == "min int value = -9223372036854775808" ); # type inference local x = +3; From 05ad3f95afd1e27e8899c582ecc17d722080ad45 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 31 Aug 2012 14:05:02 -0500 Subject: [PATCH 620/651] Add more language tests Added more tests and fixed a broken test. --- testing/btest/Baseline/language.at-if/out | 3 ++ testing/btest/Baseline/language.at-ifdef/out | 3 ++ testing/btest/Baseline/language.at-ifndef/out | 3 ++ testing/btest/Baseline/language.at-load/out | 4 ++ testing/btest/Baseline/language.no-module/out | 4 ++ testing/btest/Baseline/language.set/out | 8 +++ testing/btest/Baseline/language.table/out | 4 ++ testing/btest/Baseline/language.vector/out | 1 + testing/btest/Baseline/language.when/out | 1 - testing/btest/language/at-if.bro | 49 ++++++++++++++++++ testing/btest/language/at-ifdef.bro | 50 +++++++++++++++++++ testing/btest/language/at-ifndef.bro | 50 +++++++++++++++++++ testing/btest/language/at-load.bro | 43 ++++++++++++++++ testing/btest/language/no-module.bro | 34 +++++++++++++ testing/btest/language/set.bro | 15 ++++++ testing/btest/language/table.bro | 7 +++ testing/btest/language/vector.bro | 4 ++ testing/btest/language/when.bro | 2 +- 18 files changed, 283 insertions(+), 2 deletions(-) create mode 100644 testing/btest/Baseline/language.at-if/out create mode 100644 testing/btest/Baseline/language.at-ifdef/out create mode 100644 testing/btest/Baseline/language.at-ifndef/out create mode 100644 testing/btest/Baseline/language.at-load/out create mode 100644 testing/btest/Baseline/language.no-module/out create mode 100644 testing/btest/language/at-if.bro create mode 100644 testing/btest/language/at-ifdef.bro create mode 100644 testing/btest/language/at-ifndef.bro create mode 100644 testing/btest/language/at-load.bro create mode 100644 testing/btest/language/no-module.bro diff --git a/testing/btest/Baseline/language.at-if/out b/testing/btest/Baseline/language.at-if/out new file mode 100644 index 0000000000..b63cbbb714 --- /dev/null +++ b/testing/btest/Baseline/language.at-if/out @@ -0,0 +1,3 @@ +@if (PASS) +@if...@else (PASS) +@if...@else (PASS) diff --git a/testing/btest/Baseline/language.at-ifdef/out b/testing/btest/Baseline/language.at-ifdef/out new file mode 100644 index 0000000000..644a42d407 --- /dev/null +++ b/testing/btest/Baseline/language.at-ifdef/out @@ -0,0 +1,3 @@ +@ifdef (PASS) +@ifdef...@else (PASS) +@ifdef...@else (PASS) diff --git a/testing/btest/Baseline/language.at-ifndef/out b/testing/btest/Baseline/language.at-ifndef/out new file mode 100644 index 0000000000..70abba9b3f --- /dev/null +++ b/testing/btest/Baseline/language.at-ifndef/out @@ -0,0 +1,3 @@ +@ifndef (PASS) +@ifndef...@else (PASS) +@ifndef...@else (PASS) diff --git a/testing/btest/Baseline/language.at-load/out b/testing/btest/Baseline/language.at-load/out new file 
mode 100644 index 0000000000..5b011543b5 --- /dev/null +++ b/testing/btest/Baseline/language.at-load/out @@ -0,0 +1,4 @@ +function (PASS) +global variable (PASS) +const (PASS) +event (PASS) diff --git a/testing/btest/Baseline/language.no-module/out b/testing/btest/Baseline/language.no-module/out new file mode 100644 index 0000000000..5b011543b5 --- /dev/null +++ b/testing/btest/Baseline/language.no-module/out @@ -0,0 +1,4 @@ +function (PASS) +global variable (PASS) +const (PASS) +event (PASS) diff --git a/testing/btest/Baseline/language.set/out b/testing/btest/Baseline/language.set/out index b4801ac799..fc157cf7d9 100644 --- a/testing/btest/Baseline/language.set/out +++ b/testing/btest/Baseline/language.set/out @@ -1,3 +1,7 @@ +type inference (PASS) +type inference (PASS) +type inference (PASS) +cardinality (PASS) cardinality (PASS) cardinality (PASS) cardinality (PASS) @@ -24,6 +28,10 @@ add element (PASS) in operator (PASS) add element (PASS) in operator (PASS) +add element (PASS) +in operator (PASS) +remove element (PASS) +!in operator (PASS) remove element (PASS) !in operator (PASS) remove element (PASS) diff --git a/testing/btest/Baseline/language.table/out b/testing/btest/Baseline/language.table/out index 8a45707e2d..5d32cb29fd 100644 --- a/testing/btest/Baseline/language.table/out +++ b/testing/btest/Baseline/language.table/out @@ -1,3 +1,7 @@ +type inference (PASS) +type inference (PASS) +type inference (PASS) +cardinality (PASS) cardinality (PASS) cardinality (PASS) cardinality (PASS) diff --git a/testing/btest/Baseline/language.vector/out b/testing/btest/Baseline/language.vector/out index 4196b36141..4bf909725c 100644 --- a/testing/btest/Baseline/language.vector/out +++ b/testing/btest/Baseline/language.vector/out @@ -1,3 +1,4 @@ +type inference (PASS) cardinality (PASS) cardinality (PASS) cardinality (PASS) diff --git a/testing/btest/Baseline/language.when/out b/testing/btest/Baseline/language.when/out index 3a052217ab..19f86f493a 100644 --- a/testing/btest/Baseline/language.when/out +++ b/testing/btest/Baseline/language.when/out @@ -1,2 +1 @@ done -lookup successful diff --git a/testing/btest/language/at-if.bro b/testing/btest/language/at-if.bro new file mode 100644 index 0000000000..979ed0bb9a --- /dev/null +++ b/testing/btest/language/at-if.bro @@ -0,0 +1,49 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event bro_init() +{ + local xyz = 0; + + # Test "if" without "else" + + @if ( F ) + xyz += 1; + @endif + + @if ( T ) + xyz += 2; + @endif + + test_case( "@if", xyz == 2 ); + + # Test "if" with an "else" + + xyz = 0; + + @if ( F ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@if...@else", xyz == 2 ); + + xyz = 0; + + @if ( T ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@if...@else", xyz == 1 ); + +} + diff --git a/testing/btest/language/at-ifdef.bro b/testing/btest/language/at-ifdef.bro new file mode 100644 index 0000000000..c30236f204 --- /dev/null +++ b/testing/btest/language/at-ifdef.bro @@ -0,0 +1,50 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +global thisisdefined = 123; + +event bro_init() +{ + local xyz = 0; + + # Test "ifdef" without "else" + + @ifdef ( notdefined ) + xyz += 1; + @endif + + @ifdef ( thisisdefined ) + xyz += 2; + @endif + + test_case( "@ifdef", xyz == 2 ); + + # Test "ifdef" with an "else" + + xyz = 0; + + @ifdef ( doesnotexist ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@ifdef...@else", xyz == 2 ); + + xyz = 0; + + @ifdef ( thisisdefined ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@ifdef...@else", xyz == 1 ); + +} + diff --git a/testing/btest/language/at-ifndef.bro b/testing/btest/language/at-ifndef.bro new file mode 100644 index 0000000000..c98287590f --- /dev/null +++ b/testing/btest/language/at-ifndef.bro @@ -0,0 +1,50 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +global thisisdefined = 123; + +event bro_init() +{ + local xyz = 0; + + # Test "ifndef" without "else" + + @ifndef ( notdefined ) + xyz += 1; + @endif + + @ifndef ( thisisdefined ) + xyz += 2; + @endif + + test_case( "@ifndef", xyz == 1 ); + + # Test "ifndef" with an "else" + + xyz = 0; + + @ifndef ( doesnotexist ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@ifndef...@else", xyz == 1 ); + + xyz = 0; + + @ifndef ( thisisdefined ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@ifndef...@else", xyz == 2 ); + +} + diff --git a/testing/btest/language/at-load.bro b/testing/btest/language/at-load.bro new file mode 100644 index 0000000000..b51594be16 --- /dev/null +++ b/testing/btest/language/at-load.bro @@ -0,0 +1,43 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +# In this script, we try to access each object defined in a "@load"ed script + +@load secondtestfile + +event bro_init() +{ + test_case( "function", T ); + test_case( "global variable", num == 123 ); + test_case( "const", daysperyear == 365 ); + event testevent( "foo" ); +} + + +# @TEST-START-FILE secondtestfile + +# In this script, we define some objects to be used in another script + +# Note: this script is not listed on the bro command-line (instead, it +# is "@load"ed from the other script) + +global test_case: function(msg: string, expect: bool); + +global testevent: event(msg: string); + +global num: count = 123; + +const daysperyear: count = 365; + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +event testevent(msg: string) + { + test_case( "event", T ); + } + +# @TEST-END-FILE + diff --git a/testing/btest/language/no-module.bro b/testing/btest/language/no-module.bro new file mode 100644 index 0000000000..eadce66c18 --- /dev/null +++ b/testing/btest/language/no-module.bro @@ -0,0 +1,34 @@ +# @TEST-EXEC: bro %INPUT secondtestfile >out +# @TEST-EXEC: btest-diff out + +# This is the same test as "module.bro", but here we omit the module definition + + +global num: count = 123; + +const daysperyear: count = 365; + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +event testevent(msg: string) + { + test_case( "event", T ); + } + + +# @TEST-START-FILE secondtestfile + +# In this script, we try to access each object defined in the other script + +event bro_init() +{ + test_case( "function", T ); + test_case( "global variable", num == 123 ); + test_case( "const", daysperyear == 365 ); + event testevent( "foo" ); +} + +# @TEST-END-FILE diff --git a/testing/btest/language/set.bro b/testing/btest/language/set.bro index 66b2ebc3af..bfea2b729b 100644 --- a/testing/btest/language/set.bro +++ b/testing/btest/language/set.bro @@ -11,6 +11,7 @@ function test_case(msg: string, expect: bool) global s10: set[string] = { "curly", "braces" }; global s11: set[port, string, bool] = { [10/udp, "curly", F], [11/udp, "braces", T] }; +global s12 = { "more", "curly", "braces" }; event bro_init() { @@ -24,6 +25,11 @@ event bro_init() local s7: set[port, string, bool]; local s8 = set( [8/tcp, "type inference", T] ); + # Type inference test + test_case( "type inference", type_name(s4) == "set[string]" ); + test_case( "type inference", type_name(s8) == "set[port,string,bool]" ); + test_case( "type inference", type_name(s12) == "set[string]" ); + # Test the size of each set test_case( "cardinality", |s1| == 2 ); test_case( "cardinality", |s2| == 0 ); @@ -35,6 +41,7 @@ event bro_init() test_case( "cardinality", |s8| == 1 ); test_case( "cardinality", |s10| == 2 ); test_case( "cardinality", |s11| == 2 ); + test_case( "cardinality", |s12| == 3 ); # Test iterating over each set local ct: count; @@ -94,6 +101,10 @@ event bro_init() test_case( "add element", |s10| == 3 ); test_case( "in operator", "global" in s10 ); + add s12["more global"]; + test_case( "add element", |s12| == 4 ); + test_case( "in operator", "more global" in s12 ); + # Test removing elements from each set delete s1["test"]; delete s1["foobar"]; # element does not exist @@ -117,5 +128,9 @@ event bro_init() delete s10["braces"]; test_case( "remove element", |s10| == 2 ); test_case( "!in operator", "braces" !in s10 ); + + delete s12["curly"]; + test_case( "remove element", |s12| == 3 ); + test_case( "!in operator", "curly" !in s12 ); } diff --git a/testing/btest/language/table.bro b/testing/btest/language/table.bro index d7fc677a6d..83f9377d68 100644 --- a/testing/btest/language/table.bro +++ b/testing/btest/language/table.bro @@ -6,6 +6,7 @@ function test_case(msg: string, expect: bool) print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); } +global t11 = { [1] = "type", [2] = "inference", [3] = "test" }; event bro_init() { @@ -24,6 +25,11 @@ event bro_init() [10/udp, "curly", F] = "first", [11/udp, "braces", T] = "second" }; + # Type inference test + test_case( "type inference", type_name(t4) == "table[count] of string" ); + test_case( "type inference", type_name(t9) == "table[port,string,bool] of string" ); + test_case( "type inference", type_name(t11) == "table[count] of string" ); + # Test the size of each table test_case( "cardinality", |t1| == 2 ); test_case( "cardinality", |t2| == 0 ); @@ -35,6 +41,7 @@ event bro_init() test_case( "cardinality", |t8| == 0 ); test_case( "cardinality", |t9| == 1 ); test_case( "cardinality", |t10| == 2 ); + test_case( "cardinality", |t11| == 3 ); # Test iterating over each table local ct: count; diff --git a/testing/btest/language/vector.bro b/testing/btest/language/vector.bro index 320736238e..d09b474b08 100644 --- a/testing/btest/language/vector.bro +++ b/testing/btest/language/vector.bro @@ -17,6 +17,10 @@ event bro_init() local v3: vector of string; local v4 = vector( "type inference" ); + # Type inference test + + test_case( "type inference", type_name(v4) == "vector of string" ); + # Test the size of each vector test_case( "cardinality", |v1| == 2 ); diff --git a/testing/btest/language/when.bro b/testing/btest/language/when.bro index 9ad45ab49b..d6b08b67e1 100644 --- a/testing/btest/language/when.bro +++ b/testing/btest/language/when.bro @@ -4,7 +4,7 @@ event bro_init() { - local h1: addr = 1.2.3.4; + local h1: addr = 127.0.0.1; when ( local h1name = lookup_addr(h1) ) { From 76420e4b618899ba26e022fb3cb4d8ddd8612d06 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Sun, 2 Sep 2012 12:55:31 -0500 Subject: [PATCH 621/651] Add more language tests --- testing/btest/Baseline/language.vector/out | 26 ++++++++ testing/btest/language/string.bro | 6 +- testing/btest/language/vector.bro | 76 +++++++++++++++++++--- 3 files changed, 96 insertions(+), 12 deletions(-) diff --git a/testing/btest/Baseline/language.vector/out b/testing/btest/Baseline/language.vector/out index 4bf909725c..54d19346d3 100644 --- a/testing/btest/Baseline/language.vector/out +++ b/testing/btest/Baseline/language.vector/out @@ -1,9 +1,22 @@ type inference (PASS) +type inference (PASS) cardinality (PASS) cardinality (PASS) cardinality (PASS) cardinality (PASS) cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +cardinality (PASS) +zero-based indexing (PASS) iterate over vector (PASS) iterate over vector (PASS) iterate over vector (PASS) @@ -19,6 +32,8 @@ add element (PASS) access element (PASS) add element (PASS) access element (PASS) +add element (PASS) +access element (PASS) overwrite element (PASS) access element (PASS) overwrite element (PASS) @@ -30,3 +45,14 @@ overwrite element (PASS) access element (PASS) overwrite element (PASS) access element (PASS) +overwrite element (PASS) +access element (PASS) +++ operator (PASS) +-- operator (PASS) ++ operator (PASS) +- operator (PASS) +* operator (PASS) +/ operator (PASS) +% operator (PASS) +&& operator (PASS) +|| operator (PASS) diff --git a/testing/btest/language/string.bro b/testing/btest/language/string.bro index b9a17e3645..eb3757ed70 100644 --- a/testing/btest/language/string.bro +++ b/testing/btest/language/string.bro @@ -15,9 +15,9 @@ event bro_init() local s4: string = "a\tb"; # tab 
local s5: string = "a\nb"; # newline local s6: string = "a\xffb"; # hex value - local s7: string = "a\x00b"; # hex value - local s8: string = "a\x0ab"; # hex value - local s9: string = "a\011b"; # octal value + local s7: string = "a\x00b"; # hex value (null character) + local s8: string = "a\x0ab"; # hex value (newline character) + local s9: string = "a\011b"; # octal value (tab character) local s10: string = "a\"b"; # double quote local s11: string = "a\\b"; # backslash local s12: string = s2 + s3; # string concatenation diff --git a/testing/btest/language/vector.bro b/testing/btest/language/vector.bro index d09b474b08..2e3ecb8eee 100644 --- a/testing/btest/language/vector.bro +++ b/testing/btest/language/vector.bro @@ -8,7 +8,7 @@ function test_case(msg: string, expect: bool) # Note: only global vectors can be initialized with curly braces -global v5: vector of string = { "curly", "braces" }; +global v20: vector of string = { "curly", "braces" }; event bro_init() { @@ -16,10 +16,22 @@ event bro_init() local v2: vector of string = vector(); local v3: vector of string; local v4 = vector( "type inference" ); + local v5 = vector( 1, 2, 3 ); + local v6 = vector( 10, 20, 30 ); + local v7 = v5 + v6; + local v8 = v6 - v5; + local v9 = v5 * v6; + local v10 = v6 / v5; + local v11 = v6 % v5; + local v12 = vector( T, F, T ); + local v13 = vector( F, F, T ); + local v14 = v12 && v13; + local v15 = v12 || v13; # Type inference test test_case( "type inference", type_name(v4) == "vector of string" ); + test_case( "type inference", type_name(v5) == "vector of count" ); # Test the size of each vector @@ -27,7 +39,22 @@ event bro_init() test_case( "cardinality", |v2| == 0 ); test_case( "cardinality", |v3| == 0 ); test_case( "cardinality", |v4| == 1 ); - test_case( "cardinality", |v5| == 2 ); + test_case( "cardinality", |v5| == 3 ); + test_case( "cardinality", |v6| == 3 ); + test_case( "cardinality", |v7| == 3 ); + test_case( "cardinality", |v8| == 3 ); + test_case( "cardinality", |v9| == 3 ); + test_case( "cardinality", |v10| == 3 ); + test_case( "cardinality", |v11| == 3 ); + test_case( "cardinality", |v12| == 3 ); + test_case( "cardinality", |v13| == 3 ); + test_case( "cardinality", |v14| == 3 ); + test_case( "cardinality", |v15| == 3 ); + test_case( "cardinality", |v20| == 2 ); + + # Test that vectors use zero-based indexing + + test_case( "zero-based indexing", v1[0] == "test" && v5[0] == 1 ); # Test iterating over each vector @@ -51,7 +78,7 @@ event bro_init() test_case( "iterate over vector", ct == 0 ); ct = 0; - for ( c in v5 ) + for ( c in v20 ) { ++ct; } @@ -78,9 +105,13 @@ event bro_init() test_case( "add element", |v4| == 2 ); test_case( "access element", v4[1] == "local" ); - v5[2] = "global"; - test_case( "add element", |v5| == 3 ); - test_case( "access element", v5[2] == "global" ); + v5[3] = 77; + test_case( "add element", |v5| == 4 ); + test_case( "access element", v5[3] == 77 ); + + v20[2] = "global"; + test_case( "add element", |v20| == 3 ); + test_case( "access element", v20[2] == "global" ); # Test overwriting elements of each vector @@ -101,8 +132,35 @@ event bro_init() test_case( "overwrite element", |v4| == 2 ); test_case( "access element", v4[0] == "new4" ); - v5[1] = "new5"; - test_case( "overwrite element", |v5| == 3 ); - test_case( "access element", v5[1] == "new5" ); + v5[0] = 0; + test_case( "overwrite element", |v5| == 4 ); + test_case( "access element", v5[0] == 0 ); + + v20[1] = "new5"; + test_case( "overwrite element", |v20| == 3 ); + test_case( "access element", v20[1] == 
"new5" ); + + # Test increment/decrement operators + + ++v5; + test_case( "++ operator", |v5| == 4 && v5[0] == 1 && v5[1] == 3 + && v5[2] == 4 && v5[3] == 78 ); + --v5; + test_case( "-- operator", |v5| == 4 && v5[0] == 0 && v5[1] == 2 + && v5[2] == 3 && v5[3] == 77 ); + + # Test +,-,*,/,% of two vectors + + test_case( "+ operator", v7[0] == 11 && v7[1] == 22 && v7[2] == 33 ); + test_case( "- operator", v8[0] == 9 && v8[1] == 18 && v8[2] == 27 ); + test_case( "* operator", v9[0] == 10 && v9[1] == 40 && v9[2] == 90 ); + test_case( "/ operator", v10[0] == 10 && v10[1] == 10 && v10[2] == 10 ); + test_case( "% operator", v11[0] == 0 && v11[1] == 0 && v11[2] == 0 ); + + # Test &&,|| of two vectors + + test_case( "&& operator", v14[0] == F && v14[1] == F && v14[2] == T ); + test_case( "|| operator", v15[0] == T && v15[1] == F && v15[2] == T ); + } From d5bf5eb38c56860cbcb4232c26343d8182b7634f Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 4 Sep 2012 17:39:00 -0500 Subject: [PATCH 622/651] Add more language tests --- testing/btest/Baseline/language.addr/out | 2 + testing/btest/Baseline/language.any/out | 14 +++ testing/btest/Baseline/language.bool/out | 2 + testing/btest/Baseline/language.count/out | 5 +- testing/btest/Baseline/language.double/out | 3 + testing/btest/Baseline/language.int/out | 2 +- testing/btest/Baseline/language.interval/out | 4 +- testing/btest/Baseline/language.pattern/out | 2 + testing/btest/Baseline/language.port/out | 1 + testing/btest/Baseline/language.string/out | 33 ++++--- testing/btest/Baseline/language.subnet/out | 2 + testing/btest/Baseline/language.table/out | 1 + testing/btest/Baseline/language.time/out | 2 +- testing/btest/Baseline/language.vector/out | 1 + testing/btest/language/addr.bro | 7 +- testing/btest/language/any.bro | 40 ++++++++ testing/btest/language/bool.bro | 7 +- testing/btest/language/count.bro | 22 ++++- testing/btest/language/double.bro | 21 ++++- testing/btest/language/file.bro | 4 +- testing/btest/language/int.bro | 18 +++- testing/btest/language/interval.bro | 38 ++++++-- testing/btest/language/pattern.bro | 12 ++- testing/btest/language/port.bro | 27 +++--- testing/btest/language/set.bro | 58 ++++++------ testing/btest/language/string.bro | 99 +++++++++++--------- testing/btest/language/subnet.bro | 9 +- testing/btest/language/table.bro | 31 ++++-- testing/btest/language/time.bro | 15 ++- testing/btest/language/vector.bro | 21 +++-- 30 files changed, 341 insertions(+), 162 deletions(-) create mode 100644 testing/btest/Baseline/language.any/out create mode 100644 testing/btest/language/any.bro diff --git a/testing/btest/Baseline/language.addr/out b/testing/btest/Baseline/language.addr/out index 79a88d6dcb..b04aac5ce3 100644 --- a/testing/btest/Baseline/language.addr/out +++ b/testing/btest/Baseline/language.addr/out @@ -3,6 +3,7 @@ IPv4 address equality (PASS) IPv4 address comparison (PASS) IPv4 address comparison (PASS) size of IPv4 address (PASS) +IPv4 address type inference (PASS) IPv6 address inequality (PASS) IPv6 address equality (PASS) IPv6 address equality (PASS) @@ -10,4 +11,5 @@ IPv6 address comparison (PASS) IPv6 address comparison (PASS) IPv6 address not case-sensitive (PASS) size of IPv6 address (PASS) +IPv6 address type inference (PASS) IPv4 and IPv6 address inequality (PASS) diff --git a/testing/btest/Baseline/language.any/out b/testing/btest/Baseline/language.any/out new file mode 100644 index 0000000000..4072ce3745 --- /dev/null +++ b/testing/btest/Baseline/language.any/out @@ -0,0 +1,14 @@ +count (PASS) +string 
(PASS) +pattern (PASS) +bool (PASS) +string (PASS) +count (PASS) +int (PASS) +double (PASS) +pattern (PASS) +addr (PASS) +addr (PASS) +subnet (PASS) +subnet (PASS) +port (PASS) diff --git a/testing/btest/Baseline/language.bool/out b/testing/btest/Baseline/language.bool/out index 177c6795ef..9e4c6c3d6e 100644 --- a/testing/btest/Baseline/language.bool/out +++ b/testing/btest/Baseline/language.bool/out @@ -5,3 +5,5 @@ logical and operator (PASS) negation operator (PASS) absolute value (PASS) absolute value (PASS) +type inference (PASS) +type inference (PASS) diff --git a/testing/btest/Baseline/language.count/out b/testing/btest/Baseline/language.count/out index cab1ff90df..4ef65b6098 100644 --- a/testing/btest/Baseline/language.count/out +++ b/testing/btest/Baseline/language.count/out @@ -1,10 +1,11 @@ +type inference (PASS) +counter alias (PASS) +hexadecimal (PASS) inequality operator (PASS) relational operator (PASS) relational operator (PASS) relational operator (PASS) relational operator (PASS) -hexadecimal (PASS) -counter alias (PASS) absolute value (PASS) absolute value (PASS) pre-increment operator (PASS) diff --git a/testing/btest/Baseline/language.double/out b/testing/btest/Baseline/language.double/out index 9711e70d9b..3f70635588 100644 --- a/testing/btest/Baseline/language.double/out +++ b/testing/btest/Baseline/language.double/out @@ -1,3 +1,6 @@ +type inference (PASS) +type inference (PASS) +type inference (PASS) double representations (PASS) double representations (PASS) double representations (PASS) diff --git a/testing/btest/Baseline/language.int/out b/testing/btest/Baseline/language.int/out index 6defb35b20..01f018acbe 100644 --- a/testing/btest/Baseline/language.int/out +++ b/testing/btest/Baseline/language.int/out @@ -1,3 +1,4 @@ +type inference (PASS) optional '+' sign (PASS) negative vs. positive (PASS) negative vs. 
positive (PASS) @@ -20,4 +21,3 @@ max int value = 9223372036854775807 (PASS) min int value = -9223372036854775808 (PASS) max int value = 9223372036854775807 (PASS) min int value = -9223372036854775808 (PASS) -type inference (PASS) diff --git a/testing/btest/Baseline/language.interval/out b/testing/btest/Baseline/language.interval/out index 3eb135de52..425ae1c15c 100644 --- a/testing/btest/Baseline/language.interval/out +++ b/testing/btest/Baseline/language.interval/out @@ -1,6 +1,8 @@ +type inference (PASS) +type inference (PASS) optional space (PASS) -different units with same numeric value (PASS) plural/singular interval are same (PASS) +different units with same numeric value (PASS) compare different time units (PASS) compare different time units (PASS) compare different time units (PASS) diff --git a/testing/btest/Baseline/language.pattern/out b/testing/btest/Baseline/language.pattern/out index 5a31e4eacb..4a5b8de670 100644 --- a/testing/btest/Baseline/language.pattern/out +++ b/testing/btest/Baseline/language.pattern/out @@ -1,6 +1,8 @@ +type inference (PASS) equality operator (PASS) equality operator (order of operands) (PASS) inequality operator (PASS) +inequality operator (order of operands) (PASS) in operator (PASS) in operator (PASS) !in operator (PASS) diff --git a/testing/btest/Baseline/language.port/out b/testing/btest/Baseline/language.port/out index 9dd7ba03c2..b307388c35 100644 --- a/testing/btest/Baseline/language.port/out +++ b/testing/btest/Baseline/language.port/out @@ -1,3 +1,4 @@ +type inference (PASS) protocol ordering (PASS) protocol ordering (PASS) protocol ordering (PASS) diff --git a/testing/btest/Baseline/language.string/out b/testing/btest/Baseline/language.string/out index 623d1cd3ba..5595445ffc 100644 --- a/testing/btest/Baseline/language.string/out +++ b/testing/btest/Baseline/language.string/out @@ -1,24 +1,29 @@ +type inference (PASS) +tab escape sequence (PASS) +newline escape sequence (PASS) +double quote escape sequence (PASS) +backslash escape sequence (PASS) +1-digit hex escape sequence (PASS) +2-digit hex escape sequence (PASS) +2-digit hex escape sequence (PASS) +2-digit hex escape sequence (PASS) +3-digit octal escape sequence (PASS) +2-digit octal escape sequence (PASS) +1-digit octal escape sequence (PASS) +tab escape sequence (PASS) +tab escape sequence (PASS) +newline escape sequence (PASS) +newline escape sequence (PASS) +double quote escape sequence (PASS) +null escape sequence (PASS) empty string (PASS) nonempty string (PASS) string comparison (PASS) string comparison (PASS) string comparison (PASS) string comparison (PASS) -null escape sequence (PASS) -tab escape sequence (PASS) -newline escape sequence (PASS) -hex escape sequence (PASS) -hex escape sequence (PASS) -hex escape sequence (PASS) -octal escape sequence (PASS) -quote escape sequence (PASS) -backslash escape sequence (PASS) -null escape sequence (PASS) -newline escape sequence (PASS) -tab escape sequence (PASS) string concatenation (PASS) string concatenation (PASS) -long string initialization (PASS) +multi-line string initialization (PASS) in operator (PASS) !in operator (PASS) -type inference (PASS) diff --git a/testing/btest/Baseline/language.subnet/out b/testing/btest/Baseline/language.subnet/out index f753d65c68..45900a291e 100644 --- a/testing/btest/Baseline/language.subnet/out +++ b/testing/btest/Baseline/language.subnet/out @@ -2,9 +2,11 @@ IPv4 subnet equality (PASS) IPv4 subnet inequality (PASS) IPv4 subnet in operator (PASS) IPv4 subnet !in operator (PASS) +IPv4 subnet 
type inference (PASS) IPv6 subnet equality (PASS) IPv6 subnet inequality (PASS) IPv6 subnet in operator (PASS) IPv6 subnet !in operator (PASS) +IPv6 subnet type inference (PASS) IPv4 and IPv6 subnet inequality (PASS) IPv4 address and IPv6 subnet (PASS) diff --git a/testing/btest/Baseline/language.table/out b/testing/btest/Baseline/language.table/out index 5d32cb29fd..514cb6b02d 100644 --- a/testing/btest/Baseline/language.table/out +++ b/testing/btest/Baseline/language.table/out @@ -17,6 +17,7 @@ iterate over table (PASS) iterate over table (PASS) iterate over table (PASS) iterate over table (PASS) +overwrite element (PASS) add element (PASS) in operator (PASS) add element (PASS) diff --git a/testing/btest/Baseline/language.time/out b/testing/btest/Baseline/language.time/out index 3615a17c53..5e1c8e6b26 100644 --- a/testing/btest/Baseline/language.time/out +++ b/testing/btest/Baseline/language.time/out @@ -1,7 +1,7 @@ +type inference (PASS) add interval (PASS) subtract interval (PASS) inequality (PASS) equality (PASS) subtract time (PASS) size operator (PASS) -type inference (PASS) diff --git a/testing/btest/Baseline/language.vector/out b/testing/btest/Baseline/language.vector/out index 54d19346d3..0aa3ab0a8f 100644 --- a/testing/btest/Baseline/language.vector/out +++ b/testing/btest/Baseline/language.vector/out @@ -1,5 +1,6 @@ type inference (PASS) type inference (PASS) +type inference (PASS) cardinality (PASS) cardinality (PASS) cardinality (PASS) diff --git a/testing/btest/language/addr.bro b/testing/btest/language/addr.bro index b97710ce22..1cd93bad03 100644 --- a/testing/btest/language/addr.bro +++ b/testing/btest/language/addr.bro @@ -13,12 +13,14 @@ event bro_init() local a1: addr = 0.0.0.0; local a2: addr = 10.0.0.11; local a3: addr = 255.255.255.255; + local a4 = 192.1.2.3; test_case( "IPv4 address inequality", a1 != a2 ); test_case( "IPv4 address equality", a1 == 0.0.0.0 ); test_case( "IPv4 address comparison", a1 < a2 ); test_case( "IPv4 address comparison", a3 > a2 ); test_case( "size of IPv4 address", |a1| == 32 ); + test_case( "IPv4 address type inference", type_name(a4) == "addr" ); # IPv6 addresses local b1: addr = [::]; @@ -28,6 +30,7 @@ event bro_init() local b5: addr = [0000:0000:0000:0000:0000:0000:0000:0000]; local b6: addr = [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]; local b7: addr = [AAAA:BBBB:CCCC:DDDD:EEEE:FFFF:1111:2222]; + local b8 = [a::b]; test_case( "IPv6 address inequality", b1 != b2 ); test_case( "IPv6 address equality", b1 == b5 ); @@ -36,11 +39,9 @@ event bro_init() test_case( "IPv6 address comparison", b4 > b2 ); test_case( "IPv6 address not case-sensitive", b6 == b7 ); test_case( "size of IPv6 address", |b1| == 128 ); + test_case( "IPv6 address type inference", type_name(b8) == "addr" ); test_case( "IPv4 and IPv6 address inequality", a1 != b1 ); - # type inference - local x = 192.1.2.3; - local y = [a::b]; } diff --git a/testing/btest/language/any.bro b/testing/btest/language/any.bro new file mode 100644 index 0000000000..7437ee9851 --- /dev/null +++ b/testing/btest/language/any.bro @@ -0,0 +1,40 @@ +# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +function anyarg(arg1: any, arg1type: string) + { + test_case( arg1type, type_name(arg1) == arg1type ); + } + +event bro_init() +{ + local any1: any = 5; + local any2: any = "bar"; + local any3: any = /bar/; + + # Test using variable of type "any" + + anyarg( any1, "count" ); + anyarg( any2, "string" ); + anyarg( any3, "pattern" ); + + # Test of other types + + anyarg( T, "bool" ); + anyarg( "foo", "string" ); + anyarg( 15, "count" ); + anyarg( +15, "int" ); + anyarg( 15.0, "double" ); + anyarg( /foo/, "pattern" ); + anyarg( 127.0.0.1, "addr" ); + anyarg( [::1], "addr" ); + anyarg( 127.0.0.1/16, "subnet" ); + anyarg( [ffff::1]/64, "subnet" ); + anyarg( 123/tcp, "port" ); +} + diff --git a/testing/btest/language/bool.bro b/testing/btest/language/bool.bro index 09614b516e..b75343025f 100644 --- a/testing/btest/language/bool.bro +++ b/testing/btest/language/bool.bro @@ -12,6 +12,8 @@ event bro_init() local b1: bool = T; local b2: bool = F; local b3: bool = T; + local b4 = T; + local b5 = F; test_case( "equality operator", b1 == b3 ); test_case( "inequality operator", b1 != b2 ); @@ -20,9 +22,8 @@ event bro_init() test_case( "negation operator", !b2 ); test_case( "absolute value", |b1| == 1 ); test_case( "absolute value", |b2| == 0 ); + test_case( "type inference", type_name(b4) == "bool" ); + test_case( "type inference", type_name(b5) == "bool" ); - # type inference - local x = T; - local y = F; } diff --git a/testing/btest/language/count.bro b/testing/btest/language/count.bro index e58fb47b54..d6dcf5a97e 100644 --- a/testing/btest/language/count.bro +++ b/testing/btest/language/count.bro @@ -16,14 +16,27 @@ event bro_init() local c5: count = 18446744073709551615; # maximum allowed value local c6: count = 0xffffffffffffffff; # maximum allowed value local c7: counter = 5; + local c8 = 1; + + # Type inference test + + test_case( "type inference", type_name(c8) == "count" ); + + # Counter alias test + + test_case( "counter alias", c2 == c7 ); + + # Test various constant representations + + test_case( "hexadecimal", c3 == c4 ); + + # Operator tests test_case( "inequality operator", c1 != c2 ); test_case( "relational operator", c1 < c2 ); test_case( "relational operator", c1 <= c2 ); test_case( "relational operator", c2 > c1 ); test_case( "relational operator", c2 >= c1 ); - test_case( "hexadecimal", c3 == c4 ); - test_case( "counter alias", c2 == c7 ); test_case( "absolute value", |c1| == 0 ); test_case( "absolute value", |c2| == 5 ); test_case( "pre-increment operator", ++c2 == 6 ); @@ -34,12 +47,13 @@ event bro_init() test_case( "assignment operator", c2 == 8 ); c2 -= 2; test_case( "assignment operator", c2 == 6 ); + + # Max. 
value tests + local str1 = fmt("max count value = %d", c5); test_case( str1, str1 == "max count value = 18446744073709551615" ); local str2 = fmt("max count value = %d", c6); test_case( str2, str2 == "max count value = 18446744073709551615" ); - # type inference - local x = 1; } diff --git a/testing/btest/language/double.bro b/testing/btest/language/double.bro index f56d291631..62ca768e22 100644 --- a/testing/btest/language/double.bro +++ b/testing/btest/language/double.bro @@ -28,6 +28,17 @@ event bro_init() local d17: double = 3.0001; local d18: double = -3.0001; local d19: double = 1.7976931348623157e308; # maximum allowed value + local d20 = 7.0; + local d21 = 7e0; + local d22 = 7e+1; + + # Type inference tests + + test_case( "type inference", type_name(d20) == "double" ); + test_case( "type inference", type_name(d21) == "double" ); + test_case( "type inference", type_name(d22) == "double" ); + + # Test various constant representations test_case( "double representations", d1 == d2 ); test_case( "double representations", d1 == d3 ); @@ -44,6 +55,9 @@ event bro_init() test_case( "double representations", d1 == d14 ); test_case( "double representations", d1 == d15 ); test_case( "double representations", d1 == d16 ); + + # Operator tests + test_case( "inequality operator", d18 != d17 ); test_case( "absolute value", |d18| == d17 ); d4 += 2; @@ -55,12 +69,11 @@ event bro_init() test_case( "relational operator", d17 >= d3 ); test_case( "relational operator", d17 > d3 ); test_case( "division operator", d3/2 == 1.5 ); + + # Max. value test + local str1 = fmt("max double value = %.16e", d19); test_case( str1, str1 == "max double value = 1.7976931348623157e+308" ); - # type inference - local x = 7.0; - local y = 7e0; - local z = 7e+1; } diff --git a/testing/btest/language/file.bro b/testing/btest/language/file.bro index 77650a6082..1f631eb4fe 100644 --- a/testing/btest/language/file.bro +++ b/testing/btest/language/file.bro @@ -5,13 +5,13 @@ event bro_init() { - # Test using "print" statement to output directly to a file local f1: file = open( "out1" ); print f1, 20; print f1, 12; close(f1); - # Test again, but without explicitly using the type name in declaration + # Type inference test + local f2 = open( "out2" ); print f2, "test", 123, 456; close(f2); diff --git a/testing/btest/language/int.bro b/testing/btest/language/int.bro index 03dd52b404..5cfa1620bd 100644 --- a/testing/btest/language/int.bro +++ b/testing/btest/language/int.bro @@ -15,7 +15,7 @@ event bro_init() local i4: int = +0; local i5: int = -0; local i6: int = 12; - local i7: int = 0xc; + local i7: int = +0xc; local i8: int = 0xC; local i9: int = -0xC; local i10: int = -12; @@ -23,6 +23,13 @@ event bro_init() local i12: int = -9223372036854775808; # min. allowed value local i13: int = 0x7fffffffffffffff; # max. allowed value local i14: int = -0x8000000000000000; # min. allowed value + local i15 = +3; + + # Type inference test + + test_case( "type inference", type_name(i15) == "int" ); + + # Test various constant representations test_case( "optional '+' sign", i1 == i2 ); test_case( "negative vs. 
positive", i1 != i3 ); @@ -30,6 +37,9 @@ event bro_init() test_case( "hexadecimal", i6 == i7 ); test_case( "hexadecimal", i6 == i8 ); test_case( "hexadecimal", i9 == i10 ); + + # Operator tests + test_case( "relational operator", i2 > i3 ); test_case( "relational operator", i2 >= i3 ); test_case( "relational operator", i3 < i2 ); @@ -44,6 +54,9 @@ event bro_init() test_case( "assignment operator", i2 == 7 ); i2 -= 2; test_case( "assignment operator", i2 == 5 ); + + # Max/min value tests + local str1 = fmt("max int value = %d", i11); test_case( str1, str1 == "max int value = 9223372036854775807" ); local str2 = fmt("min int value = %d", i12); @@ -53,8 +66,5 @@ event bro_init() local str4 = fmt("min int value = %d", i14); test_case( str4, str4 == "min int value = -9223372036854775808" ); - # type inference - local x = +3; - test_case( "type inference", type_name(x) == "int" ); } diff --git a/testing/btest/language/interval.bro b/testing/btest/language/interval.bro index 9467db9397..816dfd6416 100644 --- a/testing/btest/language/interval.bro +++ b/testing/btest/language/interval.bro @@ -14,7 +14,8 @@ function approx_equal(x: double, y: double): bool event bro_init() { - # constants without space and no letter "s" + # Constants without space and no letter "s" + local in11: interval = 2usec; local in12: interval = 2msec; local in13: interval = 120sec; @@ -23,7 +24,8 @@ event bro_init() # TODO: this one causes bro to fail #local in16: interval = 2.5day; - # constants with space and no letter "s" + # Constants with space and no letter "s" + local in21: interval = 2 usec; local in22: interval = 2 msec; local in23: interval = 120 sec; @@ -31,17 +33,36 @@ event bro_init() local in25: interval = -2 hr; local in26: interval = 2.5 day; - # constants with space and letter "s" + # Constants with space and letter "s" + local in31: interval = 2 usecs; local in32: interval = 2 msecs; - local in33: interval = 120 secs; + local in33: interval = 1.2e2 secs; local in34: interval = 2 mins; local in35: interval = -2 hrs; local in36: interval = 2.5 days; + # Type inference + + local in41 = 2 usec; + # TODO: this one causes bro to fail + #local in42 = 2.1usec; + local in43 = 3usecs; + + # Type inference tests + + test_case( "type inference", type_name(in41) == "interval" ); + #test_case( "type inference", type_name(in42) == "interval" ); + test_case( "type inference", type_name(in43) == "interval" ); + + # Test various constant representations + test_case( "optional space", in11 == in21 ); - test_case( "different units with same numeric value", in11 != in12 ); test_case( "plural/singular interval are same", in11 == in31 ); + + # Operator tests + + test_case( "different units with same numeric value", in11 != in12 ); test_case( "compare different time units", in13 == in34 ); test_case( "compare different time units", in13 <= in34 ); test_case( "compare different time units", in13 >= in34 ); @@ -62,16 +83,13 @@ event bro_init() test_case( "division operator", in35/2 == -1hr ); test_case( "division operator", approx_equal(in32/in31, 1e3) ); + # Test relative size of each interval unit + test_case( "relative size of units", approx_equal(1msec/1usec, 1000) ); test_case( "relative size of units", approx_equal(1sec/1msec, 1000) ); test_case( "relative size of units", approx_equal(1min/1sec, 60) ); test_case( "relative size of units", approx_equal(1hr/1min, 60) ); test_case( "relative size of units", approx_equal(1day/1hr, 24) ); - # type inference - local x = 2 usec; - # TODO: this one causes bro to fail - #local y = 
2.1usec; - local z = 3usecs; } diff --git a/testing/btest/language/pattern.bro b/testing/btest/language/pattern.bro index de33e4d2b6..ec50dc66fe 100644 --- a/testing/btest/language/pattern.bro +++ b/testing/btest/language/pattern.bro @@ -12,17 +12,21 @@ event bro_init() local p1: pattern = /foo|bar/; local p2: pattern = /oob/; local p3: pattern = /^oob/; + local p4 = /foo/; + + # Type inference tests + + test_case( "type inference", type_name(p4) == "pattern" ); + + # Operator tests test_case( "equality operator", "foo" == p1 ); test_case( "equality operator (order of operands)", p1 == "foo" ); test_case( "inequality operator", "foobar" != p1 ); + test_case( "inequality operator (order of operands)", p1 != "foobar" ); test_case( "in operator", p1 in "foobar" ); test_case( "in operator", p2 in "foobar" ); test_case( "!in operator", p3 !in "foobar" ); - # type inference - local x = /foo|bar/; - local y = /foo/; - local z = /^foo/; } diff --git a/testing/btest/language/port.bro b/testing/btest/language/port.bro index b45401da7a..1874e1dca3 100644 --- a/testing/btest/language/port.bro +++ b/testing/btest/language/port.bro @@ -13,23 +13,28 @@ event bro_init() local p2: port = 2/udp; local p3: port = 3/tcp; local p4: port = 4/unknown; + local p5 = 123/tcp; # maximum allowed values for each port type - local p5: port = 255/icmp; - local p6: port = 65535/udp; - local p7: port = 65535/tcp; - local p8: port = 255/unknown; + local p6: port = 255/icmp; + local p7: port = 65535/udp; + local p8: port = 65535/tcp; + local p9: port = 255/unknown; + + # Type inference test + + test_case( "type inference", type_name(p5) == "port" ); + + # Operator tests test_case( "protocol ordering", p1 > p2 ); test_case( "protocol ordering", p2 > p3 ); test_case( "protocol ordering", p3 > p4 ); - test_case( "protocol ordering", p7 < p6 ); - test_case( "protocol ordering", p8 < p5 ); - test_case( "different protocol but same numeric value", p6 != p7 ); - test_case( "different protocol but same numeric value", p5 != p8 ); - test_case( "equality operator", 65535/tcp == p7 ); + test_case( "protocol ordering", p8 < p7 ); + test_case( "protocol ordering", p9 < p6 ); + test_case( "different protocol but same numeric value", p7 != p8 ); + test_case( "different protocol but same numeric value", p6 != p9 ); + test_case( "equality operator", 65535/tcp == p8 ); - # type inference - local x = 123/tcp; } diff --git a/testing/btest/language/set.bro b/testing/btest/language/set.bro index bfea2b729b..5e56e3b9b8 100644 --- a/testing/btest/language/set.bro +++ b/testing/btest/language/set.bro @@ -8,10 +8,10 @@ function test_case(msg: string, expect: bool) # Note: only global sets can be initialized with curly braces -global s10: set[string] = { "curly", "braces" }; -global s11: set[port, string, bool] = { [10/udp, "curly", F], +global sg1: set[string] = { "curly", "braces" }; +global sg2: set[port, string, bool] = { [10/udp, "curly", F], [11/udp, "braces", T] }; -global s12 = { "more", "curly", "braces" }; +global sg3 = { "more", "curly", "braces" }; event bro_init() { @@ -25,12 +25,14 @@ event bro_init() local s7: set[port, string, bool]; local s8 = set( [8/tcp, "type inference", T] ); - # Type inference test + # Type inference tests + test_case( "type inference", type_name(s4) == "set[string]" ); test_case( "type inference", type_name(s8) == "set[port,string,bool]" ); - test_case( "type inference", type_name(s12) == "set[string]" ); + test_case( "type inference", type_name(sg3) == "set[string]" ); # Test the size of each set + test_case( 
"cardinality", |s1| == 2 ); test_case( "cardinality", |s2| == 0 ); test_case( "cardinality", |s3| == 0 ); @@ -39,11 +41,12 @@ event bro_init() test_case( "cardinality", |s6| == 0 ); test_case( "cardinality", |s7| == 0 ); test_case( "cardinality", |s8| == 1 ); - test_case( "cardinality", |s10| == 2 ); - test_case( "cardinality", |s11| == 2 ); - test_case( "cardinality", |s12| == 3 ); + test_case( "cardinality", |sg1| == 2 ); + test_case( "cardinality", |sg2| == 2 ); + test_case( "cardinality", |sg3| == 3 ); # Test iterating over each set + local ct: count; ct = 0; for ( c in s1 ) @@ -69,14 +72,17 @@ event bro_init() test_case( "iterate over set", ct == 2 ); ct = 0; - for ( [c1,c2,c3] in s11 ) + for ( [c1,c2,c3] in sg2 ) { ++ct; } test_case( "iterate over set", ct == 2 ); - # Test adding elements to each set + # Test adding elements to each set (Note: cannot add elements to sets + # of multiple types) + add s1["added"]; + add s1["added"]; # element already exists (nothing happens) test_case( "add element", |s1| == 3 ); test_case( "in operator", "added" in s1 ); @@ -95,19 +101,19 @@ event bro_init() test_case( "add element", |s4| == 2 ); test_case( "in operator", "local" in s4 ); - # Note: cannot add elements to sets of multiple types + add sg1["global"]; + test_case( "add element", |sg1| == 3 ); + test_case( "in operator", "global" in sg1 ); - add s10["global"]; - test_case( "add element", |s10| == 3 ); - test_case( "in operator", "global" in s10 ); + add sg3["more global"]; + test_case( "add element", |sg3| == 4 ); + test_case( "in operator", "more global" in sg3 ); - add s12["more global"]; - test_case( "add element", |s12| == 4 ); - test_case( "in operator", "more global" in s12 ); + # Test removing elements from each set (Note: cannot remove elements + # from sets of multiple types) - # Test removing elements from each set delete s1["test"]; - delete s1["foobar"]; # element does not exist + delete s1["foobar"]; # element does not exist (nothing happens) test_case( "remove element", |s1| == 2 ); test_case( "!in operator", "test" !in s1 ); @@ -123,14 +129,12 @@ event bro_init() test_case( "remove element", |s4| == 1 ); test_case( "!in operator", "type inference" !in s4 ); - # Note: cannot remove elements from sets of multiple types + delete sg1["braces"]; + test_case( "remove element", |sg1| == 2 ); + test_case( "!in operator", "braces" !in sg1 ); - delete s10["braces"]; - test_case( "remove element", |s10| == 2 ); - test_case( "!in operator", "braces" !in s10 ); - - delete s12["curly"]; - test_case( "remove element", |s12| == 3 ); - test_case( "!in operator", "curly" !in s12 ); + delete sg3["curly"]; + test_case( "remove element", |sg3| == 3 ); + test_case( "!in operator", "curly" !in sg3 ); } diff --git a/testing/btest/language/string.bro b/testing/btest/language/string.bro index eb3757ed70..3b9137cda5 100644 --- a/testing/btest/language/string.bro +++ b/testing/btest/language/string.bro @@ -9,51 +9,66 @@ function test_case(msg: string, expect: bool) event bro_init() { - local s1: string = ""; # empty string - local s2: string = "x"; # no escape sequences - local s3: string = "a\0b"; # null character - local s4: string = "a\tb"; # tab - local s5: string = "a\nb"; # newline - local s6: string = "a\xffb"; # hex value - local s7: string = "a\x00b"; # hex value (null character) - local s8: string = "a\x0ab"; # hex value (newline character) - local s9: string = "a\011b"; # octal value (tab character) - local s10: string = "a\"b"; # double quote - local s11: string = "a\\b"; # backslash - 
local s12: string = s2 + s3; # string concatenation - local s13: string = "test"; - local s14: string = "this is a very long string" + + local s1: string = "a\ty"; # tab + local s2: string = "a\nb"; # newline + local s3: string = "a\"b"; # double quote + local s4: string = "a\\b"; # backslash + local s5: string = "a\x9y"; # 1-digit hex value (tab character) + local s6: string = "a\x0ab"; # 2-digit hex value (newline character) + local s7: string = "a\x22b"; # 2-digit hex value (double quote) + local s8: string = "a\x00b"; # 2-digit hex value (null character) + local s9: string = "a\011y"; # 3-digit octal value (tab character) + local s10: string = "a\12b"; # 2-digit octal value (newline character) + local s11: string = "a\0b"; # 1-digit octal value (null character) + + local s20: string = ""; + local s21: string = "x"; + local s22: string = s21 + s11; + local s23: string = "test"; + local s24: string = "this is a very long string" + "which continues on the next line" + "the end"; - local s15: string = "on"; + local s25: string = "on"; + local s26 = "x"; - test_case( "empty string", |s1| == 0 ); - test_case( "nonempty string", |s2| == 1 ); - test_case( "string comparison", s2 > s3 ); - test_case( "string comparison", s2 >= s3 ); - test_case( "string comparison", s3 < s2 ); - test_case( "string comparison", s3 <= s2 ); - test_case( "null escape sequence", |s3| == 3 ); - test_case( "tab escape sequence", |s4| == 3 ); - test_case( "newline escape sequence", |s5| == 3 ); - test_case( "hex escape sequence", |s6| == 3 ); - test_case( "hex escape sequence", |s7| == 3 ); - test_case( "hex escape sequence", |s8| == 3 ); - test_case( "octal escape sequence", |s9| == 3 ); - test_case( "quote escape sequence", |s10| == 3 ); - test_case( "backslash escape sequence", |s11| == 3 ); - test_case( "null escape sequence", s3 == s7 ); - test_case( "newline escape sequence", s5 == s8 ); - test_case( "tab escape sequence", s4 == s9 ); - test_case( "string concatenation", |s12| == 4 ); - s13 += s2; - test_case( "string concatenation", s13 == "testx" ); - test_case( "long string initialization", |s14| == 65 ); - test_case( "in operator", s15 in s14 ); - test_case( "!in operator", s15 !in s13 ); + # Type inference test + + test_case( "type inference", type_name(s26) == "string" ); + + # Escape sequence tests + + test_case( "tab escape sequence", |s1| == 3 ); + test_case( "newline escape sequence", |s2| == 3 ); + test_case( "double quote escape sequence", |s3| == 3 ); + test_case( "backslash escape sequence", |s4| == 3 ); + test_case( "1-digit hex escape sequence", |s5| == 3 ); + test_case( "2-digit hex escape sequence", |s6| == 3 ); + test_case( "2-digit hex escape sequence", |s7| == 3 ); + test_case( "2-digit hex escape sequence", |s8| == 3 ); + test_case( "3-digit octal escape sequence", |s9| == 3 ); + test_case( "2-digit octal escape sequence", |s10| == 3 ); + test_case( "1-digit octal escape sequence", |s11| == 3 ); + test_case( "tab escape sequence", s1 == s5 ); + test_case( "tab escape sequence", s5 == s9 ); + test_case( "newline escape sequence", s2 == s6 ); + test_case( "newline escape sequence", s6 == s10 ); + test_case( "double quote escape sequence", s3 == s7 ); + test_case( "null escape sequence", s8 == s11 ); + + # Operator tests + + test_case( "empty string", |s20| == 0 ); + test_case( "nonempty string", |s21| == 1 ); + test_case( "string comparison", s21 > s11 ); + test_case( "string comparison", s21 >= s11 ); + test_case( "string comparison", s11 < s21 ); + test_case( "string comparison", s11 <= s21 
); + test_case( "string concatenation", |s22| == 4 ); + s23 += s21; + test_case( "string concatenation", s23 == "testx" ); + test_case( "multi-line string initialization", |s24| == 65 ); + test_case( "in operator", s25 in s24 ); + test_case( "!in operator", s25 !in s23 ); - # type inference - local x = "x"; - test_case( "type inference", x == s2 ); } diff --git a/testing/btest/language/subnet.bro b/testing/btest/language/subnet.bro index 63d09f916b..591a42119e 100644 --- a/testing/btest/language/subnet.bro +++ b/testing/btest/language/subnet.bro @@ -18,13 +18,15 @@ event bro_init() local s1: subnet = 0.0.0.0/0; local s2: subnet = 192.0.0.0/8; local s3: subnet = 255.255.255.255/32; + local s4 = 10.0.0.0/16; test_case( "IPv4 subnet equality", a1/8 == s2 ); test_case( "IPv4 subnet inequality", a1/4 != s2 ); test_case( "IPv4 subnet in operator", a1 in s2 ); test_case( "IPv4 subnet !in operator", a1 !in s3 ); + test_case( "IPv4 subnet type inference", type_name(s4) == "subnet" ); - # IPv6 addr + # IPv6 addrs local b1: addr = [ffff::]; local b2: addr = [ffff::1]; local b3: addr = [ffff:1::1]; @@ -32,17 +34,16 @@ event bro_init() # IPv6 subnets local t1: subnet = [::]/0; local t2: subnet = [ffff::]/64; + local t3 = [a::]/32; test_case( "IPv6 subnet equality", b1/64 == t2 ); test_case( "IPv6 subnet inequality", b3/64 != t2 ); test_case( "IPv6 subnet in operator", b2 in t2 ); test_case( "IPv6 subnet !in operator", b3 !in t2 ); + test_case( "IPv6 subnet type inference", type_name(t3) == "subnet" ); test_case( "IPv4 and IPv6 subnet inequality", s1 != t1 ); test_case( "IPv4 address and IPv6 subnet", a1 !in t2 ); - # type inference - local x = 10.0.0.0/16; - local y = [a::]/32; } diff --git a/testing/btest/language/table.bro b/testing/btest/language/table.bro index 83f9377d68..d1b0751970 100644 --- a/testing/btest/language/table.bro +++ b/testing/btest/language/table.bro @@ -6,7 +6,9 @@ function test_case(msg: string, expect: bool) print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); } -global t11 = { [1] = "type", [2] = "inference", [3] = "test" }; +# Note: only global tables can be initialized with curly braces when the table +# type is not explicitly specified +global tg1 = { [1] = "type", [2] = "inference", [3] = "test" }; event bro_init() { @@ -25,12 +27,14 @@ event bro_init() [10/udp, "curly", F] = "first", [11/udp, "braces", T] = "second" }; - # Type inference test + # Type inference tests + test_case( "type inference", type_name(t4) == "table[count] of string" ); test_case( "type inference", type_name(t9) == "table[port,string,bool] of string" ); - test_case( "type inference", type_name(t11) == "table[count] of string" ); + test_case( "type inference", type_name(tg1) == "table[count] of string" ); # Test the size of each table + test_case( "cardinality", |t1| == 2 ); test_case( "cardinality", |t2| == 0 ); test_case( "cardinality", |t3| == 0 ); @@ -41,9 +45,10 @@ event bro_init() test_case( "cardinality", |t8| == 0 ); test_case( "cardinality", |t9| == 1 ); test_case( "cardinality", |t10| == 2 ); - test_case( "cardinality", |t11| == 3 ); + test_case( "cardinality", |tg1| == 3 ); # Test iterating over each table + local ct: count; ct = 0; for ( c in t1 ) @@ -84,7 +89,15 @@ event bro_init() } test_case( "iterate over table", ct == 0 ); - # Test adding elements to each table + # Test overwriting elements in each table (Note: cannot overwrite + # elements in tables of multiple types) + + t1[5] = "overwrite"; + test_case( "overwrite element", |t1| == 2 && t1[5] == "overwrite" ); + + # Test adding elements to each table (Note: cannot add elements to + # tables of multiple types) + t1[1] = "added"; test_case( "add element", |t1| == 3 ); test_case( "in operator", 1 in t1 ); @@ -108,11 +121,11 @@ event bro_init() test_case( "add element", |t5| == 3 ); test_case( "in operator", 10 in t5 ); - # Note: cannot add elements to tables of multiple types + # Test removing elements from each table (Note: cannot remove elements + # from tables of multiple types) - # Test removing elements from each table delete t1[0]; - delete t1[17]; # element does not exist + delete t1[17]; # element does not exist (nothing happens) test_case( "remove element", |t1| == 2 ); test_case( "!in operator", 0 !in t1 ); @@ -132,7 +145,5 @@ event bro_init() test_case( "remove element", |t5| == 2 ); test_case( "!in operator", 1 !in t5 ); - # Note: cannot remove elements from tables of multiple types - } diff --git a/testing/btest/language/time.bro b/testing/btest/language/time.bro index 588cbf8887..43b6694101 100644 --- a/testing/btest/language/time.bro +++ b/testing/btest/language/time.bro @@ -13,16 +13,21 @@ event bro_init() local t2: time = t1 + 3 sec; local t3: time = t2 - 10 sec; local t4: time = t1; - local t5: interval = t2 - t1; + local t5: time = double_to_time(1234567890); + local t6 = current_time(); + + # Type inference test + + test_case( "type inference", type_name(t6) == "time" ); + + # Operator tests test_case( "add interval", t1 < t2 ); test_case( "subtract interval", t1 > t3 ); test_case( "inequality", t1 != t3 ); test_case( "equality", t1 == t4 ); - test_case( "subtract time", t5 == 3sec); - test_case( "size operator", |t1| > 1.0); + test_case( "subtract time", t2 - t1 == 3sec); + test_case( "size operator", |t5| == 1234567890.0 ); - local x = current_time(); - test_case( "type inference", x > t1 ); } diff --git a/testing/btest/language/vector.bro b/testing/btest/language/vector.bro index 2e3ecb8eee..928ddcb645 100644 --- a/testing/btest/language/vector.bro +++ 
b/testing/btest/language/vector.bro @@ -8,7 +8,7 @@ function test_case(msg: string, expect: bool) # Note: only global vectors can be initialized with curly braces -global v20: vector of string = { "curly", "braces" }; +global vg1: vector of string = { "curly", "braces" }; event bro_init() { @@ -28,10 +28,11 @@ event bro_init() local v14 = v12 && v13; local v15 = v12 || v13; - # Type inference test + # Type inference tests test_case( "type inference", type_name(v4) == "vector of string" ); test_case( "type inference", type_name(v5) == "vector of count" ); + test_case( "type inference", type_name(v12) == "vector of bool" ); # Test the size of each vector @@ -50,7 +51,7 @@ event bro_init() test_case( "cardinality", |v13| == 3 ); test_case( "cardinality", |v14| == 3 ); test_case( "cardinality", |v15| == 3 ); - test_case( "cardinality", |v20| == 2 ); + test_case( "cardinality", |vg1| == 2 ); # Test that vectors use zero-based indexing @@ -78,7 +79,7 @@ event bro_init() test_case( "iterate over vector", ct == 0 ); ct = 0; - for ( c in v20 ) + for ( c in vg1 ) { ++ct; } @@ -109,9 +110,9 @@ event bro_init() test_case( "add element", |v5| == 4 ); test_case( "access element", v5[3] == 77 ); - v20[2] = "global"; - test_case( "add element", |v20| == 3 ); - test_case( "access element", v20[2] == "global" ); + vg1[2] = "global"; + test_case( "add element", |vg1| == 3 ); + test_case( "access element", vg1[2] == "global" ); # Test overwriting elements of each vector @@ -136,9 +137,9 @@ event bro_init() test_case( "overwrite element", |v5| == 4 ); test_case( "access element", v5[0] == 0 ); - v20[1] = "new5"; - test_case( "overwrite element", |v20| == 3 ); - test_case( "access element", v20[1] == "new5" ); + vg1[1] = "new5"; + test_case( "overwrite element", |vg1| == 3 ); + test_case( "access element", vg1[1] == "new5" ); # Test increment/decrement operators From 63a550fa9e9b2c2a84b0769c683ccd183e10fefb Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 5 Sep 2012 12:00:21 -0500 Subject: [PATCH 623/651] Fix a segfault when iterating over a set When iterating over a set with a "for" loop, bro would segfault when the number of index variables was less than required. Example: for ( [c1,c2] in s1 ) ... where s1 is defined as set[addr,port,count]. --- src/Stmt.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Stmt.cc b/src/Stmt.cc index 582323bf91..7d754d8e72 100644 --- a/src/Stmt.cc +++ b/src/Stmt.cc @@ -943,7 +943,10 @@ ForStmt::ForStmt(id_list* arg_loop_vars, Expr* loop_expr) { const type_list* indices = e->Type()->AsTableType()->IndexTypes(); if ( indices->length() != loop_vars->length() ) + { e->Error("wrong index size"); + return; + } for ( int i = 0; i < indices->length(); i++ ) { From a10093b620a1dab8d7955b43ac237c40ecfa9bcf Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 5 Sep 2012 16:20:34 -0500 Subject: [PATCH 624/651] Add sleeps to configuration_update test for better reliability. Not the greatest solution, but makes the 3 bro processes more likely to run sequentially so that the controller2 process doesn't happen to be scheduled before the controller process. In that case, the controllee gets the shutdown request before the configuration update. FreeBSD especially seemed to schedule them the unintended way frequently. 
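To make the set-iteration fix above (PATCH 623) concrete: a "for" loop over a multi-index set needs one loop variable per index type, and with this change a mismatch is reported as a "wrong index size" error instead of crashing. A minimal Bro sketch of the correct form — the set contents here are made up for illustration:

global s1: set[addr, port, count];

event bro_init()
    {
    add s1[1.2.3.4, 80/tcp, 1];

    # One loop variable per index type of the set; a mismatched count now
    # produces the "wrong index size" error rather than a segfault.
    for ( [a, p, n] in s1 )
        print a, p, n;
    }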
--- .../scripts/base/frameworks/control/configuration_update.bro | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testing/btest/scripts/base/frameworks/control/configuration_update.bro b/testing/btest/scripts/base/frameworks/control/configuration_update.bro index 920a162503..d9e62efe08 100644 --- a/testing/btest/scripts/base/frameworks/control/configuration_update.bro +++ b/testing/btest/scripts/base/frameworks/control/configuration_update.bro @@ -1,7 +1,9 @@ # @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controllee Communication::listen_port=65531/tcp +# @TEST-EXEC: sleep 5 # @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. bro %INPUT test-redef frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65531/tcp Control::cmd=configuration_update +# @TEST-EXEC: sleep 5 # @TEST-EXEC: btest-bg-run controller2 BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65531/tcp Control::cmd=shutdown # @TEST-EXEC: btest-bg-wait 10 # @TEST-EXEC: btest-diff controllee/.stdout From 9357aeb6b19adc0a3ab4b72de90c347a132cc000 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 5 Sep 2012 16:52:14 -0500 Subject: [PATCH 625/651] Fix "!=" operator for subnets Fixed a bug where the "!=" operator with subnet operands was treated the same as the "==" operator. --- src/Expr.cc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/Expr.cc b/src/Expr.cc index b62f119bae..e58e20f671 100644 --- a/src/Expr.cc +++ b/src/Expr.cc @@ -871,11 +871,12 @@ Val* BinaryExpr::SubNetFold(Val* v1, Val* v2) const { const IPPrefix& n1 = v1->AsSubNet(); const IPPrefix& n2 = v2->AsSubNet(); + bool result = ( n1 == n2 ) ? true : false; - if ( n1 == n2 ) - return new Val(1, TYPE_BOOL); - else - return new Val(0, TYPE_BOOL); + if ( tag == EXPR_NE ) + result = !result; + + return new Val(result, TYPE_BOOL); } void BinaryExpr::SwapOps() From cd21eb5b6afe384d044c44a8bb98f3c163532ecb Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 5 Sep 2012 17:17:43 -0500 Subject: [PATCH 626/651] Fix the "-=" operator for intervals Fixed a bug where "a -= b" (both operands are intervals) was not allowed in bro scripts (although "a = a - b" is allowed). --- src/Expr.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Expr.cc b/src/Expr.cc index e58e20f671..70aab46ab5 100644 --- a/src/Expr.cc +++ b/src/Expr.cc @@ -1516,6 +1516,8 @@ RemoveFromExpr::RemoveFromExpr(Expr* arg_op1, Expr* arg_op2) if ( BothArithmetic(bt1, bt2) ) PromoteType(max_type(bt1, bt2), is_vector(op1) || is_vector(op2)); + else if ( BothInterval(bt1, bt2) ) + SetType(base_type(bt1)); else ExprError("requires two arithmetic operands"); } From 11f66076a18d0fb5ea07a8102c29c9b216698569 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 6 Sep 2012 23:05:57 -0700 Subject: [PATCH 627/651] Starting 2.2 release notes. --- NEWS | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/NEWS b/NEWS index d93e153252..a186fea0fc 100644 --- a/NEWS +++ b/NEWS @@ -7,6 +7,20 @@ release. For a complete list of changes, see the ``CHANGES`` file (note that submodules, such as BroControl and Broccoli, come with their own CHANGES.) +Bro 2.2 +------- + +New Functionality +~~~~~~~~~~~~~~~~~ + +- TODO: Update. + +Changed Functionality +~~~~~~~~~~~~~~~~~~~~~ + +- TODO: Update. 
+ + Bro 2.1 ------- From 84ec139fd97114cffc2c19b6110f980a745d8679 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 7 Sep 2012 10:48:13 -0500 Subject: [PATCH 628/651] Update language tests for recent bug fixes --- testing/btest/Baseline/language.interval/out | 1 + testing/btest/language/interval.bro | 6 +++--- testing/btest/language/subnet.bro | 2 -- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/testing/btest/Baseline/language.interval/out b/testing/btest/Baseline/language.interval/out index 425ae1c15c..f42082ef5d 100644 --- a/testing/btest/Baseline/language.interval/out +++ b/testing/btest/Baseline/language.interval/out @@ -15,6 +15,7 @@ subtract different time units (PASS) absolute value (PASS) absolute value (PASS) assignment operator (PASS) +assignment operator (PASS) multiplication operator (PASS) division operator (PASS) division operator (PASS) diff --git a/testing/btest/language/interval.bro b/testing/btest/language/interval.bro index 816dfd6416..6bdbf3a8e8 100644 --- a/testing/btest/language/interval.bro +++ b/testing/btest/language/interval.bro @@ -52,6 +52,7 @@ event bro_init() # Type inference tests test_case( "type inference", type_name(in41) == "interval" ); + # TODO: uncomment when bug is fixed #test_case( "type inference", type_name(in42) == "interval" ); test_case( "type inference", type_name(in43) == "interval" ); @@ -76,9 +77,8 @@ event bro_init() test_case( "absolute value", |in36| == 2.5*86400 ); in34 += 2hr; test_case( "assignment operator", in34 == 122min ); - # TODO: this should work (subtraction works) - #in34 -= 2hr; - #test_case( "assignment operator", in34 == 2min ); + in34 -= 2hr; + test_case( "assignment operator", in34 == 2min ); test_case( "multiplication operator", in33*2 == 4min ); test_case( "division operator", in35/2 == -1hr ); test_case( "division operator", approx_equal(in32/in31, 1e3) ); diff --git a/testing/btest/language/subnet.bro b/testing/btest/language/subnet.bro index 591a42119e..ea641f6983 100644 --- a/testing/btest/language/subnet.bro +++ b/testing/btest/language/subnet.bro @@ -7,8 +7,6 @@ function test_case(msg: string, expect: bool) } -# TODO: "subnet inequality" tests (i.e., tests with "!=") always fail - event bro_init() { # IPv4 addr From 84fabf1718f85238b5de74a709c0162e526fd82c Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 7 Sep 2012 12:40:25 -0500 Subject: [PATCH 629/651] Add an item to FAQ page about broctl options --- doc/faq.rst | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/doc/faq.rst b/doc/faq.rst index 8545cc57ee..1579fb6313 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -46,7 +46,7 @@ directions: http://securityonion.blogspot.com/2011/10/when-is-full-packet-capture-not-full.html What does an error message like ``internal error: NB-DNS error`` mean? ---------------------------------------------------------------------------------------------------------------------------------- +---------------------------------------------------------------------- That often means that DNS is not set up correctly on the system running Bro. Try verifying from the command line that DNS lookups @@ -65,6 +65,15 @@ Generally, please note that we do not regularly test OpenBSD builds. We appreciate any patches that improve Bro's support for this platform. +How do broctl options affect Bro script variables? +-------------------------------------------------- + +Some (but not all) broctl options override a corresponding Bro script variable. 
+For example, setting the broctl option "LogRotationInterval" will override +the value of the Bro script variable "Log::default_rotation_interval". +See the :doc:`broctl documentation ` to find out +which broctl options override Bro script variables, and for more discussion +on site-specific customization. Usage ===== From f6c9b69eda29913c51e09b51daa8ed5a3f416513 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 7 Sep 2012 10:57:52 -0700 Subject: [PATCH 630/651] reorder a few statements in scan.l to make 1.5msecs etc work. Adresses #872 --- src/scan.l | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/scan.l b/src/scan.l index 1b3d09f879..377c74cc1a 100644 --- a/src/scan.l +++ b/src/scan.l @@ -479,12 +479,6 @@ F RET_CONST(new Val(false, TYPE_BOOL)) RET_CONST(new PortVal(p, TRANSPORT_UNKNOWN)) } -({D}"."){3}{D} RET_CONST(new AddrVal(yytext)) - -"0x"{HEX}+ RET_CONST(new Val(static_cast(strtoull(yytext, 0, 16)), TYPE_COUNT)) - -{H}("."{H})+ RET_CONST(dns_mgr->LookupHost(yytext)) - {FLOAT}{OWS}day(s?) RET_CONST(new IntervalVal(atof(yytext),Days)) {FLOAT}{OWS}hr(s?) RET_CONST(new IntervalVal(atof(yytext),Hours)) {FLOAT}{OWS}min(s?) RET_CONST(new IntervalVal(atof(yytext),Minutes)) @@ -492,6 +486,12 @@ F RET_CONST(new Val(false, TYPE_BOOL)) {FLOAT}{OWS}msec(s?) RET_CONST(new IntervalVal(atof(yytext),Milliseconds)) {FLOAT}{OWS}usec(s?) RET_CONST(new IntervalVal(atof(yytext),Microseconds)) +({D}"."){3}{D} RET_CONST(new AddrVal(yytext)) + +"0x"{HEX}+ RET_CONST(new Val(static_cast(strtoull(yytext, 0, 16)), TYPE_COUNT)) + +{H}("."{H})+ RET_CONST(dns_mgr->LookupHost(yytext)) + \"([^\\\n\"]|{ESCSEQ})*\" { const char* text = yytext; int len = strlen(text) + 1; From 67d01ab9e9d1edebb8d7b19795fc07d3023d5b22 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 7 Sep 2012 15:15:48 -0500 Subject: [PATCH 631/651] Small change to non-blocking DNS initialization. The trailing dot on "localhost." circumvents use of /etc/hosts in some environments (I saw it on FreeBSD 9.0-RELEASE-p3) and so emits an actual DNS query. When running the test suite, that would be hundreds of useless queries. --- src/nb_dns.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nb_dns.c b/src/nb_dns.c index d3b3c5c4de..3051be9bc2 100644 --- a/src/nb_dns.c +++ b/src/nb_dns.c @@ -124,7 +124,7 @@ nb_dns_init(char *errstr) nd->s = -1; /* XXX should be able to init static hostent struct some other way */ - (void)gethostbyname("localhost."); + (void)gethostbyname("localhost"); if ((_res.options & RES_INIT) == 0 && res_init() == -1) { snprintf(errstr, NB_DNS_ERRSIZE, "res_init() failed"); From bd84ff2c2051ff34a4b2060cce718875e23acf8c Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 7 Sep 2012 16:25:07 -0500 Subject: [PATCH 632/651] Adjusting some unit tests that do cluster communication. Added explicit synchronization and termination points to make the tests more reliable and exit earlier in most cases. 
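Condensed into a sketch, the synchronization and termination pattern the adjusted tests below rely on (drawn from the start-it-up.bro changes that follow; it assumes the cluster framework is loaded, and the peer count of 4 is specific to that five-node layout): the manager waits until every expected peer has completed its handshake before tearing communication down, and each node exits once its connection goes away.

global peer_count = 0;

event remote_connection_handshake_done(p: event_peer)
    {
    # Only the manager decides when the test is over: once all expected
    # peers have connected, shut the communication layer down.
    peer_count = peer_count + 1;
    if ( Cluster::node == "manager-1" && peer_count == 4 )
        terminate_communication();
    }

event remote_connection_closed(p: event_peer)
    {
    # Every node terminates itself as soon as its peer connection closes.
    terminate();
    }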
--- .../base/frameworks/cluster/start-it-up.bro | 15 ++++++- .../base/frameworks/notice/cluster.bro | 37 +++++++++++++++-- .../frameworks/notice/suppression-cluster.bro | 40 +++++++++++++++++-- 3 files changed, 84 insertions(+), 8 deletions(-) diff --git a/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro b/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro index a1069d1bd0..89f8d6b168 100644 --- a/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro +++ b/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro @@ -5,7 +5,7 @@ # @TEST-EXEC: btest-bg-run proxy-2 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-2 bro %INPUT # @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT # @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-bg-wait 15 # @TEST-EXEC: btest-diff manager-1/.stdout # @TEST-EXEC: btest-diff proxy-1/.stdout # @TEST-EXEC: btest-diff proxy-2/.stdout @@ -22,7 +22,20 @@ redef Cluster::nodes = { }; @TEST-END-FILE +global peer_count = 0; + event remote_connection_handshake_done(p: event_peer) { print "Connected to a peer"; + if ( Cluster::node == "manager-1" ) + { + peer_count = peer_count + 1; + if ( peer_count == 4 ) + terminate_communication(); + } + } + +event remote_connection_closed(p: event_peer) + { + terminate(); } diff --git a/testing/btest/scripts/base/frameworks/notice/cluster.bro b/testing/btest/scripts/base/frameworks/notice/cluster.bro index 8d54a27eaf..47932edb8e 100644 --- a/testing/btest/scripts/base/frameworks/notice/cluster.bro +++ b/testing/btest/scripts/base/frameworks/notice/cluster.bro @@ -2,9 +2,9 @@ # # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: sleep 1 +# @TEST-EXEC: sleep 2 # @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-bg-wait 20 # @TEST-EXEC: btest-diff manager-1/notice.log @TEST-START-FILE cluster-layout.bro @@ -21,13 +21,44 @@ redef enum Notice::Type += { Test_Notice, }; +event remote_connection_closed(p: event_peer) + { + terminate(); + } + +global ready: event(); + +redef Cluster::manager2worker_events += /ready/; + event delayed_notice() { if ( Cluster::node == "worker-1" ) NOTICE([$note=Test_Notice, $msg="test notice!"]); } -event bro_init() +@if ( Cluster::local_node_type() == Cluster::WORKER ) + +event ready() { schedule 1secs { delayed_notice() }; } + +@endif + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global peer_count = 0; + +event remote_connection_handshake_done(p: event_peer) + { + peer_count = peer_count + 1; + if ( peer_count == 2 ) + event ready(); + } + +event Notice::log_notice(rec: Notice::Info) + { + terminate_communication(); + } + +@endif diff --git a/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro b/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro index b812c6451d..5010da82cc 100644 --- a/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro +++ b/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro @@ -2,10 +2,10 @@ # # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. 
CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: sleep 1 +# @TEST-EXEC: sleep 2 # @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT # @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-bg-wait 20 # @TEST-EXEC: btest-diff manager-1/notice.log @TEST-START-FILE cluster-layout.bro @@ -23,6 +23,15 @@ redef enum Notice::Type += { Test_Notice, }; +event remote_connection_closed(p: event_peer) + { + terminate(); + } + +global ready: event(); + +redef Cluster::manager2worker_events += /ready/; + event delayed_notice() { NOTICE([$note=Test_Notice, @@ -30,10 +39,33 @@ event delayed_notice() $identifier="this identifier is static"]); } -event bro_init() &priority=5 - { +@if ( Cluster::local_node_type() == Cluster::WORKER ) + +event ready() + { if ( Cluster::node == "worker-1" ) schedule 4secs { delayed_notice() }; if ( Cluster::node == "worker-2" ) schedule 1secs { delayed_notice() }; + } + +event Notice::suppressed(n: Notice::Info) + { + if ( Cluster::node == "worker-1" ) + terminate_communication(); } + +@endif + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global peer_count = 0; + +event remote_connection_handshake_done(p: event_peer) + { + peer_count = peer_count + 1; + if ( peer_count == 3 ) + event ready(); + } + +@endif From 292bf61ae8cbdae6773b675ad5d33884c7fc7fd4 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 13 Sep 2012 12:59:40 -0500 Subject: [PATCH 633/651] Unit test reliability adjustment. Sometimes manager node was shutting everything down before others had a chance to generate output. It now waits for all nodes to fully connect with each other. --- .../base/frameworks/cluster/start-it-up.bro | 30 +++++++++++++++++-- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro b/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro index 89f8d6b168..acb9c3676a 100644 --- a/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro +++ b/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro @@ -1,11 +1,13 @@ # @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT +# @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT # @TEST-EXEC: btest-bg-run proxy-2 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-2 bro %INPUT +# @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT # @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-bg-wait 30 # @TEST-EXEC: btest-diff manager-1/.stdout # @TEST-EXEC: btest-diff proxy-1/.stdout # @TEST-EXEC: btest-diff proxy-2/.stdout @@ -22,17 +24,39 @@ redef Cluster::nodes = { }; @TEST-END-FILE +global fully_connected: event(); + global peer_count = 0; +global fully_connected_nodes = 0; + +event fully_connected() + { + fully_connected_nodes = fully_connected_nodes + 1; + if ( Cluster::node == "manager-1" ) + { + if ( peer_count == 4 && fully_connected_nodes == 4 ) + terminate_communication(); + } + } + +redef Cluster::worker2manager_events += /fully_connected/; +redef Cluster::proxy2manager_events += /fully_connected/; + event remote_connection_handshake_done(p: event_peer) { print "Connected to a peer"; + peer_count = peer_count + 1; if ( Cluster::node == "manager-1" ) { - peer_count = peer_count + 1; - if ( peer_count == 4 ) + if ( peer_count == 4 && fully_connected_nodes == 4 ) terminate_communication(); } + else + { + if ( peer_count == 2 ) + event fully_connected(); + } } event remote_connection_closed(p: event_peer) From 6d1abdb661b98726c2c77e171bbab0a65e024f54 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 13 Sep 2012 16:47:40 -0500 Subject: [PATCH 634/651] Adjusting Mac binary packaging script. Setting CMAKE_PREFIX_PATH helps link against standard system libs instead of ones that come from other package manager (e.g. MacPorts). Changed to allow only more recent CMake versions to create packages due to poorer clang compiler support in older versions, important since clang is now the default compiler instead of gcc on Macs. --- pkg/make-mac-packages | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/pkg/make-mac-packages b/pkg/make-mac-packages index 829a64ca25..2930f8f393 100755 --- a/pkg/make-mac-packages +++ b/pkg/make-mac-packages @@ -3,7 +3,13 @@ # This script creates binary packages for Mac OS X. # They can be found in ../build/ after running. -./check-cmake || { exit 1; } +cmake -P /dev/stdin << "EOF" +if ( ${CMAKE_VERSION} VERSION_LESS 2.8.9 ) + message(FATAL_ERROR "CMake >= 2.8.9 required to build package") +endif () +EOF + +[ $? -ne 0 ] && exit 1; type sw_vers > /dev/null 2>&1 || { echo "Unable to get Mac OS X version" >&2; @@ -34,26 +40,26 @@ prefix=/opt/bro cd .. # Minimum Bro -CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \ +CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \ --disable-broccoli --disable-broctl --pkg-name-prefix=Bro-minimal \ --binary-package ( cd build && make package ) # Full Bro package -CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \ +CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \ --pkg-name-prefix=Bro --binary-package ( cd build && make package ) # Broccoli cd aux/broccoli -CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \ +CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \ --binary-package ( cd build && make package && mv *.dmg ../../../build/ ) cd ../.. # Broctl cd aux/broctl -CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \ +CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \ --binary-package ( cd build && make package && mv *.dmg ../../../build/ ) cd ../.. 
From 6fbbf2829023036333231ffe00f89802b1f7bee0 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 14 Sep 2012 10:28:23 -0500 Subject: [PATCH 635/651] Update compile/dependency docs for OS X. --- doc/quickstart.rst | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/doc/quickstart.rst b/doc/quickstart.rst index cc18956836..68dc4561cb 100644 --- a/doc/quickstart.rst +++ b/doc/quickstart.rst @@ -1,5 +1,6 @@ .. _CMake: http://www.cmake.org .. _SWIG: http://www.swig.org +.. _Xcode: https://developer.apple.com/xcode/ .. _MacPorts: http://www.macports.org .. _Fink: http://www.finkproject.org .. _Homebrew: http://mxcl.github.com/homebrew @@ -85,17 +86,20 @@ The following dependencies are required to build Bro: * Mac OS X - Snow Leopard (10.6) comes with all required dependencies except for CMake_. + Compiling source code on Macs requires first downloading Xcode_, + then going through its "Preferences..." -> "Downloads" menus to + install the "Command Line Tools" component. - Lion (10.7) comes with all required dependencies except for CMake_ and SWIG_. + Lion (10.7) and Mountain Lion (10.8) come with all required + dependencies except for CMake_, SWIG_, and ``libmagic``. - Distributions of these dependencies can be obtained from the project websites - linked above, but they're also likely available from your preferred Mac OS X - package management system (e.g. MacPorts_, Fink_, or Homebrew_). + Distributions of these dependencies can be obtained from the project + websites linked above, but they're also likely available from your + preferred Mac OS X package management system (e.g. MacPorts_, Fink_, + or Homebrew_). - Note that the MacPorts ``swig`` package may not include any specific - language support so you may need to also install ``swig-ruby`` and - ``swig-python``. + Specifically for MacPorts, the ``swig``, ``swig-ruby``, ``swig-python`` + and ``file`` packages provide the required dependencies. Optional Dependencies ~~~~~~~~~~~~~~~~~~~~~ From 392b99b2fa4b7bdda267eca55d4cc57d85e88641 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 18 Sep 2012 16:52:12 -0500 Subject: [PATCH 636/651] Fix construction of ip6_ah (Authentication Header) record values. Authentication Headers with a Payload Len field set to zero would cause a crash due to invalid memory allocation because the previous code assumed Payload Len would always be great enough to contain all mandatory fields of the header. This changes it so the length of the header is explicitly checked before attempting to extract fields located past the minimum length (8 bytes) of an Authentication Header. Crashes due to this are only possible when handling script-layer events ipv6_ext_headers, new_packet, esp_packet, or teredo_*, or when implementing one of the discarder_check_* family of functions. Otherwise, Bro correctly parses past such a header.
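Because seq and data become &optional (see the init-bare.bro hunk below), script code that inspects Authentication Headers should check that those fields are present before using them. A small, hypothetical handler in the style of the new test — the field accesses follow the record layout shown in the baseline output:

event ipv6_ext_headers(c: connection, p: pkt_hdr)
    {
    if ( ! p?$ip6 )
        return;

    for ( i in p$ip6$exts )
        {
        local e = p$ip6$exts[i];

        # seq and data are only assigned when the AH Payload Len field
        # was large enough to actually carry them.
        if ( e?$ah && e$ah?$seq )
            print c$id, e$ah$spi, e$ah$seq;
        }
    }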
--- scripts/base/init-bare.bro | 8 ++++---- src/IP.cc | 11 ++++++++--- .../btest/Baseline/core.ipv6_zero_len_ah/output | 2 ++ testing/btest/Traces/ipv6_zero_len_ah.trace | Bin 0 -> 1320 bytes testing/btest/core/ipv6_zero_len_ah.test | 11 +++++++++++ 5 files changed, 25 insertions(+), 7 deletions(-) create mode 100644 testing/btest/Baseline/core.ipv6_zero_len_ah/output create mode 100644 testing/btest/Traces/ipv6_zero_len_ah.trace create mode 100644 testing/btest/core/ipv6_zero_len_ah.test diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index ec75c76beb..cc3a40f54b 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -1135,10 +1135,10 @@ type ip6_ah: record { rsv: count; ## Security Parameter Index. spi: count; - ## Sequence number. - seq: count; - ## Authentication data. - data: string; + ## Sequence number, unset in the case that *len* field is zero. + seq: count &optional; + ## Authentication data, unset in the case that *len* field is zero. + data: string &optional; }; ## Values extracted from an IPv6 ESP extension header. diff --git a/src/IP.cc b/src/IP.cc index 45afd593a9..398aacf1ee 100644 --- a/src/IP.cc +++ b/src/IP.cc @@ -148,9 +148,14 @@ RecordVal* IPv6_Hdr::BuildRecordVal(VectorVal* chain) const rv->Assign(1, new Val(((ip6_ext*)data)->ip6e_len, TYPE_COUNT)); rv->Assign(2, new Val(ntohs(((uint16*)data)[1]), TYPE_COUNT)); rv->Assign(3, new Val(ntohl(((uint32*)data)[1]), TYPE_COUNT)); - rv->Assign(4, new Val(ntohl(((uint32*)data)[2]), TYPE_COUNT)); - uint16 off = 3 * sizeof(uint32); - rv->Assign(5, new StringVal(new BroString(data + off, Length() - off, 1))); + if ( Length() >= 12 ) + { + // Sequence Number and ICV fields can only be extracted if + // Payload Len was non-zero for this header. + rv->Assign(4, new Val(ntohl(((uint32*)data)[2]), TYPE_COUNT)); + uint16 off = 3 * sizeof(uint32); + rv->Assign(5, new StringVal(new BroString(data + off, Length() - off, 1))); + } } break; diff --git a/testing/btest/Baseline/core.ipv6_zero_len_ah/output b/testing/btest/Baseline/core.ipv6_zero_len_ah/output new file mode 100644 index 0000000000..d8db6a4c48 --- /dev/null +++ b/testing/btest/Baseline/core.ipv6_zero_len_ah/output @@ -0,0 +1,2 @@ +[orig_h=2000:1300::1, orig_p=128/icmp, resp_h=2000:1300::2, resp_p=129/icmp] +[ip=, ip6=[class=0, flow=0, len=166, nxt=51, hlim=255, src=2000:1300::1, dst=2000:1300::2, exts=[[id=51, hopopts=, dstopts=, routing=, fragment=, ah=[nxt=58, len=0, rsv=0, spi=0, seq=, data=], esp=, mobility=]]], tcp=, udp=, icmp=] diff --git a/testing/btest/Traces/ipv6_zero_len_ah.trace b/testing/btest/Traces/ipv6_zero_len_ah.trace new file mode 100644 index 0000000000000000000000000000000000000000..7c3922525c26f97d870d6c2c3aa0462e82315b4a GIT binary patch literal 1320 zcmca|c+)~A1{MYw`2U}Qff2~DHt-7wNoHd31F}JwgF$_t(qt|Mbs)R#ZUT^Gkg)o% zz#t4_!2lx~pQ(W%X{Sk8#RNw*05ZL?kclA-s1t;ZjX~Bz?0}lCfMGh*e$dHILq%IdTG28*V0EDslVVN<(c(4NM1c3&IU(bM){NMzjko*MnD-SRM zf-shlyoQ-7&_j}i@whn9k8BA*f?-&NjZp<6m0?K-6y`@?B-62kJf`VP=pm0Q4Lbni zb=oRI`S4!@D8j%g{Qo~7jb=JiJHr+^kUY9LBQzg^Y`G4!1#dn?&nZmkwstV6svp2& F3jp^*vyA`% literal 0 HcmV?d00001 diff --git a/testing/btest/core/ipv6_zero_len_ah.test b/testing/btest/core/ipv6_zero_len_ah.test new file mode 100644 index 0000000000..dc3acf8443 --- /dev/null +++ b/testing/btest/core/ipv6_zero_len_ah.test @@ -0,0 +1,11 @@ +# @TEST-EXEC: bro -r $TRACES/ipv6_zero_len_ah.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +# Shouldn't crash, but we also won't have seq and data fields set of the ip6_ah +# record. 
+ +event ipv6_ext_headers(c: connection, p: pkt_hdr) + { + print c$id; + print p; + } From 73115dd334824b7293ea51b7222d2d92677a748a Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 24 Sep 2012 11:15:43 -0700 Subject: [PATCH 637/651] Updating CHANGES and VERSION. --- CHANGES | 30 ++++++++++++++++++++++++++++++ VERSION | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index eee6aba604..0ab4fd0960 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,34 @@ +2.1-26 | 2012-09-23 08:46:03 -0700 + + * Add an item to FAQ page about broctl options. (Daniel Thayer) + + * Add more language tests. We now have tests of all built-in Bro + data types (including different representations of constant + values, and max./min. values), keywords, and operators (including + special properties of certain operators, such as short-circuit + evaluation and associativity). (Daniel Thayer) + + * Fix construction of ip6_ah (Authentication Header) record values. + + Authentication Headers with a Payload Len field set to zero would + cause a crash due to invalid memory allocation because the + previous code assumed Payload Len would always be great enough to + contain all mandatory fields of the header. (Jon Siwek) + + * Update compile/dependency docs for OS X. (Jon Siwek) + + * Adjusting Mac binary packaging script. Setting CMAKE_PREFIX_PATH + helps link against standard system libs instead of ones that come + from other package manager (e.g. MacPorts). (Jon Siwek) + + * Adjusting some unit tests that do cluster communication. (Jon Siwek) + + * Small change to non-blocking DNS initialization. (Jon Siwek) + + * Reorder a few statements in scan.l to make 1.5msecs etc work. + Adresses #872. (Bernhard Amann) + 2.1-6 | 2012-09-06 23:23:14 -0700 * Fixed a bug where "a -= b" (both operands are intervals) was not diff --git a/VERSION b/VERSION index d218cbd5c8..e71d828348 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1-6 +2.1-26 From 801f8d3de6c6d94083412815f27a8753eadd6c7f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 24 Sep 2012 11:44:23 -0700 Subject: [PATCH 638/651] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index 44441a6c91..9d4e7c1d7b 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 44441a6c912c7c9f8d4771e042306ec5f44e461d +Subproject commit 9d4e7c1d7bba8dd53d16ff4b4076690c0af4a2f0 From 8cd85a9013ee157c3bfca29a700c6d73d29f5295 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 24 Sep 2012 11:45:18 -0700 Subject: [PATCH 639/651] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index 9d4e7c1d7b..e83c5f6e02 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 9d4e7c1d7bba8dd53d16ff4b4076690c0af4a2f0 +Subproject commit e83c5f6e02d6294747941d7a09f2dc327e8ab646 From 45926e6932554e19abb0587255f938c04e776f55 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 24 Sep 2012 16:13:24 -0700 Subject: [PATCH 640/651] Updating submodule(s). 
[nomail] --- aux/broctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broctl b/aux/broctl index 2fb9ff62bf..1a7db43a8a 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 2fb9ff62bf08f78071753016863640022fbfe338 +Subproject commit 1a7db43a8a5186fa12b8b19527a971da8cc280ae From 101ba67203c4b8116ecf6c71b5d6c786c40699d8 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Mon, 24 Sep 2012 18:20:42 -0500 Subject: [PATCH 641/651] Fix race condition in language/when.bro test --- testing/btest/Baseline/language.when/out | 1 + testing/btest/language/when.bro | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/testing/btest/Baseline/language.when/out b/testing/btest/Baseline/language.when/out index 19f86f493a..3a052217ab 100644 --- a/testing/btest/Baseline/language.when/out +++ b/testing/btest/Baseline/language.when/out @@ -1 +1,2 @@ done +lookup successful diff --git a/testing/btest/language/when.bro b/testing/btest/language/when.bro index d6b08b67e1..19b7f48196 100644 --- a/testing/btest/language/when.bro +++ b/testing/btest/language/when.bro @@ -1,6 +1,9 @@ -# @TEST-EXEC: bro %INPUT >out +# @TEST-EXEC: btest-bg-run test1 bro %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: mv test1/.stdout out # @TEST-EXEC: btest-diff out +@load frameworks/communication/listen event bro_init() { @@ -9,6 +12,7 @@ event bro_init() when ( local h1name = lookup_addr(h1) ) { print "lookup successful"; + terminate(); } print "done"; } From d4b95e2bbfb68982c4f4fe99e40f3e405b82bbfe Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 25 Sep 2012 06:25:15 -0700 Subject: [PATCH 642/651] Updating submodule(s). [nomail] --- aux/broctl | 2 +- aux/btest | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aux/broctl b/aux/broctl index 1a7db43a8a..44afce440d 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 1a7db43a8a5186fa12b8b19527a971da8cc280ae +Subproject commit 44afce440d02e1aac4012d5b0f5a26875ae11c3e diff --git a/aux/btest b/aux/btest index e83c5f6e02..44a43e6245 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit e83c5f6e02d6294747941d7a09f2dc327e8ab646 +Subproject commit 44a43e62452302277f88e8fac08d1f979dc53f98 From 1044762dfa329b50a42972bb33d319ed3ae3091f Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 25 Sep 2012 14:53:51 -0500 Subject: [PATCH 643/651] Serialize language.when unit test with the "comm" group. Since it now loads the listen script. --- testing/btest/language/when.bro | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/btest/language/when.bro b/testing/btest/language/when.bro index 19b7f48196..84c1f06cef 100644 --- a/testing/btest/language/when.bro +++ b/testing/btest/language/when.bro @@ -1,3 +1,4 @@ +# @TEST-SERIALIZE: comm # @TEST-EXEC: btest-bg-run test1 bro %INPUT # @TEST-EXEC: btest-bg-wait 10 # @TEST-EXEC: mv test1/.stdout out From 6f45a8f4ef8e009b9fcf71df3ebf5024fd9c8544 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 25 Sep 2012 15:26:44 -0500 Subject: [PATCH 644/651] Fix parsing of integers This bug was seen on 32-bit systems, where the range of recognized values was less than the range of hexadecimal values. 
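The practical effect of moving from strtoul to strtoull for decimal constants, as a one-line Bro check (the particular value is arbitrary — anything above 2^32-1 was truncated on 32-bit builds):

event bro_init()
    {
    local big: count = 4294967296;    # 2^32, beyond a 32-bit unsigned long
    print big == 0x100000000;         # T: decimal and hex forms of the same count now agree
    }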
--- src/scan.l | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/scan.l b/src/scan.l index 377c74cc1a..3f7337ac47 100644 --- a/src/scan.l +++ b/src/scan.l @@ -437,7 +437,7 @@ F RET_CONST(new Val(false, TYPE_BOOL)) } {D} { - RET_CONST(new Val(static_cast(strtoul(yytext, (char**) NULL, 10)), + RET_CONST(new Val(static_cast(strtoull(yytext, (char**) NULL, 10)), TYPE_COUNT)) } {FLOAT} RET_CONST(new Val(atof(yytext), TYPE_DOUBLE)) From f7e55509a447bb11136abe6e7cb21cb3de1037af Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 25 Sep 2012 16:05:23 -0500 Subject: [PATCH 645/651] Uncomment some previously-broken tests Uncommented some tests that previously would cause Bro to exit with an error. --- testing/btest/Baseline/language.interval/out | 1 + testing/btest/language/interval.bro | 9 +++------ 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/testing/btest/Baseline/language.interval/out b/testing/btest/Baseline/language.interval/out index f42082ef5d..ae9ed5d74e 100644 --- a/testing/btest/Baseline/language.interval/out +++ b/testing/btest/Baseline/language.interval/out @@ -1,5 +1,6 @@ type inference (PASS) type inference (PASS) +type inference (PASS) optional space (PASS) plural/singular interval are same (PASS) different units with same numeric value (PASS) diff --git a/testing/btest/language/interval.bro b/testing/btest/language/interval.bro index 6bdbf3a8e8..66d44206d3 100644 --- a/testing/btest/language/interval.bro +++ b/testing/btest/language/interval.bro @@ -21,8 +21,7 @@ event bro_init() local in13: interval = 120sec; local in14: interval = 2min; local in15: interval = -2hr; - # TODO: this one causes bro to fail - #local in16: interval = 2.5day; + local in16: interval = 2.5day; # Constants with space and no letter "s" @@ -45,15 +44,13 @@ event bro_init() # Type inference local in41 = 2 usec; - # TODO: this one causes bro to fail - #local in42 = 2.1usec; + local in42 = 2.1usec; local in43 = 3usecs; # Type inference tests test_case( "type inference", type_name(in41) == "interval" ); - # TODO: uncomment when bug is fixed - #test_case( "type inference", type_name(in42) == "interval" ); + test_case( "type inference", type_name(in42) == "interval" ); test_case( "type inference", type_name(in43) == "interval" ); # Test various constant representations From d6f671494ef2768b45c2eaf39cae00135379a886 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 26 Sep 2012 12:14:11 -0500 Subject: [PATCH 646/651] Reliability adjustments to istate tests with network communication. 
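A compact illustration of the interval behavior exercised by the re-enabled tests above — fractional unit constants from the scan.l reordering in PATCH 630 and "-=" on intervals from PATCH 626; the variable names are made up:

event bro_init()
    {
    local i1: interval = 2.5day;    # fractional constant with a unit suffix now parses
    local i2 = 2.1usec;             # type inference works for such constants as well
    local i3: interval = 122min;
    i3 -= 2hr;                      # "-=" is now accepted when both operands are intervals
    print type_name(i2), i3 == 2min, 1.5msecs;
    }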
--- testing/btest/istate/bro-ipv6-socket.bro | 4 ++-- testing/btest/istate/broccoli-ipv6-socket.bro | 3 ++- testing/btest/istate/broccoli-ipv6.bro | 3 ++- testing/btest/istate/broccoli-ssl.bro | 3 ++- testing/btest/istate/broccoli.bro | 3 ++- testing/btest/istate/events-ssl.bro | 4 ++-- testing/btest/istate/events.bro | 4 ++-- testing/btest/istate/sync.bro | 3 ++- 8 files changed, 16 insertions(+), 11 deletions(-) diff --git a/testing/btest/istate/bro-ipv6-socket.bro b/testing/btest/istate/bro-ipv6-socket.bro index b339bf4487..305f32caab 100644 --- a/testing/btest/istate/bro-ipv6-socket.bro +++ b/testing/btest/istate/bro-ipv6-socket.bro @@ -4,7 +4,7 @@ # # @TEST-EXEC: btest-bg-run recv bro -b ../recv.bro # @TEST-EXEC: btest-bg-run send bro -b ../send.bro -# @TEST-EXEC: btest-bg-wait -k 20 +# @TEST-EXEC: btest-bg-wait 20 # # @TEST-EXEC: btest-diff recv/.stdout # @TEST-EXEC: btest-diff send/.stdout @@ -14,7 +14,7 @@ @load base/frameworks/communication redef Communication::nodes += { - ["foo"] = [$host=[::1], $connect=T, $events=/my_event/] + ["foo"] = [$host=[::1], $connect=T, $retry=1sec, $events=/my_event/] }; global my_event: event(s: string); diff --git a/testing/btest/istate/broccoli-ipv6-socket.bro b/testing/btest/istate/broccoli-ipv6-socket.bro index 21067c1b23..be6266fdec 100644 --- a/testing/btest/istate/broccoli-ipv6-socket.bro +++ b/testing/btest/istate/broccoli-ipv6-socket.bro @@ -4,7 +4,8 @@ # @TEST-REQUIRES: ifconfig | grep -q -E "inet6 ::1|inet6 addr: ::1" # # @TEST-EXEC: btest-bg-run bro bro $DIST/aux/broccoli/test/broccoli-v6addrs.bro "Communication::listen_ipv6=T" +# @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run broccoli $BUILD/aux/broccoli/test/broccoli-v6addrs -6 ::1 -# @TEST-EXEC: btest-bg-wait -k 20 +# @TEST-EXEC: btest-bg-wait 20 # @TEST-EXEC: btest-diff bro/.stdout # @TEST-EXEC: btest-diff broccoli/.stdout diff --git a/testing/btest/istate/broccoli-ipv6.bro b/testing/btest/istate/broccoli-ipv6.bro index ba181d4987..b4fdfb5fcf 100644 --- a/testing/btest/istate/broccoli-ipv6.bro +++ b/testing/btest/istate/broccoli-ipv6.bro @@ -3,7 +3,8 @@ # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # # @TEST-EXEC: btest-bg-run bro bro $DIST/aux/broccoli/test/broccoli-v6addrs.bro +# @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run broccoli $BUILD/aux/broccoli/test/broccoli-v6addrs -# @TEST-EXEC: btest-bg-wait -k 20 +# @TEST-EXEC: btest-bg-wait 20 # @TEST-EXEC: btest-diff bro/.stdout # @TEST-EXEC: btest-diff broccoli/.stdout diff --git a/testing/btest/istate/broccoli-ssl.bro b/testing/btest/istate/broccoli-ssl.bro index 4465cd1bb3..dcbea93150 100644 --- a/testing/btest/istate/broccoli-ssl.bro +++ b/testing/btest/istate/broccoli-ssl.bro @@ -4,8 +4,9 @@ # # @TEST-EXEC: chmod 600 broccoli.conf # @TEST-EXEC: btest-bg-run bro bro $DIST/aux/broccoli/test/broccoli-v6addrs.bro "Communication::listen_ssl=T" "ssl_ca_certificate=../ca_cert.pem" "ssl_private_key=../bro.pem" +# @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run broccoli BROCCOLI_CONFIG_FILE=../broccoli.conf $BUILD/aux/broccoli/test/broccoli-v6addrs -# @TEST-EXEC: btest-bg-wait -k 20 +# @TEST-EXEC: btest-bg-wait 20 # @TEST-EXEC: btest-diff bro/.stdout # @TEST-EXEC: btest-diff broccoli/.stdout diff --git a/testing/btest/istate/broccoli.bro b/testing/btest/istate/broccoli.bro index 2bae5dc080..2fdd4cbda4 100644 --- a/testing/btest/istate/broccoli.bro +++ b/testing/btest/istate/broccoli.bro @@ -3,8 +3,9 @@ # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so 
|| test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # # @TEST-EXEC: btest-bg-run bro bro %INPUT $DIST/aux/broccoli/test/broping-record.bro +# @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run broccoli $BUILD/aux/broccoli/test/broping -r -c 3 127.0.0.1 -# @TEST-EXEC: btest-bg-wait -k 20 +# @TEST-EXEC: btest-bg-wait 20 # @TEST-EXEC: cat bro/ping.log | sed 's/one-way.*//g' >bro.log # @TEST-EXEC: cat broccoli/.stdout | sed 's/time=.*//g' >broccoli.log # @TEST-EXEC: btest-diff bro.log diff --git a/testing/btest/istate/events-ssl.bro b/testing/btest/istate/events-ssl.bro index e09bf112fd..1d285869b4 100644 --- a/testing/btest/istate/events-ssl.bro +++ b/testing/btest/istate/events-ssl.bro @@ -2,7 +2,7 @@ # # @TEST-EXEC: btest-bg-run sender bro -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro # @TEST-EXEC: btest-bg-run receiver bro ../receiver.bro -# @TEST-EXEC: btest-bg-wait -k 20 +# @TEST-EXEC: btest-bg-wait 20 # # @TEST-EXEC: btest-diff sender/http.log # @TEST-EXEC: btest-diff receiver/http.log @@ -55,7 +55,7 @@ event bro_init() redef peer_description = "events-rcv"; redef Communication::nodes += { - ["foo"] = [$host = 127.0.0.1, $events = /http_.*|signature_match/, $connect=T, $ssl=T] + ["foo"] = [$host = 127.0.0.1, $events = /http_.*|signature_match/, $connect=T, $ssl=T, $retry=1sec] }; redef ssl_ca_certificate = "../ca_cert.pem"; diff --git a/testing/btest/istate/events.bro b/testing/btest/istate/events.bro index 70726a9f20..590aabcd23 100644 --- a/testing/btest/istate/events.bro +++ b/testing/btest/istate/events.bro @@ -2,7 +2,7 @@ # # @TEST-EXEC: btest-bg-run sender bro -Bthreading,logging,comm -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro # @TEST-EXEC: btest-bg-run receiver bro -Bthreading,logging,comm ../receiver.bro -# @TEST-EXEC: btest-bg-wait -k 20 +# @TEST-EXEC: btest-bg-wait 20 # # @TEST-EXEC: btest-diff sender/http.log # @TEST-EXEC: btest-diff receiver/http.log @@ -50,7 +50,7 @@ event bro_init() redef peer_description = "events-rcv"; redef Communication::nodes += { - ["foo"] = [$host = 127.0.0.1, $events = /http_.*|signature_match/, $connect=T] + ["foo"] = [$host = 127.0.0.1, $events = /http_.*|signature_match/, $connect=T, $retry=1sec] }; event remote_connection_closed(p: event_peer) diff --git a/testing/btest/istate/sync.bro b/testing/btest/istate/sync.bro index 776ddfd2fa..e1364a9553 100644 --- a/testing/btest/istate/sync.bro +++ b/testing/btest/istate/sync.bro @@ -154,7 +154,8 @@ event bro_init() } redef Communication::nodes += { - ["foo"] = [$host = 127.0.0.1, $events = /.*/, $connect=T, $sync=T] + ["foo"] = [$host = 127.0.0.1, $events = /.*/, $connect=T, $sync=T, + $retry=1sec] }; event remote_connection_closed(p: event_peer) From 5593f339bdd4dfd9e35c24ededd1b4457350c7e5 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 26 Sep 2012 13:09:54 -0500 Subject: [PATCH 647/651] Remove unused reserved keyword "this" Removed unused reserved keyword "this" (a script using it would cause Bro to segfault). 
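With the token gone from scan.l and parse.y, "this" should simply match the generic identifier rule, so the null bro_this expression that caused the segfault can no longer be reached. A tiny, speculative sketch of the consequence for script code:

event bro_init()
    {
    # "this" is no longer a reserved word; using it as an expression used to
    # dereference a null pointer, now it behaves like any other local name.
    local this = 1;
    print this;
    }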
--- src/parse.y | 9 +-------- src/scan.l | 1 - 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/src/parse.y b/src/parse.y index 75e09dc60f..27af150254 100644 --- a/src/parse.y +++ b/src/parse.y @@ -14,7 +14,7 @@ %token TOK_NEXT TOK_OF TOK_PATTERN TOK_PATTERN_TEXT %token TOK_PORT TOK_PRINT TOK_RECORD TOK_REDEF %token TOK_REMOVE_FROM TOK_RETURN TOK_SCHEDULE TOK_SET -%token TOK_STRING TOK_SUBNET TOK_SWITCH TOK_TABLE TOK_THIS +%token TOK_STRING TOK_SUBNET TOK_SWITCH TOK_TABLE %token TOK_TIME TOK_TIMEOUT TOK_TIMER TOK_TYPE TOK_UNION TOK_VECTOR TOK_WHEN %token TOK_ATTR_ADD_FUNC TOK_ATTR_ATTR TOK_ATTR_ENCRYPT TOK_ATTR_DEFAULT @@ -118,7 +118,6 @@ extern const char* g_curr_debug_error; #define YYLTYPE yyltype -Expr* bro_this = 0; int in_init = 0; int in_record = 0; bool resolving_global_ID = false; @@ -584,12 +583,6 @@ expr: $$ = new ConstExpr(new PatternVal($1)); } - | TOK_THIS - { - set_location(@1); - $$ = bro_this->Ref(); - } - | '|' expr '|' { set_location(@1, @3); diff --git a/src/scan.l b/src/scan.l index 3f7337ac47..d213b60012 100644 --- a/src/scan.l +++ b/src/scan.l @@ -306,7 +306,6 @@ string return TOK_STRING; subnet return TOK_SUBNET; switch return TOK_SWITCH; table return TOK_TABLE; -this return TOK_THIS; time return TOK_TIME; timeout return TOK_TIMEOUT; timer return TOK_TIMER; From f00a7c3ee401405559d13a0597011cf1a1edaa7e Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 26 Sep 2012 14:20:30 -0500 Subject: [PATCH 648/651] Remove deprecated built-in functions --- src/bro.bif | 78 ------------------------------------------------- src/strings.bif | 9 ------ 2 files changed, 87 deletions(-) diff --git a/src/bro.bif b/src/bro.bif index bc791d6858..3cac8c8da5 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -5683,12 +5683,6 @@ function match_signatures%(c: connection, pattern_type: int, s: string, # # =========================================================================== -## Deprecated. Will be removed. -function parse_dotted_addr%(s: string%): addr - %{ - IPAddr a(s->CheckString()); - return new AddrVal(a); - %} %%{ @@ -5788,75 +5782,3 @@ function anonymize_addr%(a: addr, cl: IPAddrAnonymizationClass%): addr } %} -## Deprecated. Will be removed. -function dump_config%(%) : bool - %{ - return new Val(persistence_serializer->WriteConfig(true), TYPE_BOOL); - %} - -## Deprecated. Will be removed. -function make_connection_persistent%(c: connection%) : any - %{ - c->MakePersistent(); - return 0; - %} - -%%{ -// Experimental code to add support for IDMEF XML output based on -// notices. For now, we're implementing it as a builtin you can call on an -// notices record. - -#ifdef USE_IDMEF -extern "C" { -#include -} -#endif - -#include - -char* port_to_string(PortVal* port) - { - char buf[256]; // to hold sprintf results on port numbers - snprintf(buf, sizeof(buf), "%u", port->Port()); - return copy_string(buf); - } - -%%} - -## Deprecated. Will be removed. 
-function generate_idmef%(src_ip: addr, src_port: port, - dst_ip: addr, dst_port: port%) : bool - %{ -#ifdef USE_IDMEF - xmlNodePtr message = - newIDMEF_Message(newAttribute("version","1.0"), - newAlert(newCreateTime(NULL), - newSource( - newNode(newAddress( - newAttribute("category","ipv4-addr"), - newSimpleElement("address", - copy_string(src_ip->AsAddr().AsString().c_str())), - NULL), NULL), - newService( - newSimpleElement("port", - port_to_string(src_port)), - NULL), NULL), - newTarget( - newNode(newAddress( - newAttribute("category","ipv4-addr"), - newSimpleElement("address", - copy_string(dst_ip->AsAddr().AsString().c_str())), - NULL), NULL), - newService( - newSimpleElement("port", - port_to_string(dst_port)), - NULL), NULL), NULL), NULL); - - // if ( validateCurrentDoc() ) - printCurrentMessage(stderr); - return new Val(1, TYPE_BOOL); -#else - builtin_error("Bro was not configured for IDMEF support"); - return new Val(0, TYPE_BOOL); -#endif - %} diff --git a/src/strings.bif b/src/strings.bif index 22e29950ee..43dee25c1b 100644 --- a/src/strings.bif +++ b/src/strings.bif @@ -552,15 +552,6 @@ function split_n%(str: string, re: pattern, return do_split(str, re, 0, incl_sep, max_num_sep); %} -## Deprecated. Will be removed. -# Reason: the parameter ``other`` does nothing. -function split_complete%(str: string, - re: pattern, other: string_set, - incl_sep: bool, max_num_sep: count%): string_array - %{ - return do_split(str, re, other->AsTableVal(), incl_sep, max_num_sep); - %} - ## Substitutes a given replacement string for the first occurrence of a pattern ## in a given string. ## From 72f16f26426ac34b7cf452c1a65f13fd5651491a Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 26 Sep 2012 15:20:54 -0500 Subject: [PATCH 649/651] Remove unused argument of helper function Removed an unused argument of the "do_split" helper function. The unused argument was previously used by a now-removed BIF. --- src/strings.bif | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/src/strings.bif b/src/strings.bif index 43dee25c1b..dc5e064dc6 100644 --- a/src/strings.bif +++ b/src/strings.bif @@ -311,15 +311,9 @@ static int match_prefix(int s_len, const char* s, int t_len, const char* t) return 1; } -Val* do_split(StringVal* str_val, RE_Matcher* re, TableVal* other_sep, - int incl_sep, int max_num_sep) +Val* do_split(StringVal* str_val, RE_Matcher* re, int incl_sep, int max_num_sep) { TableVal* a = new TableVal(string_array); - ListVal* other_strings = 0; - - if ( other_sep && other_sep->Size() > 0 ) - other_strings = other_sep->ConvertToPureList(); - const u_char* s = str_val->Bytes(); int n = str_val->Len(); const u_char* end_of_s = s + n; @@ -373,9 +367,6 @@ Val* do_split(StringVal* str_val, RE_Matcher* re, TableVal* other_sep, reporter->InternalError("RegMatch in split goes beyond the string"); } - if ( other_strings ) - delete other_strings; - return a; } @@ -483,7 +474,7 @@ Val* do_sub(StringVal* str_val, RE_Matcher* re, StringVal* repl, int do_all) ## function split%(str: string, re: pattern%): string_array %{ - return do_split(str, re, 0, 0, 0); + return do_split(str, re, 0, 0); %} ## Splits a string *once* into a two-element array of strings according to a @@ -503,7 +494,7 @@ function split%(str: string, re: pattern%): string_array ## .. 
bro:see:: split split_all split_n str_split function split1%(str: string, re: pattern%): string_array %{ - return do_split(str, re, 0, 0, 1); + return do_split(str, re, 0, 1); %} ## Splits a string into an array of strings according to a pattern. This @@ -523,7 +514,7 @@ function split1%(str: string, re: pattern%): string_array ## .. bro:see:: split split1 split_n str_split function split_all%(str: string, re: pattern%): string_array %{ - return do_split(str, re, 0, 1, 0); + return do_split(str, re, 1, 0); %} ## Splits a string a given number of times into an array of strings according @@ -549,7 +540,7 @@ function split_all%(str: string, re: pattern%): string_array function split_n%(str: string, re: pattern, incl_sep: bool, max_num_sep: count%): string_array %{ - return do_split(str, re, 0, incl_sep, max_num_sep); + return do_split(str, re, incl_sep, max_num_sep); %} ## Substitutes a given replacement string for the first occurrence of a pattern From 254715eaaa30d4888511cbfc1ee81fc2f9c2d2bf Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 26 Sep 2012 16:47:51 -0500 Subject: [PATCH 650/651] Remove deprecated attribute &disable_print_hook --- doc/ext/bro_lexer/bro.py | 2 +- doc/ext/bro_lexer/bro.pyc | Bin 2702 -> 2585 bytes doc/scripts/builtins.rst | 4 ---- src/Attr.cc | 7 +------ src/Attr.h | 1 - src/File.cc | 3 --- src/File.h | 2 +- src/bro.bif | 4 ++-- src/parse.y | 6 ++---- src/scan.l | 1 - 10 files changed, 7 insertions(+), 23 deletions(-) diff --git a/doc/ext/bro_lexer/bro.py b/doc/ext/bro_lexer/bro.py index 8cb4475f3b..ae2566a8de 100644 --- a/doc/ext/bro_lexer/bro.py +++ b/doc/ext/bro_lexer/bro.py @@ -29,7 +29,7 @@ class BroLexer(RegexLexer): r'|vector)\b', Keyword.Type), (r'(T|F)\b', Keyword.Constant), (r'(&)((?:add|delete|expire)_func|attr|(create|read|write)_expire' - r'|default|disable_print_hook|raw_output|encrypt|group|log' + r'|default|raw_output|encrypt|group|log' r'|mergeable|optional|persistent|priority|redef' r'|rotate_(?:interval|size)|synchronized)\b', bygroups(Punctuation, Keyword)), diff --git a/doc/ext/bro_lexer/bro.pyc b/doc/ext/bro_lexer/bro.pyc index 6471e1528d8d02296dbdedc0548e86cd80a3c439..c7b4fde790bb48f424f4a4bedbc75e693112baf0 100644 GIT binary patch delta 46 ycmeAZohib>{F#?)VM%hpMvjLpj5{a4V)5GS&ib60g^huMVRAC(6$m4TO9=p8_YF({ delta 163 zcmbO!(kIHn{F#^QMC2U5jT{eI7|%?8#o{HDl3ARXl#?1?P?VWh5}%QupS{_b^#!x3 zIRgWOerR!OQL%njab|gHwthfSepYI7NwI!XQNDh8dAWXZa#3ahgdd-iT9I0$KUtph L3Yj{;b1DG<_P0C1 diff --git a/doc/scripts/builtins.rst b/doc/scripts/builtins.rst index 0501067409..d274de6b7b 100644 --- a/doc/scripts/builtins.rst +++ b/doc/scripts/builtins.rst @@ -600,10 +600,6 @@ scripting language supports the following built-in attributes. .. TODO: needs to be documented. -.. bro:attr:: &disable_print_hook - - Deprecated. Will be removed. - .. bro:attr:: &raw_output Opens a file in raw mode, i.e., non-ASCII characters are not diff --git a/src/Attr.cc b/src/Attr.cc index 2e4e090c0b..bdf247b4f5 100644 --- a/src/Attr.cc +++ b/src/Attr.cc @@ -15,7 +15,7 @@ const char* attr_name(attr_tag t) "&add_func", "&delete_func", "&expire_func", "&read_expire", "&write_expire", "&create_expire", "&persistent", "&synchronized", "&postprocessor", - "&encrypt", "&match", "&disable_print_hook", + "&encrypt", "&match", "&raw_output", "&mergeable", "&priority", "&group", "&log", "&error_handler", "&type_column", "(&tracked)", @@ -385,11 +385,6 @@ void Attributes::CheckAttr(Attr* a) // FIXME: Check here for global ID? 
break; - case ATTR_DISABLE_PRINT_HOOK: - if ( type->Tag() != TYPE_FILE ) - Error("&disable_print_hook only applicable to files"); - break; - case ATTR_RAW_OUTPUT: if ( type->Tag() != TYPE_FILE ) Error("&raw_output only applicable to files"); diff --git a/src/Attr.h b/src/Attr.h index e6b09cf96b..c9a0dedb33 100644 --- a/src/Attr.h +++ b/src/Attr.h @@ -28,7 +28,6 @@ typedef enum { ATTR_POSTPROCESSOR, ATTR_ENCRYPT, ATTR_MATCH, - ATTR_DISABLE_PRINT_HOOK, ATTR_RAW_OUTPUT, ATTR_MERGEABLE, ATTR_PRIORITY, diff --git a/src/File.cc b/src/File.cc index 3b9f3be33b..880fd254ef 100644 --- a/src/File.cc +++ b/src/File.cc @@ -514,9 +514,6 @@ void BroFile::SetAttrs(Attributes* arg_attrs) InitEncrypt(log_encryption_key->AsString()->CheckString()); } - if ( attrs->FindAttr(ATTR_DISABLE_PRINT_HOOK) ) - DisablePrintHook(); - if ( attrs->FindAttr(ATTR_RAW_OUTPUT) ) EnableRawOutput(); diff --git a/src/File.h b/src/File.h index 37f844867b..8e3d0ca6e7 100644 --- a/src/File.h +++ b/src/File.h @@ -57,7 +57,7 @@ public: RecordVal* Rotate(); // Set &rotate_interval, &rotate_size, &postprocessor, - // &disable_print_hook, and &raw_output attributes. + // and &raw_output attributes. void SetAttrs(Attributes* attrs); // Returns the current size of the file, after fresh stat'ing. diff --git a/src/bro.bif b/src/bro.bif index 3cac8c8da5..8ddde6ef86 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -4858,7 +4858,7 @@ function file_size%(f: string%) : double %} ## Disables sending :bro:id:`print_hook` events to remote peers for a given -## file. This function is equivalent to :bro:attr:`&disable_print_hook`. In a +## file. In a ## distributed setup, communicating Bro instances generate the event ## :bro:id:`print_hook` for each print statement and send it to the remote ## side. When disabled for a particular file, these events will not be @@ -4874,7 +4874,7 @@ function disable_print_hook%(f: file%): any %} ## Prevents escaping of non-ASCII characters when writing to a file. -## This function is equivalent to :bro:attr:`&disable_print_hook`. +## This function is equivalent to :bro:attr:`&raw_output`. ## ## f: The file to disable raw output for. ## diff --git a/src/parse.y b/src/parse.y index 27af150254..c1f6ddd96e 100644 --- a/src/parse.y +++ b/src/parse.y @@ -2,7 +2,7 @@ // See the file "COPYING" in the main distribution directory for copyright. 
%} -%expect 90 +%expect 87 %token TOK_ADD TOK_ADD_TO TOK_ADDR TOK_ANY %token TOK_ATENDIF TOK_ATELSE TOK_ATIF TOK_ATIFDEF TOK_ATIFNDEF @@ -22,7 +22,7 @@ %token TOK_ATTR_ROTATE_SIZE TOK_ATTR_DEL_FUNC TOK_ATTR_EXPIRE_FUNC %token TOK_ATTR_EXPIRE_CREATE TOK_ATTR_EXPIRE_READ TOK_ATTR_EXPIRE_WRITE %token TOK_ATTR_PERSISTENT TOK_ATTR_SYNCHRONIZED -%token TOK_ATTR_DISABLE_PRINT_HOOK TOK_ATTR_RAW_OUTPUT TOK_ATTR_MERGEABLE +%token TOK_ATTR_RAW_OUTPUT TOK_ATTR_MERGEABLE %token TOK_ATTR_PRIORITY TOK_ATTR_GROUP TOK_ATTR_LOG TOK_ATTR_ERROR_HANDLER %token TOK_ATTR_TYPE_COLUMN @@ -1290,8 +1290,6 @@ attr: { $$ = new Attr(ATTR_ENCRYPT); } | TOK_ATTR_ENCRYPT '=' expr { $$ = new Attr(ATTR_ENCRYPT, $3); } - | TOK_ATTR_DISABLE_PRINT_HOOK - { $$ = new Attr(ATTR_DISABLE_PRINT_HOOK); } | TOK_ATTR_RAW_OUTPUT { $$ = new Attr(ATTR_RAW_OUTPUT); } | TOK_ATTR_MERGEABLE diff --git a/src/scan.l b/src/scan.l index d213b60012..6c87766781 100644 --- a/src/scan.l +++ b/src/scan.l @@ -319,7 +319,6 @@ when return TOK_WHEN; &create_expire return TOK_ATTR_EXPIRE_CREATE; &default return TOK_ATTR_DEFAULT; &delete_func return TOK_ATTR_DEL_FUNC; -&disable_print_hook return TOK_ATTR_DISABLE_PRINT_HOOK; &raw_output return TOK_ATTR_RAW_OUTPUT; &encrypt return TOK_ATTR_ENCRYPT; &error_handler return TOK_ATTR_ERROR_HANDLER; From 474ab86b9c6d6d02850c032d451d2cf6c95c8280 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sat, 29 Sep 2012 14:44:58 -0700 Subject: [PATCH 651/651] Updating submodule(s). [nomail] --- aux/broctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broctl b/aux/broctl index 44afce440d..b0e3c0d846 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 44afce440d02e1aac4012d5b0f5a26875ae11c3e +Subproject commit b0e3c0d84643878c135dcb8a9774ed78147dd648
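For reference, a short usage sketch of the split BIFs that remain after the removals in this series (split_complete and the other deprecated functions are gone); the behavior follows the strings.bif comments above, and the input strings are made up:

event bro_init()
    {
    # split() breaks on every occurrence of the pattern; results are indexed from 1.
    local parts = split("a,b,c", /,/);
    print |parts|, parts[1], parts[3];    # 3, a, c

    # split1() splits only around the first occurrence.
    local kv = split1("key=value", /=/);
    print kv[1], kv[2];                   # key, value
    }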