From 93d9dde969a9bc3983b4bcb656970453730ae3eb Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 10 Dec 2013 16:26:34 -0800 Subject: [PATCH 001/106] IOSource reorg. A bunch of infrastructure work to move IOSource, IOSourceRegistry (now iosource::Manager) and PktSrc/PktDumper code into iosource/, and over to a plugin structure. Other IOSources aren't touched yet, they are still in src/*. It compiles and does something with a small trace, but that's all I've tested so far. There are quite certainly a number of problems left, as well as various TODOs and cleanup; and nothing's cast in stone yet. Will continue to work on this. --- TODO.iosources | 9 + aux/broccoli | 2 +- aux/broctl | 2 +- src/CMakeLists.txt | 7 +- src/DNS_Mgr.cc | 13 +- src/DNS_Mgr.h | 6 +- src/DebugLogger.cc | 3 +- src/DebugLogger.h | 1 + src/FlowSrc.cc | 15 +- src/FlowSrc.h | 4 +- src/Net.cc | 119 +-- src/Net.h | 22 +- src/PacketSort.cc | 2 +- src/PacketSort.h | 8 +- src/PktSrc.cc | 796 ------------------ src/PktSrc.h | 258 ------ src/RemoteSerializer.cc | 34 +- src/RemoteSerializer.h | 4 +- src/Serializer.cc | 19 +- src/Serializer.h | 4 +- src/Sessions.cc | 24 +- src/Sessions.h | 4 +- src/bro.bif | 73 +- src/iosource/CMakeLists.txt | 23 + src/iosource/Component.cc | 44 + src/iosource/Component.h | 56 ++ src/{ => iosource}/IOSource.h | 76 +- src/{IOSource.cc => iosource/Manager.cc} | 151 +++- src/iosource/Manager.h | 75 ++ src/iosource/pktsrc/CMakeLists.txt | 2 + src/iosource/pktsrc/Component.cc | 130 +++ src/iosource/pktsrc/Component.h | 132 +++ src/iosource/pktsrc/PktDumper.cc | 79 ++ src/iosource/pktsrc/PktDumper.h | 57 ++ src/iosource/pktsrc/PktSrc.cc | 411 +++++++++ src/iosource/pktsrc/PktSrc.h | 140 +++ src/iosource/pktsrc/old-2ndary-code.h | 69 ++ src/{ => iosource/pktsrc/pcap}/BPF_Program.cc | 0 src/{ => iosource/pktsrc/pcap}/BPF_Program.h | 0 src/iosource/pktsrc/pcap/CMakeLists.txt | 8 + src/iosource/pktsrc/pcap/Dumper.cc | 111 +++ src/iosource/pktsrc/pcap/Dumper.h | 40 + 
src/iosource/pktsrc/pcap/Plugin.cc | 12 + src/iosource/pktsrc/pcap/Source.cc | 343 ++++++++ src/iosource/pktsrc/pcap/Source.h | 60 ++ src/main.cc | 16 +- src/plugin/Component.cc | 12 + src/plugin/Component.h | 7 +- src/plugin/Macros.h | 27 + src/threading/Manager.cc | 8 +- src/threading/Manager.h | 4 +- src/util.cc | 7 +- 52 files changed, 2223 insertions(+), 1306 deletions(-) create mode 100644 TODO.iosources delete mode 100644 src/PktSrc.cc delete mode 100644 src/PktSrc.h create mode 100644 src/iosource/CMakeLists.txt create mode 100644 src/iosource/Component.cc create mode 100644 src/iosource/Component.h rename src/{ => iosource}/IOSource.h (51%) rename src/{IOSource.cc => iosource/Manager.cc} (52%) create mode 100644 src/iosource/Manager.h create mode 100644 src/iosource/pktsrc/CMakeLists.txt create mode 100644 src/iosource/pktsrc/Component.cc create mode 100644 src/iosource/pktsrc/Component.h create mode 100644 src/iosource/pktsrc/PktDumper.cc create mode 100644 src/iosource/pktsrc/PktDumper.h create mode 100644 src/iosource/pktsrc/PktSrc.cc create mode 100644 src/iosource/pktsrc/PktSrc.h create mode 100644 src/iosource/pktsrc/old-2ndary-code.h rename src/{ => iosource/pktsrc/pcap}/BPF_Program.cc (100%) rename src/{ => iosource/pktsrc/pcap}/BPF_Program.h (100%) create mode 100644 src/iosource/pktsrc/pcap/CMakeLists.txt create mode 100644 src/iosource/pktsrc/pcap/Dumper.cc create mode 100644 src/iosource/pktsrc/pcap/Dumper.h create mode 100644 src/iosource/pktsrc/pcap/Plugin.cc create mode 100644 src/iosource/pktsrc/pcap/Source.cc create mode 100644 src/iosource/pktsrc/pcap/Source.h diff --git a/TODO.iosources b/TODO.iosources new file mode 100644 index 0000000000..7380c89b92 --- /dev/null +++ b/TODO.iosources @@ -0,0 +1,9 @@ +- Move the current_{iosrc,pkt_src,etc.} into manager +- Remove all 2ndary path code +- Remove all flow src code. +- Move pktsrc/*.{h,cc} up a level? Or create a subsublibrary there? 
+- Create a global Packet data structure and pass that around instead + of the pcap_* stuff? +- PktDumper: Move Dump() to public and remove Record() +- Wrap BPF_Program into namespace and clean up +- Tests, in particular the packet dumping needs testing. diff --git a/aux/broccoli b/aux/broccoli index e02ccc0a27..17ec437752 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit e02ccc0a27e64b147f01e4c7deb5b897864d59d5 +Subproject commit 17ec437752837fb4214abfb0a2da49df74668d5d diff --git a/aux/broctl b/aux/broctl index 2e07720b4f..6e01d6972f 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 2e07720b4f129802e07ca99498e2aff4542c737a +Subproject commit 6e01d6972f02d68ee82d05f392d1a00725595b7f diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 8e22b504e4..1aede44934 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -151,10 +151,12 @@ list(APPEND BINPAC_OUTPUTS "${BINPAC_OUTPUT_CC}") set(bro_SUBDIR_LIBS CACHE INTERNAL "subdir libraries" FORCE) set(bro_PLUGIN_LIBS CACHE INTERNAL "plugin libraries" FORCE) +add_subdirectory(iosource) add_subdirectory(analyzer) add_subdirectory(file_analysis) add_subdirectory(probabilistic) add_subdirectory(broxygen) +add_subdirectory(iosource) set(bro_SUBDIRS ${bro_SUBDIR_LIBS} @@ -249,7 +251,8 @@ set(bro_SRCS Anon.cc Attr.cc Base64.cc - BPF_Program.cc + BroDoc.cc + BroDocObj.cc Brofiler.cc BroString.cc CCL.cc @@ -281,7 +284,6 @@ set(bro_SRCS Hash.cc ID.cc IntSet.cc - IOSource.cc IP.cc IPAddr.cc List.cc @@ -295,7 +297,6 @@ set(bro_SRCS PacketFilter.cc PacketSort.cc PersistenceSerializer.cc - PktSrc.cc PolicyFile.cc PrefixTable.cc PriorityQueue.cc diff --git a/src/DNS_Mgr.cc b/src/DNS_Mgr.cc index 17409a930b..4d96e21a3e 100644 --- a/src/DNS_Mgr.cc +++ b/src/DNS_Mgr.cc @@ -34,6 +34,7 @@ #include "Net.h" #include "Var.h" #include "Reporter.h" +#include "iosource/Manager.h" extern "C" { extern int select(int, fd_set *, fd_set *, fd_set *, struct timeval *); @@ -404,17 +405,17 @@ 
DNS_Mgr::~DNS_Mgr() delete [] dir; } -bool DNS_Mgr::Init() +void DNS_Mgr::Init() { if ( did_init ) - return true; + return; const char* cache_dir = dir ? dir : "."; if ( mode == DNS_PRIME && ! ensure_dir(cache_dir) ) { did_init = 0; - return false; + return; } cache_name = new char[strlen(cache_dir) + 64]; @@ -433,14 +434,12 @@ bool DNS_Mgr::Init() did_init = 1; - io_sources.Register(this, true); + iosource_mgr->Register(this, true); // We never set idle to false, having the main loop only calling us from // time to time. If we're issuing more DNS requests than we can handle // in this way, we are having problems anyway ... - idle = true; - - return true; + SetIdle(true); } TableVal* DNS_Mgr::LookupHost(const char* name) diff --git a/src/DNS_Mgr.h b/src/DNS_Mgr.h index bfcc70a5c2..069c7e1a2b 100644 --- a/src/DNS_Mgr.h +++ b/src/DNS_Mgr.h @@ -12,7 +12,7 @@ #include "BroList.h" #include "Dict.h" #include "EventHandler.h" -#include "IOSource.h" +#include "iosource/IOSource.h" #include "IPAddr.h" class Val; @@ -40,12 +40,12 @@ enum DNS_MgrMode { // Number of seconds we'll wait for a reply. 
#define DNS_TIMEOUT 5 -class DNS_Mgr : public IOSource { +class DNS_Mgr : public iosource::IOSource { public: DNS_Mgr(DNS_MgrMode mode); virtual ~DNS_Mgr(); - bool Init(); + void Init(); void Flush(); // Looks up the address or addresses of the given host, and returns diff --git a/src/DebugLogger.cc b/src/DebugLogger.cc index 78377eafcf..05f0f9e89f 100644 --- a/src/DebugLogger.cc +++ b/src/DebugLogger.cc @@ -17,7 +17,8 @@ DebugLogger::Stream DebugLogger::streams[NUM_DBGS] = { { "dpd", 0, false }, { "tm", 0, false }, { "logging", 0, false }, {"input", 0, false }, { "threading", 0, false }, { "file_analysis", 0, false }, - { "plugins", 0, false }, { "broxygen", 0, false } + { "plugins", 0, false }, { "broxygen", 0, false }, + { "pktio", 0, false} }; DebugLogger::DebugLogger(const char* filename) diff --git a/src/DebugLogger.h b/src/DebugLogger.h index d1f053788e..b098430a9a 100644 --- a/src/DebugLogger.h +++ b/src/DebugLogger.h @@ -29,6 +29,7 @@ enum DebugStream { DBG_FILE_ANALYSIS, // File analysis DBG_PLUGINS, DBG_BROXYGEN, + DBG_PKTIO, // Packet sources and dumpers. NUM_DBGS // Has to be last }; diff --git a/src/FlowSrc.cc b/src/FlowSrc.cc index 32aa4c4e3a..fb9676ab5b 100644 --- a/src/FlowSrc.cc +++ b/src/FlowSrc.cc @@ -15,7 +15,6 @@ FlowSrc::FlowSrc() { // TODO: v9. selectable_fd = -1; - idle = false; data = 0; pdu_len = -1; exporter_ip = 0; @@ -80,7 +79,7 @@ int FlowSocketSrc::ExtractNextPDU() reporter->Error("problem reading NetFlow data from socket"); data = 0; next_timestamp = -1.0; - closed = 1; + SetClosed(true); return 0; } @@ -115,7 +114,7 @@ FlowSocketSrc::FlowSocketSrc(const char* listen_parms) snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, "parsing your listen-spec went nuts: laddr='%s', port='%s'\n", laddr[0] ? laddr : "", port[0] ? 
port : ""); - closed = 1; + SetClosed(true); return; } @@ -131,7 +130,7 @@ FlowSocketSrc::FlowSocketSrc(const char* listen_parms) snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, "getaddrinfo(%s, %s, ...): %s", laddr, port, gai_strerror(ret)); - closed = 1; + SetClosed(true); return; } @@ -139,7 +138,7 @@ FlowSocketSrc::FlowSocketSrc(const char* listen_parms) { snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, "socket: %s", strerror(errno)); - closed = 1; + SetClosed(true); goto cleanup; } @@ -147,7 +146,7 @@ FlowSocketSrc::FlowSocketSrc(const char* listen_parms) { snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, "bind: %s", strerror(errno)); - closed = 1; + SetClosed(true); goto cleanup; } @@ -211,7 +210,7 @@ FlowFileSrc::FlowFileSrc(const char* readfile) selectable_fd = open(this->readfile, O_RDONLY); if ( selectable_fd < 0 ) { - closed = 1; + SetClosed(true); snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, "open: %s", strerror(errno)); } @@ -223,6 +222,6 @@ int FlowFileSrc::Error(int errlvl, const char* errmsg) "%s: %s", errmsg, strerror(errlvl)); data = 0; next_timestamp = -1.0; - closed = 1; + SetClosed(true); return 0; } diff --git a/src/FlowSrc.h b/src/FlowSrc.h index 03dda2761d..71c8b0cd11 100644 --- a/src/FlowSrc.h +++ b/src/FlowSrc.h @@ -5,7 +5,7 @@ #ifndef flowsrc_h #define flowsrc_h -#include "IOSource.h" +#include "iosource/IOSource.h" #include "NetVar.h" #include "binpac.h" @@ -28,7 +28,7 @@ namespace binpac { } } -class FlowSrc : public IOSource { +class FlowSrc : public iosource::IOSource { public: virtual ~FlowSrc(); diff --git a/src/Net.cc b/src/Net.cc index ac4dacf9b8..64baeff4aa 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -30,6 +30,9 @@ #include "PacketSort.h" #include "Serializer.h" #include "PacketDumper.h" +#include "iosource/Manager.h" +#include "iosource/pktsrc/PktSrc.h" +#include "iosource/pktsrc/PktDumper.h" extern "C" { #include "setsignal.h" @@ -39,10 +42,7 @@ extern "C" { extern int select(int, fd_set *, fd_set *, fd_set *, struct timeval *); } -PList(PktSrc) pkt_srcs; - -// 
FIXME: We should really merge PktDumper and PacketDumper. -PktDumper* pkt_dumper = 0; +iosource::PktDumper* pkt_dumper = 0; int reading_live = 0; int reading_traces = 0; @@ -65,8 +65,8 @@ const u_char* current_pkt = 0; int current_dispatched = 0; int current_hdr_size = 0; double current_timestamp = 0.0; -PktSrc* current_pktsrc = 0; -IOSource* current_iosrc; +iosource::PktSrc* current_pktsrc = 0; +iosource::IOSource* current_iosrc = 0; std::list files_scanned; std::vector sig_files; @@ -115,8 +115,8 @@ RETSIGTYPE watchdog(int /* signo */) // saving the packet which caused the // watchdog to trigger may be helpful, // so we'll save that one nevertheless. - pkt_dumper = new PktDumper("watchdog-pkt.pcap"); - if ( pkt_dumper->IsError() ) + pkt_dumper = iosource_mgr->OpenPktDumper("watchdog-pkt.pcap", false); + if ( ! pkt_dumper || pkt_dumper->IsError() ) { reporter->Error("watchdog: can't open watchdog-pkt.pcap for writing\n"); delete pkt_dumper; @@ -125,7 +125,12 @@ RETSIGTYPE watchdog(int /* signo */) } if ( pkt_dumper ) - pkt_dumper->Dump(current_hdr, current_pkt); + { + iosource::PktDumper::Packet p; + p.hdr = current_hdr; + p.data = current_pkt; + pkt_dumper->Record(&p); + } } net_get_final_stats(); @@ -157,18 +162,10 @@ void net_init(name_list& interfaces, name_list& readfiles, reading_traces = 1; for ( int i = 0; i < readfiles.length(); ++i ) - { - PktFileSrc* ps = new PktFileSrc(readfiles[i], filter); - - if ( ! 
ps->IsOpen() ) - reporter->FatalError("%s: problem with trace file %s - %s\n", - prog, readfiles[i], ps->ErrorMsg()); - else - { - pkt_srcs.append(ps); - io_sources.Register(ps); - } + iosource_mgr->OpenPktSrc(readfiles[i], filter, false); + } +#if 0 if ( secondary_filter ) { // We use a second PktFileSrc for the @@ -189,7 +186,6 @@ void net_init(name_list& interfaces, name_list& readfiles, ps->AddSecondaryTablePrograms(); } - } for ( int i = 0; i < flowfiles.length(); ++i ) { @@ -203,7 +199,7 @@ void net_init(name_list& interfaces, name_list& readfiles, io_sources.Register(fs); } } - } +#endif else if ((interfaces.length() > 0 || netflows.length() > 0)) { @@ -211,22 +207,13 @@ void net_init(name_list& interfaces, name_list& readfiles, reading_traces = 0; for ( int i = 0; i < interfaces.length(); ++i ) - { - PktSrc* ps; - ps = new PktInterfaceSrc(interfaces[i], filter); - - if ( ! ps->IsOpen() ) - reporter->FatalError("%s: problem with interface %s - %s\n", - prog, interfaces[i], ps->ErrorMsg()); - else - { - pkt_srcs.append(ps); - io_sources.Register(ps); - } + iosource_mgr->OpenPktSrc(interfaces[i], filter, true); + } +#if 0 if ( secondary_filter ) { - PktSrc* ps; + iosource::PktSrc* ps; ps = new PktInterfaceSrc(interfaces[i], filter, TYPE_FILTER_SECONDARY); @@ -258,8 +245,7 @@ void net_init(name_list& interfaces, name_list& readfiles, else io_sources.Register(fs); } - - } +#endif else // have_pending_timers = 1, possibly. We don't set @@ -270,12 +256,7 @@ void net_init(name_list& interfaces, name_list& readfiles, if ( writefile ) { - // ### This will fail horribly if there are multiple - // interfaces with different-lengthed media. - pkt_dumper = new PktDumper(writefile); - if ( pkt_dumper->IsError() ) - reporter->FatalError("%s: can't open write file \"%s\" - %s\n", - prog, writefile, pkt_dumper->ErrorMsg()); + pkt_dumper = iosource_mgr->OpenPktDumper(writefile, false); ID* id = global_scope()->Lookup("trace_output_file"); if ( ! 
id ) @@ -299,7 +280,7 @@ void net_init(name_list& interfaces, name_list& readfiles, } } -void expire_timers(PktSrc* src_ps) +void expire_timers(iosource::PktSrc* src_ps) { SegmentProfiler(segment_logger, "expiring-timers"); TimerMgr* tmgr = @@ -313,7 +294,7 @@ void expire_timers(PktSrc* src_ps) void net_packet_dispatch(double t, const struct pcap_pkthdr* hdr, const u_char* pkt, int hdr_size, - PktSrc* src_ps, PacketSortElement* pkt_elem) + iosource::PktSrc* src_ps, PacketSortElement* pkt_elem) { if ( ! bro_start_network_time ) bro_start_network_time = t; @@ -394,7 +375,7 @@ int process_packet_sorter(double latest_packet_time) void net_packet_arrival(double t, const struct pcap_pkthdr* hdr, const u_char* pkt, int hdr_size, - PktSrc* src_ps) + iosource::PktSrc* src_ps) { if ( packet_sorter ) { @@ -421,12 +402,12 @@ void net_run() { set_processing_status("RUNNING", "net_run"); - while ( io_sources.Size() || + while ( iosource_mgr->Size() || (packet_sorter && ! packet_sorter->Empty()) || (BifConst::exit_only_after_terminate && ! terminating) ) { double ts; - IOSource* src = io_sources.FindSoonest(&ts); + iosource::IOSource* src = iosource_mgr->FindSoonest(&ts); #ifdef DEBUG static int loop_counter = 0; @@ -535,16 +516,19 @@ void net_run() void net_get_final_stats() { - loop_over_list(pkt_srcs, i) + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) { - PktSrc* ps = pkt_srcs[i]; + iosource::PktSrc* ps = *i; if ( ps->IsLive() ) { - struct PktSrc::Stats s; + iosource::PktSrc::Stats s; ps->Statistics(&s); reporter->Info("%d packets received on interface %s, %d dropped\n", - s.received, ps->Interface(), s.dropped); + s.received, ps->Path().c_str(), s.dropped); } } } @@ -587,29 +571,6 @@ void net_delete() delete ip_anonymizer[i]; } -// net_packet_match -// -// Description: -// - Checks if a packet matches a filter. 
It just wraps up a call to -// [pcap.h's] bpf_filter(). -// -// Inputs: -// - fp: a BPF-compiled filter -// - pkt: a pointer to the packet -// - len: the original packet length -// - caplen: the captured packet length. This is pkt length -// -// Output: -// - return: 1 if the packet matches the filter, 0 otherwise - -int net_packet_match(BPF_Program* fp, const u_char* pkt, - u_int len, u_int caplen) - { - // NOTE: I don't like too much un-const'ing the pkt variable. - return bpf_filter(fp->GetProgram()->bf_insns, (u_char*) pkt, len, caplen); - } - - int _processing_suspended = 0; static double suspend_start = 0; @@ -627,8 +588,12 @@ void net_continue_processing() if ( _processing_suspended == 1 ) { reporter->Info("processing continued"); - loop_over_list(pkt_srcs, i) - pkt_srcs[i]->ContinueAfterSuspend(); + + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + (*i)->ContinueAfterSuspend(); } --_processing_suspended; diff --git a/src/Net.h b/src/Net.h index 07c856d1dd..421bee5911 100644 --- a/src/Net.h +++ b/src/Net.h @@ -4,13 +4,18 @@ #define net_h #include "net_util.h" +<<<<<<< HEAD #include "util.h" #include "BPF_Program.h" +======= +>>>>>>> 5493253... Checkpoint. #include "List.h" -#include "PktSrc.h" #include "FlowSrc.h" #include "Func.h" #include "RemoteSerializer.h" +#include "iosource/IOSource.h" +#include "iosource/pktsrc/PktSrc.h" +#include "iosource/pktsrc/PktDumper.h" extern void net_init(name_list& interfaces, name_list& readfiles, name_list& netflows, name_list& flowfiles, @@ -22,10 +27,8 @@ extern void net_finish(int drain_events); extern void net_delete(); // Reclaim all memory, etc. 
extern void net_packet_arrival(double t, const struct pcap_pkthdr* hdr, const u_char* pkt, int hdr_size, - PktSrc* src_ps); -extern int net_packet_match(BPF_Program* fp, const u_char* pkt, - u_int len, u_int caplen); -extern void expire_timers(PktSrc* src_ps = 0); + iosource::PktSrc* src_ps); +extern void expire_timers(iosource::PktSrc* src_ps = 0); extern void termination_signal(); // Functions to temporarily suspend processing of live input (network packets @@ -82,13 +85,10 @@ extern const u_char* current_pkt; extern int current_dispatched; extern int current_hdr_size; extern double current_timestamp; -extern PktSrc* current_pktsrc; -extern IOSource* current_iosrc; +extern iosource::PktSrc* current_pktsrc; +extern iosource::IOSource* current_iosrc; -declare(PList,PktSrc); -extern PList(PktSrc) pkt_srcs; - -extern PktDumper* pkt_dumper; // where to save packets +extern iosource::PktDumper* pkt_dumper; // where to save packets extern char* writefile; diff --git a/src/PacketSort.cc b/src/PacketSort.cc index 429d8e2720..606d21b689 100644 --- a/src/PacketSort.cc +++ b/src/PacketSort.cc @@ -3,7 +3,7 @@ const bool DEBUG_packetsort = false; -PacketSortElement::PacketSortElement(PktSrc* arg_src, +PacketSortElement::PacketSortElement(iosource::PktSrc* arg_src, double arg_timestamp, const struct pcap_pkthdr* arg_hdr, const u_char* arg_pkt, int arg_hdr_size) { diff --git a/src/PacketSort.h b/src/PacketSort.h index 199da0732f..d61f66994e 100644 --- a/src/PacketSort.h +++ b/src/PacketSort.h @@ -16,16 +16,14 @@ enum { NUM_OF_PQ_LEVEL, }; -class PktSrc; - class PacketSortElement { public: - PacketSortElement(PktSrc* src, double timestamp, + PacketSortElement(iosource::PktSrc* src, double timestamp, const struct pcap_pkthdr* hdr, const u_char* pkt, int hdr_size); ~PacketSortElement(); - PktSrc* Src() const { return src; } + iosource::PktSrc* Src() const { return src; } double TimeStamp() const { return timestamp; } const struct pcap_pkthdr* Hdr() const { return &hdr; } const 
u_char* Pkt() const { return pkt; } @@ -33,7 +31,7 @@ public: const IP_Hdr* IPHdr() const { return ip_hdr; } protected: - PktSrc* src; + iosource::PktSrc* src; double timestamp; struct pcap_pkthdr hdr; u_char* pkt; diff --git a/src/PktSrc.cc b/src/PktSrc.cc deleted file mode 100644 index 9d6bce6fe9..0000000000 --- a/src/PktSrc.cc +++ /dev/null @@ -1,796 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#include -#include - -#include "config.h" - -#include "util.h" -#include "PktSrc.h" -#include "Hash.h" -#include "Net.h" -#include "Sessions.h" - - -// ### This needs auto-confing. -#ifdef HAVE_PCAP_INT_H -#include -#endif - -PktSrc::PktSrc() - { - interface = readfile = 0; - data = last_data = 0; - memset(&hdr, 0, sizeof(hdr)); - hdr_size = 0; - datalink = 0; - netmask = 0xffffff00; - pd = 0; - idle = false; - - next_sync_point = 0; - first_timestamp = current_timestamp = next_timestamp = 0.0; - first_wallclock = current_wallclock = 0; - - stats.received = stats.dropped = stats.link = 0; - } - -PktSrc::~PktSrc() - { - Close(); - - loop_over_list(program_list, i) - delete program_list[i]; - - BPF_Program* code; - IterCookie* cookie = filters.InitForIteration(); - while ( (code = filters.NextEntry(cookie)) ) - delete code; - - delete [] interface; - delete [] readfile; - } - -void PktSrc::GetFds(int* read, int* write, int* except) - { - if ( pseudo_realtime ) - { - // Select would give erroneous results. But we simulate it - // by setting idle accordingly. - idle = CheckPseudoTime() == 0; - return; - } - - if ( selectable_fd >= 0 ) - *read = selectable_fd; - } - -int PktSrc::ExtractNextPacket() - { - // Don't return any packets if processing is suspended (except for the - // very first packet which we need to set up times). 
- if ( net_is_processing_suspended() && first_timestamp ) - { - idle = true; - return 0; - } - - data = last_data = pcap_next(pd, &hdr); - - if ( data && (hdr.len == 0 || hdr.caplen == 0) ) - { - sessions->Weird("empty_pcap_header", &hdr, data); - return 0; - } - - if ( data ) - next_timestamp = hdr.ts.tv_sec + double(hdr.ts.tv_usec) / 1e6; - - if ( pseudo_realtime ) - current_wallclock = current_time(true); - - if ( ! first_timestamp ) - first_timestamp = next_timestamp; - - idle = (data == 0); - - if ( data ) - ++stats.received; - - // Source has gone dry. If it's a network interface, this just means - // it's timed out. If it's a file, though, then the file has been - // exhausted. - if ( ! data && ! IsLive() ) - { - closed = true; - - if ( pseudo_realtime && using_communication ) - { - if ( remote_trace_sync_interval ) - remote_serializer->SendFinalSyncPoint(); - else - remote_serializer->Terminate(); - } - } - - return data != 0; - } - -double PktSrc::NextTimestamp(double* local_network_time) - { - if ( ! data && ! ExtractNextPacket() ) - return -1.0; - - if ( pseudo_realtime ) - { - // Delay packet if necessary. - double packet_time = CheckPseudoTime(); - if ( packet_time ) - return packet_time; - - idle = true; - return -1.0; - } - - return next_timestamp; - } - -void PktSrc::ContinueAfterSuspend() - { - current_wallclock = current_time(true); - } - -double PktSrc::CurrentPacketWallClock() - { - // We stop time when we are suspended. - if ( net_is_processing_suspended() ) - current_wallclock = current_time(true); - - return current_wallclock; - } - -double PktSrc::CheckPseudoTime() - { - if ( ! data && ! ExtractNextPacket() ) - return 0; - - if ( ! 
current_timestamp ) - return bro_start_time; - - if ( remote_trace_sync_interval ) - { - if ( next_sync_point == 0 || next_timestamp >= next_sync_point ) - { - int n = remote_serializer->SendSyncPoint(); - next_sync_point = first_timestamp + - n * remote_trace_sync_interval; - remote_serializer->Log(RemoteSerializer::LogInfo, - fmt("stopping at packet %.6f, next sync-point at %.6f", - current_timestamp, next_sync_point)); - - return 0; - } - } - - double pseudo_time = next_timestamp - first_timestamp; - double ct = (current_time(true) - first_wallclock) * pseudo_realtime; - - return pseudo_time <= ct ? bro_start_time + pseudo_time : 0; - } - -void PktSrc::Process() - { - if ( ! data && ! ExtractNextPacket() ) - return; - - current_timestamp = next_timestamp; - - int pkt_hdr_size = hdr_size; - - // Unfortunately some packets on the link might have MPLS labels - // while others don't. That means we need to ask the link-layer if - // labels are in place. - bool have_mpls = false; - - int protocol = 0; - - switch ( datalink ) { - case DLT_NULL: - { - protocol = (data[3] << 24) + (data[2] << 16) + (data[1] << 8) + data[0]; - - // From the Wireshark Wiki: "AF_INET6, unfortunately, has - // different values in {NetBSD,OpenBSD,BSD/OS}, - // {FreeBSD,DragonFlyBSD}, and {Darwin/Mac OS X}, so an IPv6 - // packet might have a link-layer header with 24, 28, or 30 - // as the AF_ value." As we may be reading traces captured on - // platforms other than what we're running on, we accept them - // all here. - if ( protocol != AF_INET - && protocol != AF_INET6 - && protocol != 24 - && protocol != 28 - && protocol != 30 ) - { - sessions->Weird("non_ip_packet_in_null_transport", &hdr, data); - data = 0; - return; - } - - break; - } - - case DLT_EN10MB: - { - // Get protocol being carried from the ethernet frame. - protocol = (data[12] << 8) + data[13]; - - switch ( protocol ) - { - // MPLS carried over the ethernet frame. 
- case 0x8847: - have_mpls = true; - break; - - // VLAN carried over the ethernet frame. - case 0x8100: - data += get_link_header_size(datalink); - data += 4; // Skip the vlan header - pkt_hdr_size = 0; - - // Check for 802.1ah (Q-in-Q) containing IP. - // Only do a second layer of vlan tag - // stripping because there is no - // specification that allows for deeper - // nesting. - if ( ((data[2] << 8) + data[3]) == 0x0800 ) - data += 4; - - break; - - // PPPoE carried over the ethernet frame. - case 0x8864: - data += get_link_header_size(datalink); - protocol = (data[6] << 8) + data[7]; - data += 8; // Skip the PPPoE session and PPP header - pkt_hdr_size = 0; - - if ( protocol != 0x0021 && protocol != 0x0057 ) - { - // Neither IPv4 nor IPv6. - sessions->Weird("non_ip_packet_in_pppoe_encapsulation", &hdr, data); - data = 0; - return; - } - break; - } - - break; - } - - case DLT_PPP_SERIAL: - { - // Get PPP protocol. - protocol = (data[2] << 8) + data[3]; - - if ( protocol == 0x0281 ) - // MPLS Unicast - have_mpls = true; - - else if ( protocol != 0x0021 && protocol != 0x0057 ) - { - // Neither IPv4 nor IPv6. - sessions->Weird("non_ip_packet_in_ppp_encapsulation", &hdr, data); - data = 0; - return; - } - break; - } - } - - if ( have_mpls ) - { - // Remove the data link layer - data += get_link_header_size(datalink); - - // Denote a header size of zero before the IP header - pkt_hdr_size = 0; - - // Skip the MPLS label stack. - bool end_of_stack = false; - - while ( ! end_of_stack ) - { - end_of_stack = *(data + 2) & 0x01; - data += 4; - } - } - - if ( pseudo_realtime ) - { - current_pseudo = CheckPseudoTime(); - net_packet_arrival(current_pseudo, &hdr, data, pkt_hdr_size, this); - if ( ! first_wallclock ) - first_wallclock = current_time(true); - } - - else - net_packet_arrival(current_timestamp, &hdr, data, pkt_hdr_size, this); - - data = 0; - } - -bool PktSrc::GetCurrentPacket(const struct pcap_pkthdr** arg_hdr, - const u_char** arg_pkt) - { - if ( ! 
last_data ) - return false; - - *arg_hdr = &hdr; - *arg_pkt = last_data; - return true; - } - -int PktSrc::PrecompileFilter(int index, const char* filter) - { - // Compile filter. - BPF_Program* code = new BPF_Program(); - - if ( ! code->Compile(pd, filter, netmask, errbuf, sizeof(errbuf)) ) - { - delete code; - return 0; - } - - // Store it in hash. - HashKey* hash = new HashKey(HashKey(bro_int_t(index))); - BPF_Program* oldcode = filters.Lookup(hash); - if ( oldcode ) - delete oldcode; - - filters.Insert(hash, code); - delete hash; - - return 1; - } - -int PktSrc::SetFilter(int index) - { - // We don't want load-level filters for the secondary path. - if ( filter_type == TYPE_FILTER_SECONDARY && index > 0 ) - return 1; - - HashKey* hash = new HashKey(HashKey(bro_int_t(index))); - BPF_Program* code = filters.Lookup(hash); - delete hash; - - if ( ! code ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "No precompiled pcap filter for index %d", - index); - return 0; - } - - if ( pcap_setfilter(pd, code->GetProgram()) < 0 ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_setfilter(%d): %s", - index, pcap_geterr(pd)); - return 0; - } - -#ifndef HAVE_LINUX - // Linux doesn't clear counters when resetting filter. - stats.received = stats.dropped = stats.link = 0; -#endif - - return 1; - } - -void PktSrc::SetHdrSize() - { - int dl = pcap_datalink(pd); - hdr_size = get_link_header_size(dl); - - if ( hdr_size < 0 ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "unknown data link type 0x%x", dl); - Close(); - } - - datalink = dl; - } - -void PktSrc::Close() - { - if ( pd ) - { - pcap_close(pd); - pd = 0; - closed = true; - } - } - -void PktSrc::AddSecondaryTablePrograms() - { - BPF_Program* program; - - loop_over_list(secondary_path->EventTable(), i) - { - SecondaryEvent* se = secondary_path->EventTable()[i]; - program = new BPF_Program(); - - if ( ! 
program->Compile(snaplen, datalink, se->Filter(), - netmask, errbuf, sizeof(errbuf)) ) - { - delete program; - Close(); - return; - } - - SecondaryProgram* sp = new SecondaryProgram(program, se); - program_list.append(sp); - } - } - -void PktSrc::Statistics(Stats* s) - { - if ( reading_traces ) - s->received = s->dropped = s->link = 0; - - else - { - struct pcap_stat pstat; - if ( pcap_stats(pd, &pstat) < 0 ) - { - reporter->Error("problem getting packet filter statistics: %s", - ErrorMsg()); - s->received = s->dropped = s->link = 0; - } - - else - { - s->dropped = pstat.ps_drop; - s->link = pstat.ps_recv; - } - } - - s->received = stats.received; - - if ( pseudo_realtime ) - s->dropped = 0; - - stats.dropped = s->dropped; - } - -PktInterfaceSrc::PktInterfaceSrc(const char* arg_interface, const char* filter, - PktSrc_Filter_Type ft) -: PktSrc() - { - char tmp_errbuf[PCAP_ERRBUF_SIZE]; - filter_type = ft; - - // Determine interface if not specified. - if ( ! arg_interface && ! (arg_interface = pcap_lookupdev(tmp_errbuf)) ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_lookupdev: %s", tmp_errbuf); - return; - } - - interface = copy_string(arg_interface); - - // Determine network and netmask. - uint32 net; - if ( pcap_lookupnet(interface, &net, &netmask, tmp_errbuf) < 0 ) - { - // ### The lookup can fail if no address is assigned to - // the interface; and libpcap doesn't have any useful notion - // of error codes, just error strings - how bogus - so we - // just kludge around the error :-(. - // sprintf(errbuf, "pcap_lookupnet %s", tmp_errbuf); - // return; - net = 0; - netmask = 0xffffff00; - } - - // We use the smallest time-out possible to return almost immediately if - // no packets are available. (We can't use set_nonblocking() as it's - // broken on FreeBSD: even when select() indicates that we can read - // something, we may get nothing if the store buffer hasn't filled up - // yet.) - pd = pcap_open_live(interface, snaplen, 1, 1, tmp_errbuf); - - if ( ! 
pd ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_open_live: %s", tmp_errbuf); - closed = true; - return; - } - - // ### This needs autoconf'ing. -#ifdef HAVE_PCAP_INT_H - reporter->Info("pcap bufsize = %d\n", ((struct pcap *) pd)->bufsize); -#endif - -#ifdef HAVE_LINUX - if ( pcap_setnonblock(pd, 1, tmp_errbuf) < 0 ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_setnonblock: %s", tmp_errbuf); - pcap_close(pd); - closed = true; - return; - } -#endif - selectable_fd = pcap_fileno(pd); - - if ( PrecompileFilter(0, filter) && SetFilter(0) ) - { - SetHdrSize(); - - if ( closed ) - // Couldn't get header size. - return; - - reporter->Info("listening on %s, capture length %d bytes\n", interface, snaplen); - } - else - closed = true; - } - - -PktFileSrc::PktFileSrc(const char* arg_readfile, const char* filter, - PktSrc_Filter_Type ft) -: PktSrc() - { - readfile = copy_string(arg_readfile); - - filter_type = ft; - - pd = pcap_open_offline((char*) readfile, errbuf); - - if ( pd && PrecompileFilter(0, filter) && SetFilter(0) ) - { - SetHdrSize(); - - if ( closed ) - // Unknown link layer type. - return; - - // We don't put file sources into non-blocking mode as - // otherwise we would not be able to identify the EOF. - - selectable_fd = fileno(pcap_file(pd)); - - if ( selectable_fd < 0 ) - reporter->InternalError("OS does not support selectable pcap fd"); - } - else - closed = true; - } - - -SecondaryPath::SecondaryPath() - { - filter = 0; - - // Glue together the secondary filter, if exists. - Val* secondary_fv = internal_val("secondary_filters"); - if ( secondary_fv->AsTableVal()->Size() == 0 ) - return; - - int did_first = 0; - const TableEntryValPDict* v = secondary_fv->AsTable(); - IterCookie* c = v->InitForIteration(); - TableEntryVal* tv; - HashKey* h; - - while ( (tv = v->NextEntry(h, c)) ) - { - // Get the index values. 
- ListVal* index = - secondary_fv->AsTableVal()->RecoverIndex(h); - - const char* str = - index->Index(0)->Ref()->AsString()->CheckString(); - - if ( ++did_first == 1 ) - { - filter = copy_string(str); - } - else - { - if ( strlen(filter) > 0 ) - { - char* tmp_f = new char[strlen(str) + strlen(filter) + 32]; - if ( strlen(str) == 0 ) - sprintf(tmp_f, "%s", filter); - else - sprintf(tmp_f, "(%s) or (%s)", filter, str); - delete [] filter; - filter = tmp_f; - } - } - - // Build secondary_path event table item and link it. - SecondaryEvent* se = - new SecondaryEvent(index->Index(0)->Ref()->AsString()->CheckString(), - tv->Value()->AsFunc() ); - - event_list.append(se); - - delete h; - Unref(index); - } - } - -SecondaryPath::~SecondaryPath() - { - loop_over_list(event_list, i) - delete event_list[i]; - - delete [] filter; - } - - -SecondaryProgram::~SecondaryProgram() - { - delete program; - } - -PktDumper::PktDumper(const char* arg_filename, bool arg_append) - { - filename[0] = '\0'; - is_error = false; - append = arg_append; - dumper = 0; - open_time = 0.0; - - // We need a pcap_t with a reasonable link-layer type. We try to get it - // from the packet sources. If not available, we fall back to Ethernet. - // FIXME: Perhaps we should make this configurable? - int linktype = -1; - - if ( pkt_srcs.length() ) - linktype = pkt_srcs[0]->LinkType(); - - if ( linktype < 0 ) - linktype = DLT_EN10MB; - - pd = pcap_open_dead(linktype, 8192); - if ( ! pd ) - { - Error("error for pcap_open_dead"); - return; - } - - if ( arg_filename ) - Open(arg_filename); - } - -bool PktDumper::Open(const char* arg_filename) - { - if ( ! arg_filename && ! *filename ) - { - Error("no filename given"); - return false; - } - - if ( arg_filename ) - { - if ( dumper && streq(arg_filename, filename) ) - // Already open. 
- return true; - - safe_strncpy(filename, arg_filename, FNBUF_LEN); - } - - if ( dumper ) - Close(); - - struct stat s; - int exists = 0; - - if ( append ) - { - // See if output file already exists (and is non-empty). - exists = stat(filename, &s); ; - - if ( exists < 0 && errno != ENOENT ) - { - Error(fmt("can't stat file %s: %s", filename, strerror(errno))); - return false; - } - } - - if ( ! append || exists < 0 || s.st_size == 0 ) - { - // Open new file. - dumper = pcap_dump_open(pd, filename); - if ( ! dumper ) - { - Error(pcap_geterr(pd)); - return false; - } - } - - else - { - // Old file and we need to append, which, unfortunately, - // is not supported by libpcap. So, we have to hack a - // little bit, knowing that pcap_dumpter_t is, in fact, - // a FILE ... :-( - dumper = (pcap_dumper_t*) fopen(filename, "a"); - if ( ! dumper ) - { - Error(fmt("can't open dump %s: %s", filename, strerror(errno))); - return false; - } - } - - open_time = network_time; - is_error = false; - return true; - } - -bool PktDumper::Close() - { - if ( dumper ) - { - pcap_dump_close(dumper); - dumper = 0; - is_error = false; - } - - return true; - } - -bool PktDumper::Dump(const struct pcap_pkthdr* hdr, const u_char* pkt) - { - if ( ! dumper ) - return false; - - if ( ! 
open_time ) - open_time = network_time; - - pcap_dump((u_char*) dumper, hdr, pkt); - - return true; - } - -void PktDumper::Error(const char* errstr) - { - safe_strncpy(errbuf, errstr, sizeof(errbuf)); - is_error = true; - } - -int get_link_header_size(int dl) - { - switch ( dl ) { - case DLT_NULL: - return 4; - - case DLT_EN10MB: - return 14; - - case DLT_FDDI: - return 13 + 8; // fddi_header + LLC - -#ifdef DLT_LINUX_SLL - case DLT_LINUX_SLL: - return 16; -#endif - - case DLT_PPP_SERIAL: // PPP_SERIAL - return 4; - - case DLT_RAW: - return 0; - } - - return -1; - } diff --git a/src/PktSrc.h b/src/PktSrc.h deleted file mode 100644 index 70eef4dd00..0000000000 --- a/src/PktSrc.h +++ /dev/null @@ -1,258 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#ifndef pktsrc_h -#define pktsrc_h - -#include "Dict.h" -#include "Expr.h" -#include "BPF_Program.h" -#include "IOSource.h" -#include "RemoteSerializer.h" - -#define BRO_PCAP_ERRBUF_SIZE PCAP_ERRBUF_SIZE + 256 - -extern "C" { -#include -} - -declare(PDict,BPF_Program); - -// Whether a PktSrc object is used by the normal filter structure or the -// secondary-path structure. -typedef enum { - TYPE_FILTER_NORMAL, // the normal filter - TYPE_FILTER_SECONDARY, // the secondary-path filter -} PktSrc_Filter_Type; - - -// {filter,event} tuples conforming the secondary path. 
-class SecondaryEvent { -public: - SecondaryEvent(const char* arg_filter, Func* arg_event) - { - filter = arg_filter; - event = arg_event; - } - - const char* Filter() { return filter; } - Func* Event() { return event; } - -private: - const char* filter; - Func* event; -}; - -declare(PList,SecondaryEvent); -typedef PList(SecondaryEvent) secondary_event_list; - - - -class SecondaryPath { -public: - SecondaryPath(); - ~SecondaryPath(); - - secondary_event_list& EventTable() { return event_list; } - const char* Filter() { return filter; } - -private: - secondary_event_list event_list; - // OR'ed union of all SecondaryEvent filters - char* filter; -}; - -// Main secondary-path object. -extern SecondaryPath* secondary_path; - - -// {program, {filter,event}} tuple table. -class SecondaryProgram { -public: - SecondaryProgram(BPF_Program* arg_program, SecondaryEvent* arg_event) - { - program = arg_program; - event = arg_event; - } - - ~SecondaryProgram(); - - BPF_Program* Program() { return program; } - SecondaryEvent* Event() { return event; } - -private: - // Associated program. - BPF_Program *program; - - // Event that is run in case the program is matched. - SecondaryEvent* event; -}; - -declare(PList,SecondaryProgram); -typedef PList(SecondaryProgram) secondary_program_list; - - - -class PktSrc : public IOSource { -public: - ~PktSrc(); - - // IOSource interface - bool IsReady(); - void GetFds(int* read, int* write, int* except); - double NextTimestamp(double* local_network_time); - void Process(); - const char* Tag() { return "PktSrc"; } - - const char* ErrorMsg() const { return errbuf; } - void ClearErrorMsg() { *errbuf ='\0'; } - - // Returns the packet last processed; false if there is no - // current packet available. 
- bool GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt); - - int HdrSize() const { return hdr_size; } - int DataLink() const { return datalink; } - - void ConsumePacket() { data = 0; } - - int IsLive() const { return interface != 0; } - - pcap_t* PcapHandle() const { return pd; } - int LinkType() const { return pcap_datalink(pd); } - - const char* ReadFile() const { return readfile; } - const char* Interface() const { return interface; } - PktSrc_Filter_Type FilterType() const { return filter_type; } - void AddSecondaryTablePrograms(); - const secondary_program_list& ProgramTable() const - { return program_list; } - - // Signal packet source that processing was suspended and is now going - // to be continued. - void ContinueAfterSuspend(); - - // Only valid in pseudo-realtime mode. - double CurrentPacketTimestamp() { return current_pseudo; } - double CurrentPacketWallClock(); - - struct Stats { - unsigned int received; // pkts received (w/o drops) - unsigned int dropped; // pkts dropped - unsigned int link; // total packets on link - // (not always not available) - }; - - virtual void Statistics(Stats* stats); - - // Precompiles a filter and associates the given index with it. - // Returns true on success, 0 if a problem occurred. - virtual int PrecompileFilter(int index, const char* filter); - - // Activates the filter with the given index. - // Returns true on success, 0 if a problem occurred. - virtual int SetFilter(int index); - -protected: - PktSrc(); - - static const int PCAP_TIMEOUT = 20; - - void SetHdrSize(); - - virtual void Close(); - - // Returns 1 on success, 0 on time-out/gone dry. - virtual int ExtractNextPacket(); - - // Checks if the current packet has a pseudo-time <= current_time. - // If yes, returns pseudo-time, otherwise 0. - double CheckPseudoTime(); - - double current_timestamp; - double next_timestamp; - - // Only set in pseudo-realtime mode. 
- double first_timestamp; - double first_wallclock; - double current_wallclock; - double current_pseudo; - - struct pcap_pkthdr hdr; - const u_char* data; // contents of current packet - const u_char* last_data; // same, but unaffected by consuming - int hdr_size; - int datalink; - double next_sync_point; // For trace synchronziation in pseudo-realtime - - char* interface; // nil if not reading from an interface - char* readfile; // nil if not reading from a file - - pcap_t* pd; - int selectable_fd; - uint32 netmask; - char errbuf[BRO_PCAP_ERRBUF_SIZE]; - - Stats stats; - - PDict(BPF_Program) filters; // precompiled filters - - PktSrc_Filter_Type filter_type; // normal path or secondary path - secondary_program_list program_list; -}; - -class PktInterfaceSrc : public PktSrc { -public: - PktInterfaceSrc(const char* interface, const char* filter, - PktSrc_Filter_Type ft=TYPE_FILTER_NORMAL); -}; - -class PktFileSrc : public PktSrc { -public: - PktFileSrc(const char* readfile, const char* filter, - PktSrc_Filter_Type ft=TYPE_FILTER_NORMAL); -}; - - -extern int get_link_header_size(int dl); - -class PktDumper { -public: - PktDumper(const char* file = 0, bool append = false); - ~PktDumper() { Close(); } - - bool Open(const char* file = 0); - bool Close(); - bool Dump(const struct pcap_pkthdr* hdr, const u_char* pkt); - - pcap_dumper_t* PcapDumper() { return dumper; } - - const char* FileName() const { return filename; } - bool IsError() const { return is_error; } - const char* ErrorMsg() const { return errbuf; } - - // This heuristic will horribly fail if we're using packets - // with different link layers. (If we can't derive a reasonable value - // from the packet sources, our fall-back is Ethernet.) - int HdrSize() const - { return get_link_header_size(pcap_datalink(pd)); } - - // Network time when dump file was opened. 
- double OpenTime() const { return open_time; } - -private: - void InitPd(); - void Error(const char* str); - - static const int FNBUF_LEN = 1024; - char filename[FNBUF_LEN]; - - bool append; - pcap_dumper_t* dumper; - pcap_t* pd; - double open_time; - - bool is_error; - char errbuf[BRO_PCAP_ERRBUF_SIZE]; -}; - -#endif diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index c8cf03667b..b0db8fafe8 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -188,10 +188,11 @@ #include "File.h" #include "Conn.h" #include "Reporter.h" -#include "threading/SerialTypes.h" -#include "logging/Manager.h" #include "IPAddr.h" #include "bro_inet_ntop.h" +#include "threading/SerialTypes.h" +#include "logging/Manager.h" +#include "iosource/Manager.h" extern "C" { #include "setsignal.h" @@ -284,10 +285,10 @@ struct ping_args { \ if ( ! c ) \ { \ - idle = io->IsIdle();\ + SetIdle(io->IsIdle());\ return true; \ } \ - idle = false; \ + SetIdle(false); \ } static const char* msgToStr(int msg) @@ -536,7 +537,6 @@ RemoteSerializer::RemoteSerializer() current_sync_point = 0; syncing_times = false; io = 0; - closed = false; terminating = false; in_sync = 0; last_flush = 0; @@ -574,7 +574,7 @@ void RemoteSerializer::Init() Fork(); - io_sources.Register(this); + iosource_mgr->Register(this); Log(LogInfo, fmt("communication started, parent pid is %d, child pid is %d", getpid(), child_pid)); initialized = 1; @@ -1278,7 +1278,7 @@ bool RemoteSerializer::Listen(const IPAddr& ip, uint16 port, bool expect_ssl, return false; listening = true; - closed = false; + SetClosed(false); return true; } @@ -1347,7 +1347,7 @@ bool RemoteSerializer::StopListening() return false; listening = false; - closed = ! IsActive(); + SetClosed(! IsActive()); return true; } @@ -1385,7 +1385,7 @@ double RemoteSerializer::NextTimestamp(double* local_network_time) if ( received_logs > 0 ) { // If we processed logs last time, assume there's more. 
- idle = false; + SetIdle(false); received_logs = 0; return timer_mgr->Time(); } @@ -1400,7 +1400,7 @@ double RemoteSerializer::NextTimestamp(double* local_network_time) pt = timer_mgr->Time(); if ( packets.length() ) - idle = false; + SetIdle(false); if ( et >= 0 && (et < pt || pt < 0) ) return et; @@ -1479,7 +1479,7 @@ void RemoteSerializer::Process() } if ( packets.length() ) - idle = false; + SetIdle(false); } void RemoteSerializer::Finish() @@ -1511,7 +1511,7 @@ bool RemoteSerializer::Poll(bool may_block) } io->Flush(); - idle = false; + SetIdle(false); switch ( msgstate ) { case TYPE: @@ -1695,7 +1695,7 @@ bool RemoteSerializer::DoMessage() case MSG_TERMINATE: assert(terminating); - io_sources.Terminate(); + iosource_mgr->Terminate(); return true; case MSG_REMOTE_PRINT: @@ -1885,7 +1885,7 @@ void RemoteSerializer::RemovePeer(Peer* peer) delete peer->cache_out; delete peer; - closed = ! IsActive(); + SetClosed(! IsActive()); if ( in_sync == peer ) in_sync = 0; @@ -2850,7 +2850,7 @@ void RemoteSerializer::GotEvent(const char* name, double time, BufferedEvent* e = new BufferedEvent; // Our time, not the time when the event was generated. - e->time = pkt_srcs.length() ? + e->time = iosource_mgr->GetPktSrcs().size() ? time_t(network_time) : time_t(timer_mgr->Time()); e->src = current_peer->id; @@ -3094,7 +3094,7 @@ RecordVal* RemoteSerializer::GetPeerVal(PeerID id) void RemoteSerializer::ChildDied() { Log(LogError, "child died"); - closed = true; + SetClosed(true); child_pid = 0; // Shut down the main process as well. 
@@ -3188,7 +3188,7 @@ void RemoteSerializer::FatalError(const char* msg) Log(LogError, msg); reporter->Error("%s", msg); - closed = true; + SetClosed(true); if ( kill(child_pid, SIGQUIT) < 0 ) reporter->Warning("warning: cannot kill child pid %d, %s", child_pid, strerror(errno)); diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index 5ff7fff8d6..f8b306f002 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -6,7 +6,7 @@ #include "Dict.h" #include "List.h" #include "Serializer.h" -#include "IOSource.h" +#include "iosource/IOSource.h" #include "Stats.h" #include "File.h" #include "logging/WriterBackend.h" @@ -22,7 +22,7 @@ namespace threading { } // This class handles the communication done in Bro's main loop. -class RemoteSerializer : public Serializer, public IOSource { +class RemoteSerializer : public Serializer, public iosource::IOSource { public: RemoteSerializer(); virtual ~RemoteSerializer(); diff --git a/src/Serializer.cc b/src/Serializer.cc index 156ad67f2e..74740497a1 100644 --- a/src/Serializer.cc +++ b/src/Serializer.cc @@ -19,6 +19,7 @@ #include "Conn.h" #include "Timer.h" #include "RemoteSerializer.h" +#include "iosource/Manager.h" Serializer::Serializer(SerializationFormat* arg_format) { @@ -1045,7 +1046,7 @@ EventPlayer::EventPlayer(const char* file) Error(fmt("event replayer: cannot open %s", file)); if ( ReadHeader() ) - io_sources.Register(this); + iosource_mgr->Register(this); } EventPlayer::~EventPlayer() @@ -1085,7 +1086,7 @@ double EventPlayer::NextTimestamp(double* local_network_time) { UnserialInfo info(this); Unserialize(&info); - closed = io->Eof(); + SetClosed(io->Eof()); } if ( ! 
ne_time ) @@ -1142,7 +1143,7 @@ bool Packet::Serialize(SerialInfo* info) const static BroFile* profiling_output = 0; #ifdef DEBUG -static PktDumper* dump = 0; +static iosource::PktDumper* dump = 0; #endif Packet* Packet::Unserialize(UnserialInfo* info) @@ -1188,7 +1189,7 @@ Packet* Packet::Unserialize(UnserialInfo* info) p->hdr = hdr; p->pkt = (u_char*) pkt; p->tag = tag; - p->hdr_size = get_link_header_size(p->link_type); + p->hdr_size = iosource::PktSrc::GetLinkHeaderSize(p->link_type); delete [] tag; @@ -1213,9 +1214,15 @@ Packet* Packet::Unserialize(UnserialInfo* info) if ( debug_logger.IsEnabled(DBG_TM) ) { if ( ! dump ) - dump = new PktDumper("tm.pcap"); + dump = iosource_mgr->OpenPktDumper("tm.pcap", true); - dump->Dump(p->hdr, p->pkt); + if ( dump ) + { + iosource::PktDumper::Packet dp; + dp.hdr = p->hdr; + dp.data = p->pkt; + dump->Record(&dp); + } } #endif diff --git a/src/Serializer.h b/src/Serializer.h index af4878ccf5..3be2da5134 100644 --- a/src/Serializer.h +++ b/src/Serializer.h @@ -15,7 +15,7 @@ #include "SerialInfo.h" #include "IP.h" #include "Timer.h" -#include "IOSource.h" +#include "iosource/IOSource.h" #include "Reporter.h" class SerializationCache; @@ -350,7 +350,7 @@ public: }; // Plays a file of events back. 
-class EventPlayer : public FileSerializer, public IOSource { +class EventPlayer : public FileSerializer, public iosource::IOSource { public: EventPlayer(const char* file); virtual ~EventPlayer(); diff --git a/src/Sessions.cc b/src/Sessions.cc index acc306d277..c84c677db4 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -168,7 +168,7 @@ void NetSessions::Done() void NetSessions::DispatchPacket(double t, const struct pcap_pkthdr* hdr, const u_char* pkt, int hdr_size, - PktSrc* src_ps, PacketSortElement* pkt_elem) + iosource::PktSrc* src_ps, PacketSortElement* pkt_elem) { const struct ip* ip_hdr = 0; const u_char* ip_data = 0; @@ -185,10 +185,14 @@ void NetSessions::DispatchPacket(double t, const struct pcap_pkthdr* hdr, // Blanket encapsulation hdr_size += encap_hdr_size; +#if 0 if ( src_ps->FilterType() == TYPE_FILTER_NORMAL ) NextPacket(t, hdr, pkt, hdr_size, pkt_elem); else NextPacketSecondary(t, hdr, pkt, hdr_size, src_ps); +#else + NextPacket(t, hdr, pkt, hdr_size, pkt_elem); +#endif } void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, @@ -278,7 +282,7 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, void NetSessions::NextPacketSecondary(double /* t */, const struct pcap_pkthdr* hdr, const u_char* const pkt, int hdr_size, - const PktSrc* src_ps) + const iosource::PktSrc* src_ps) { SegmentProfiler(segment_logger, "processing-secondary-packet"); @@ -291,6 +295,7 @@ void NetSessions::NextPacketSecondary(double /* t */, const struct pcap_pkthdr* return; } +#if 0 const struct ip* ip = (const struct ip*) (pkt + hdr_size); if ( ip->ip_v == 4 ) { @@ -321,6 +326,7 @@ void NetSessions::NextPacketSecondary(double /* t */, const struct pcap_pkthdr* delete args; } } +#endif } int NetSessions::CheckConnectionTag(Connection* conn) @@ -1341,14 +1347,24 @@ void NetSessions::DumpPacket(const struct pcap_pkthdr* hdr, return; if ( len == 0 ) - pkt_dumper->Dump(hdr, pkt); + { + iosource::PktDumper::Packet p; + p.hdr = hdr; + p.data = 
pkt; + pkt_dumper->Record(&p); + } + else { struct pcap_pkthdr h = *hdr; h.caplen = len; if ( h.caplen > hdr->caplen ) reporter->InternalError("bad modified caplen"); - pkt_dumper->Dump(&h, pkt); + + iosource::PktDumper::Packet p; + p.hdr = &h; + p.data = pkt; + pkt_dumper->Record(&p); } } diff --git a/src/Sessions.h b/src/Sessions.h index 1788541f45..4f12bd1240 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -74,7 +74,7 @@ public: // employing the packet sorter first. void DispatchPacket(double t, const struct pcap_pkthdr* hdr, const u_char* const pkt, int hdr_size, - PktSrc* src_ps, PacketSortElement* pkt_elem); + iosource::PktSrc* src_ps, PacketSortElement* pkt_elem); void Done(); // call to drain events before destructing @@ -225,7 +225,7 @@ protected: void NextPacketSecondary(double t, const struct pcap_pkthdr* hdr, const u_char* const pkt, int hdr_size, - const PktSrc* src_ps); + const iosource::PktSrc* src_ps); // Record the given packet (if a dumper is active). If len=0 // then the whole packet is recorded, otherwise just the first diff --git a/src/bro.bif b/src/bro.bif index 24dff3c77c..2b94307143 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -21,6 +21,7 @@ #include "IPAddr.h" #include "util.h" #include "file_analysis/Manager.h" +#include "iosource/Manager.h" using namespace std; @@ -33,7 +34,7 @@ TableType* var_sizes; // and hence it's declared in NetVar.{h,cc}. 
extern RecordType* gap_info; -static PktDumper* addl_pkt_dumper = 0; +static iosource::PktDumper* addl_pkt_dumper = 0; bro_int_t parse_int(const char*& fmt) { @@ -1657,11 +1658,14 @@ function net_stats%(%): NetStats unsigned int drop = 0; unsigned int link = 0; - loop_over_list(pkt_srcs, i) - { - PktSrc* ps = pkt_srcs[i]; + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); - struct PktSrc::Stats stat; + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + { + iosource::PktSrc* ps = *i; + + struct iosource::PktSrc::Stats stat; ps->Statistics(&stat); recv += stat.received; drop += stat.dropped; @@ -3206,10 +3210,15 @@ function dump_current_packet%(file_name: string%) : bool return new Val(0, TYPE_BOOL); if ( ! addl_pkt_dumper ) - addl_pkt_dumper = new PktDumper(0, true); + addl_pkt_dumper = iosource_mgr->OpenPktDumper(file_name->CheckString(), true); - addl_pkt_dumper->Open(file_name->CheckString()); - addl_pkt_dumper->Dump(hdr, pkt); + if ( addl_pkt_dumper ) + { + iosource::PktDumper::Packet p; + p.hdr = hdr; + p.data = pkt; + addl_pkt_dumper->Record(&p); + } return new Val(! addl_pkt_dumper->IsError(), TYPE_BOOL); %} @@ -3266,10 +3275,15 @@ function dump_packet%(pkt: pcap_packet, file_name: string%) : bool hdr.len = (*pkt_vl)[3]->AsCount(); if ( ! addl_pkt_dumper ) - addl_pkt_dumper = new PktDumper(0, true); + addl_pkt_dumper = iosource_mgr->OpenPktDumper(file_name->CheckString(), true); - addl_pkt_dumper->Open(file_name->CheckString()); - addl_pkt_dumper->Dump(&hdr, (*pkt_vl)[4]->AsString()->Bytes()); + if ( addl_pkt_dumper ) + { + iosource::PktDumper::Packet p; + p.hdr = &hdr; + p.data = (*pkt_vl)[4]->AsString()->Bytes(); + addl_pkt_dumper->Record(&p); + } return new Val(addl_pkt_dumper->IsError(), TYPE_BOOL); %} @@ -4030,14 +4044,14 @@ function rotate_file_by_name%(f: string%): rotate_info bool is_addl_pkt_dumper = false; // Special case: one of current dump files. 
- if ( pkt_dumper && streq(pkt_dumper->FileName(), f->CheckString()) ) + if ( pkt_dumper && streq(pkt_dumper->Path().c_str(), f->CheckString()) ) { is_pkt_dumper = true; pkt_dumper->Close(); } if ( addl_pkt_dumper && - streq(addl_pkt_dumper->FileName(), f->CheckString()) ) + streq(addl_pkt_dumper->Path().c_str(), f->CheckString()) ) { is_addl_pkt_dumper = true; addl_pkt_dumper->Close(); @@ -4156,15 +4170,18 @@ function precompile_pcap_filter%(id: PcapFilterID, s: string%): bool %{ bool success = true; - loop_over_list(pkt_srcs, i) - { - pkt_srcs[i]->ClearErrorMsg(); + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); - if ( ! pkt_srcs[i]->PrecompileFilter(id->ForceAsInt(), + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + { + iosource::PktSrc* ps = *i; + + if ( ! ps->PrecompileFilter(id->ForceAsInt(), s->CheckString()) ) { reporter->Error("precompile_pcap_filter: %s", - pkt_srcs[i]->ErrorMsg()); + ps->ErrorMsg()); success = false; } } @@ -4194,11 +4211,14 @@ function install_pcap_filter%(id: PcapFilterID%): bool %{ bool success = true; - loop_over_list(pkt_srcs, i) - { - pkt_srcs[i]->ClearErrorMsg(); + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); - if ( ! pkt_srcs[i]->SetFilter(id->ForceAsInt()) ) + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + { + iosource::PktSrc* ps = *i; + + if ( ! 
ps->SetFilter(id->ForceAsInt()) ) success = false; } @@ -4221,9 +4241,14 @@ function install_pcap_filter%(id: PcapFilterID%): bool ## uninstall_dst_net_filter function pcap_error%(%): string %{ - loop_over_list(pkt_srcs, i) + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) { - const char* err = pkt_srcs[i]->ErrorMsg(); + iosource::PktSrc* ps = *i; + + const char* err = ps->ErrorMsg(); if ( *err ) return new StringVal(err); } diff --git a/src/iosource/CMakeLists.txt b/src/iosource/CMakeLists.txt new file mode 100644 index 0000000000..a9246e8de9 --- /dev/null +++ b/src/iosource/CMakeLists.txt @@ -0,0 +1,23 @@ + +include(BroSubdir) + +include_directories(BEFORE + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} +) + +add_subdirectory(pktsrc) + + +set(iosource_SRCS + Component.cc + Manager.cc + + pktsrc/Component.cc + pktsrc/PktDumper.cc + pktsrc/PktSrc.cc +) + +bro_add_subdir_library(iosource ${iosource_SRCS}) +add_dependencies(bro_iosource generate_outputs) + diff --git a/src/iosource/Component.cc b/src/iosource/Component.cc new file mode 100644 index 0000000000..5f916c0a11 --- /dev/null +++ b/src/iosource/Component.cc @@ -0,0 +1,44 @@ + +#include "Component.h" + +#include "Desc.h" + +using namespace iosource; + +Component::Component(const std::string& arg_name) + : plugin::Component(plugin::component::IOSOURCE) + { + name = arg_name; + } + +Component::Component(plugin::component::Type type, const std::string& arg_name) + : plugin::Component(type) + { + name = arg_name; + } + +Component::Component(const Component& other) + : plugin::Component(other) + { + name = other.name; + } + +Component::~Component() + { + } + +void Component::Describe(ODesc* d) const + { + plugin::Component::Describe(d); + d->Add(name); + } + +Component& Component::operator=(const Component& other) + { + plugin::Component::operator=(other); + + if ( 
&other != this ) + name = other.name; + + return *this; + } diff --git a/src/iosource/Component.h b/src/iosource/Component.h new file mode 100644 index 0000000000..b56eeb038c --- /dev/null +++ b/src/iosource/Component.h @@ -0,0 +1,56 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef IOSOURCE_PLUGIN_COMPONENT_H +#define IOSOURCE_PLUGIN_COMPONENT_H + +#include "plugin/Component.h" + +namespace iosource { + +class IOSource; + +/** + * Component description for plugins providing IOSources. + */ +class Component : public plugin::Component { +public: + typedef IOSource* (*factory_callback)(); + + /** + * XXX + */ + Component(const std::string& name); + + /** + * Copy constructor. + */ + Component(const Component& other); + + /** + * Destructor. + */ + ~Component(); + + /** + * XXX + */ + virtual const char* Name() const { return name.c_str(); } + + /** + * Generates a human-readable description of the component. This goes + * into the output of \c "bro -NN". + */ + virtual void Describe(ODesc* d) const; + + Component& operator=(const Component& other); + +protected: + Component(plugin::component::Type type, const std::string& name); + +private: + std::string name; +}; + +} + +#endif diff --git a/src/IOSource.h b/src/iosource/IOSource.h similarity index 51% rename from src/IOSource.h rename to src/iosource/IOSource.h index db50bbd2a9..3419152a9a 100644 --- a/src/IOSource.h +++ b/src/iosource/IOSource.h @@ -1,13 +1,17 @@ -// Interface for classes providing/consuming data during Bro's main loop. +// See the file "COPYING" in the main distribution directory for copyright. -#ifndef iosource_h -#define iosource_h +#ifndef IOSOURCE_IOSOURCE_H +#define IOSOURCE_IOSOURCE_H + +#include -#include #include "Timer.h" -using namespace std; +namespace iosource { +/** + * Interface class for components providing/consuming data inside Bro's main loop. 
+ */ class IOSource { public: IOSource() { idle = closed = false; } @@ -20,6 +24,12 @@ public: // Otherwise, source may be removed. bool IsOpen() const { return ! closed; } + // XXX + virtual void Init() { } + + // XXX + virtual void Done() { } + // Returns select'able fds (leaves args untouched if we don't have // selectable fds). virtual void GetFds(int* read, int* write, int* except) = 0; @@ -46,58 +56,18 @@ public: protected: // Derived classed are to set this to true if they have gone dry // temporarily. - bool idle; + void SetIdle(bool is_idle) { idle = is_idle; } + // Derived classed are to set this to true if they have gone dry - // permanently. + // temporarily. + void SetClosed(bool is_closed) { closed = is_closed; } + +private: + bool idle; bool closed; }; -class IOSourceRegistry { -public: - IOSourceRegistry() { call_count = 0; dont_counts = 0; } - ~IOSourceRegistry(); - - // If dont_count is true, this source does not contribute to the - // number of IOSources returned by Size(). The effect is that - // if all sources but the non-counting ones have gone dry, - // processing will shut down. - void Register(IOSource* src, bool dont_count = false); - - // This may block for some time. - IOSource* FindSoonest(double* ts); - - int Size() const { return sources.size() - dont_counts; } - - // Terminate IOSource processing immediately by removing all - // sources (and therefore returning a Size() of zero). - void Terminate() { RemoveAll(); } - -protected: - // When looking for a source with something to process, - // every SELECT_FREQUENCY calls we will go ahead and - // block on a select(). - static const int SELECT_FREQUENCY = 25; - - // Microseconds to wait in an empty select if no source is ready. 
- static const int SELECT_TIMEOUT = 50; - - void RemoveAll(); - - unsigned int call_count; - int dont_counts; - - struct Source { - IOSource* src; - int fd_read; - int fd_write; - int fd_except; - }; - - typedef list SourceList; - SourceList sources; -}; - -extern IOSourceRegistry io_sources; +} #endif diff --git a/src/IOSource.cc b/src/iosource/Manager.cc similarity index 52% rename from src/IOSource.cc rename to src/iosource/Manager.cc index d47007caad..9c14330868 100644 --- a/src/IOSource.cc +++ b/src/iosource/Manager.cc @@ -1,3 +1,4 @@ + #include #include #include @@ -5,26 +6,37 @@ #include -#include "util.h" +#include "Manager.h" #include "IOSource.h" +#include "pktsrc/PktSrc.h" +#include "pktsrc/PktDumper.h" +#include "pktsrc/Component.h" +#include "plugin/Manager.h" -IOSourceRegistry io_sources; +#include "util.h" -IOSourceRegistry::~IOSourceRegistry() +#define DEFAULT_PREFIX "pcap" + +using namespace iosource; + +Manager::~Manager() { for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i ) + { + (*i)->src->Done(); delete *i; + } sources.clear(); } -void IOSourceRegistry::RemoveAll() +void Manager::RemoveAll() { // We're cheating a bit here ... dont_counts = sources.size(); } -IOSource* IOSourceRegistry::FindSoonest(double* ts) +IOSource* Manager::FindSoonest(double* ts) { // Remove sources which have gone dry. For simplicity, we only // remove at most one each time. 
@@ -101,9 +113,9 @@ IOSource* IOSourceRegistry::FindSoonest(double* ts) FD_SET(src->fd_write, &fd_write); FD_SET(src->fd_except, &fd_except); - maxx = max(src->fd_read, maxx); - maxx = max(src->fd_write, maxx); - maxx = max(src->fd_except, maxx); + maxx = std::max(src->fd_read, maxx); + maxx = std::max(src->fd_write, maxx); + maxx = std::max(src->fd_except, maxx); } // We can't block indefinitely even when all sources are dry: @@ -166,11 +178,130 @@ finished: return soonest_src; } -void IOSourceRegistry::Register(IOSource* src, bool dont_count) +void Manager::Register(IOSource* src, bool dont_count) { + src->Init(); Source* s = new Source; s->src = src; if ( dont_count ) ++dont_counts; - return sources.push_back(s); + + sources.push_back(s); + } + +void Manager::Register(PktSrc* src) + { + pkt_srcs.push_back(src); + Register(src, false); + } + +static std::pair split_prefix(std::string path) + { + // See if the path comes with a prefix telling us which type of + // PktSrc to use. If not, choose default. + std::string prefix; + + std::string::size_type i = path.find(":"); + if ( i != std::string::npos ) + { + prefix = path.substr(0, i); + path = path.substr(++i, std::string::npos); + } + + else + prefix= DEFAULT_PREFIX; + + return std::make_pair(prefix, path); + } + +PktSrc* Manager::OpenPktSrc(const std::string& path, const std::string& filter, bool is_live) + { + std::pair t = split_prefix(path); + std::string prefix = t.first; + std::string npath = t.second; + + // Find the component providing packet sources of the requested prefix. + + pktsrc::SourceComponent* component = 0; + + std::list all_components = plugin_mgr->Components(); + + for ( std::list::const_iterator i = all_components.begin(); + i != all_components.end(); i++ ) + { + pktsrc::SourceComponent* c = *i; + + if ( c->Prefix() == prefix && + (( is_live && c->DoesLive() ) || + (! is_live && c->DoesTrace())) ) + { + component = c; + break; + } + } + + + if ( ! 
component ) + reporter->FatalError("type of packet source '%s' not recognized", prefix.c_str()); + + // Instantiate packet source. + + PktSrc* ps = (*component->Factory())(path, filter, is_live); + + if ( ! (ps && ps->IsOpen()) ) + { + string type = (is_live ? "interface" : "trace file"); + string pserr = ps->ErrorMsg() ? (string(" - ") + ps->ErrorMsg()) : ""; + + reporter->FatalError("%s: problem with %s %s%s\n", + prog, path.c_str(), type.c_str(), pserr.c_str()); + } + + DBG_LOG(DBG_PKTIO, "Created packet source of type %s for %s\n", component->Name(), path.c_str()); + + Register(ps); + return ps; + } + + +PktDumper* Manager::OpenPktDumper(const string& path, bool append) + { + std::pair t = split_prefix(path); + std::string prefix = t.first; + std::string npath = t.second; + + // Find the component providing packet dumpers of the requested prefix. + + pktsrc::DumperComponent* component = 0; + + std::list all_components = plugin_mgr->Components(); + + for ( std::list::const_iterator i = all_components.begin(); + i != all_components.end(); i++ ) + { + if ( (*i)->Prefix() == prefix ) + { + component = (*i); + break; + } + } + + if ( ! component ) + reporter->FatalError("type of packet dumper '%s' not recognized", prefix.c_str()); + + // Instantiate packet dumper. + + PktDumper* pd = (*component->Factory())(path, append); + + if ( ! (pd && pd->IsOpen()) ) + { + string pderr = pd->ErrorMsg().size() ? (string(" - ") + pd->ErrorMsg()) : ""; + + reporter->FatalError("%s: can't open write file \"%s\"%s\n", + prog, path.c_str(), pderr.c_str()); + } + + DBG_LOG(DBG_PKTIO, "Created packer dumper of type %s for %s\n", component->Name(), path.c_str()); + + return pd; } diff --git a/src/iosource/Manager.h b/src/iosource/Manager.h new file mode 100644 index 0000000000..5a3a58d798 --- /dev/null +++ b/src/iosource/Manager.h @@ -0,0 +1,75 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#ifndef IOSOURCE_MANAGER_H +#define IOSOURCE_MANAGER_H + +#include +#include + +namespace iosource { + +class IOSource; +class PktSrc; +class PktDumper; + +class Manager { +public: + Manager() { call_count = 0; dont_counts = 0; } + ~Manager(); + + // If dont_count is true, this source does not contribute to the + // number of IOSources returned by Size(). The effect is that + // if all sources but the non-counting ones have gone dry, + // processing will shut down. + void Register(IOSource* src, bool dont_count = false); + + // This may block for some time. + IOSource* FindSoonest(double* ts); + + int Size() const { return sources.size() - dont_counts; } + + typedef std::list PktSrcList; + const PktSrcList& GetPktSrcs() const { return pkt_srcs; } + + // Terminate IOSource processing immediately by removing all + // sources (and therefore returning a Size() of zero). + void Terminate() { RemoveAll(); } + + PktSrc* OpenPktSrc(const std::string& path, const std::string& filter, bool is_live); + PktDumper* OpenPktDumper(const std::string& path, bool append); + +protected: + void Register(PktSrc* src); + + // When looking for a source with something to process, + // every SELECT_FREQUENCY calls we will go ahead and + // block on a select(). + static const int SELECT_FREQUENCY = 25; + + // Microseconds to wait in an empty select if no source is ready. 
+ static const int SELECT_TIMEOUT = 50; + + void RemoveAll(); + + unsigned int call_count; + int dont_counts; + + struct Source { + IOSource* src; + int fd_read; + int fd_write; + int fd_except; + }; + + typedef std::list SourceList; + SourceList sources; + + PktSrcList pkt_srcs; +}; + +} + +extern iosource::Manager* iosource_mgr; + +#endif + diff --git a/src/iosource/pktsrc/CMakeLists.txt b/src/iosource/pktsrc/CMakeLists.txt new file mode 100644 index 0000000000..07303b46a3 --- /dev/null +++ b/src/iosource/pktsrc/CMakeLists.txt @@ -0,0 +1,2 @@ + +add_subdirectory(pcap) diff --git a/src/iosource/pktsrc/Component.cc b/src/iosource/pktsrc/Component.cc new file mode 100644 index 0000000000..22c49feed0 --- /dev/null +++ b/src/iosource/pktsrc/Component.cc @@ -0,0 +1,130 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "Component.h" + +#include "../Desc.h" + +using namespace iosource::pktsrc; + +SourceComponent::SourceComponent(const std::string& arg_name, const std::string& arg_prefix, InputType arg_type, factory_callback arg_factory) + : iosource::Component(plugin::component::PKTSRC, arg_name) + { + prefix = arg_prefix; + type = arg_type; + factory = arg_factory; + } + +SourceComponent::SourceComponent(const SourceComponent& other) + : iosource::Component(other) + { + prefix = other.prefix; + type = other.type; + factory = other.factory; + } + +SourceComponent::~SourceComponent() + { + } + +const std::string& SourceComponent::Prefix() const + { + return prefix; + } + +bool SourceComponent::DoesLive() const + { + return type == LIVE || type == BOTH; + } + +bool SourceComponent::DoesTrace() const + { + return type == TRACE || type == BOTH; + } + +SourceComponent::factory_callback SourceComponent::Factory() const + { + return factory; + } + + +void SourceComponent::Describe(ODesc* d) const + { + iosource::Component::Describe(d); + + d->Add(" (interface prefix: "); + d->Add(prefix); + d->Add(")"); + } + +SourceComponent& 
SourceComponent::operator=(const SourceComponent& other) + { + iosource::Component::operator=(other); + + if ( &other != this ) + { + prefix = other.prefix; + type = other.type; + factory = other.factory; + } + + return *this; + } + +DumperComponent::DumperComponent(const std::string& arg_name, const std::string& arg_prefix, factory_callback arg_factory) + : plugin::Component(plugin::component::PKTDUMPER) + { + name = arg_name; + factory = arg_factory; + prefix = arg_prefix; + } + +DumperComponent::DumperComponent(const DumperComponent& other) + : plugin::Component(other) + { + name = other.name; + factory = other.factory; + prefix = other.prefix; + } + +DumperComponent::~DumperComponent() + { + } + +DumperComponent::factory_callback DumperComponent::Factory() const + { + return factory; + } + +const char* DumperComponent::Name() const + { + return name.c_str(); + } + +const std::string& DumperComponent::Prefix() const + { + return prefix; + } + +void DumperComponent::Describe(ODesc* d) const + { + plugin::Component::Describe(d); + + d->Add(name); + d->Add(" (dumper prefix: "); + d->Add(prefix); + d->Add(")"); + } + +DumperComponent& DumperComponent::operator=(const DumperComponent& other) + { + plugin::Component::operator=(other); + + if ( &other != this ) + { + name = other.name; + factory = other.factory; + prefix = other.prefix; + } + + return *this; + } diff --git a/src/iosource/pktsrc/Component.h b/src/iosource/pktsrc/Component.h new file mode 100644 index 0000000000..2a62fb5503 --- /dev/null +++ b/src/iosource/pktsrc/Component.h @@ -0,0 +1,132 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef IOSOURCE_PKTSRC_PLUGIN_COMPONENT_H +#define IOSOURCE_PKTSRC_PLUGIN_COMPONENT_H + +#include "../Component.h" + +namespace iosource { + +class PktSrc; +class PktDumper; + +namespace pktsrc { + +/** + * Component description for plugins providing a PktSrc for packet input. 
+ */ +class SourceComponent : public iosource::Component { +public: + enum InputType { LIVE, TRACE, BOTH }; + + typedef PktSrc* (*factory_callback)(const std::string& path, const std::string& filter, bool is_live); + + /** + * XXX + */ + SourceComponent(const std::string& name, const std::string& prefix, InputType type, factory_callback factory); + + /** + * Copy constructor. + */ + SourceComponent(const SourceComponent& other); + + /** + * Destructor. + */ + virtual ~SourceComponent(); + + /** + * Returns the prefix passed to the constructor. + */ + const std::string& Prefix() const; + + /** + * Returns true if packet sources instantiated by the component handle + * live traffic. + */ + bool DoesLive() const; + + /** + * Returns true if packet sources instantiated by the component handle + * offline traces. + */ + bool DoesTrace() const; + + /** + * Returns the source's factory function. + */ + factory_callback Factory() const; + + /** + * Generates a human-readable description of the component. This goes + * into the output of \c "bro -NN". + */ + virtual void Describe(ODesc* d) const; + + SourceComponent& operator=(const SourceComponent& other); + +private: + std::string prefix; + InputType type; + factory_callback factory; +}; + +/** + * Component description for plugins providing a PktDumper for packet output. + * + * PktDumpers aren't IOSources but we locate them here to keep them along with + * the PktSrc. + */ +class DumperComponent : public plugin::Component { +public: + typedef PktDumper* (*factory_callback)(const std::string& path, bool append); + + /** + * XXX + */ + DumperComponent(const std::string& name, const std::string& prefix, factory_callback factory); + + /** + * Copy constructor. + */ + DumperComponent(const DumperComponent& other); + + /** + * Destructor. + */ + ~DumperComponent(); + + /** + * XXX + */ + virtual const char* Name() const; + + /** + * Returns the prefix passed to the constructor. 
+ */ + const std::string& Prefix() const; + + /** + * Returns the source's factory function. + */ + factory_callback Factory() const; + + /** + * Generates a human-readable description of the component. This goes + * into the output of \c "bro -NN". + */ + virtual void Describe(ODesc* d) const; + + DumperComponent& operator=(const DumperComponent& other); + +private: + std::string name; + std::string prefix; + factory_callback factory; +}; + +} +} + +#endif diff --git a/src/iosource/pktsrc/PktDumper.cc b/src/iosource/pktsrc/PktDumper.cc new file mode 100644 index 0000000000..21ad79b87d --- /dev/null +++ b/src/iosource/pktsrc/PktDumper.cc @@ -0,0 +1,79 @@ + +// See the file "COPYING" in the main distribution directory for copyright. + +#include +#include + +#include "config.h" + +#include "PktDumper.h" + +using namespace iosource; + +PktDumper::PktDumper() + { + is_open = false; + errmsg = ""; + } + +PktDumper::~PktDumper() + { + } + +const std::string& PktDumper::Path() const + { + return props.path; + } + +bool PktDumper::IsOpen() const + { + return is_open; + } + +double PktDumper::OpenTime() const + { + return is_open ? props.open_time : 0; + } + +bool PktDumper::IsError() const + { + return errmsg.size(); + } + +const std::string& PktDumper::ErrorMsg() const + { + return errmsg; + } + +int PktDumper::HdrSize() const + { + return is_open ? props.hdr_size : -1; + } + +bool PktDumper::Record(const Packet* pkt) + { + return Dump(pkt); + } + +void PktDumper::Opened(const Properties& arg_props) + { + is_open = true; + props = arg_props; + DBG_LOG(DBG_PKTIO, "Opened dumper %s", props.path.c_str()); + } + +void PktDumper::Closed() + { + is_open = false; + props.path = ""; + DBG_LOG(DBG_PKTIO, "Closed dumper %s", props.path.c_str()); + } + +void PktDumper::Error(const std::string& msg) + { + errmsg = msg; + + DBG_LOG(DBG_PKTIO, "Error with dumper %s: %s", + IsOpen() ? 
props.path.c_str() : "", + msg.c_str()); + } diff --git a/src/iosource/pktsrc/PktDumper.h b/src/iosource/pktsrc/PktDumper.h new file mode 100644 index 0000000000..b8f3595e32 --- /dev/null +++ b/src/iosource/pktsrc/PktDumper.h @@ -0,0 +1,57 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef IOSOURCE_PKTSRC_PKTDUMPER_H +#define IOSOURCE_PKTSRC_PKTDUMPER_H + +#include "../IOSource.h" + +namespace iosource { + +class PktDumper { +public: + struct Packet { + const struct pcap_pkthdr* hdr; + const u_char* data; + }; + + PktDumper(); + virtual ~PktDumper(); + + const std::string& Path() const; + bool IsOpen() const; + double OpenTime() const; + bool IsError() const; + const std::string& ErrorMsg() const; + int HdrSize() const; + bool Record(const Packet* pkt); + + virtual void Close() = 0; + virtual void Open() = 0; + +protected: + // Methods to use by derived classed. + // + struct Properties { + std::string path; + int hdr_size; + double open_time; + }; + + void Opened(const Properties& props); + void Closed(); + void Error(const std::string& msg); + + // PktSrc interface for derived classes to implement. + + virtual bool Dump(const Packet* pkt) = 0; + +private: + bool is_open; + Properties props; + + std::string errmsg; +}; + +} + +#endif diff --git a/src/iosource/pktsrc/PktSrc.cc b/src/iosource/pktsrc/PktSrc.cc new file mode 100644 index 0000000000..703a2d634b --- /dev/null +++ b/src/iosource/pktsrc/PktSrc.cc @@ -0,0 +1,411 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include +#include + +#include "config.h" + +#include "util.h" +#include "PktSrc.h" +#include "Hash.h" +#include "Net.h" +#include "Sessions.h" + +using namespace iosource; + +PktSrc::PktSrc() + { + have_packet = false; + errbuf = ""; + + next_sync_point = 0; + first_timestamp = 0.0; + first_wallclock = current_wallclock = 0; + } + +PktSrc::~PktSrc() + { + } + +const std::string& PktSrc::Path() const + { + static std::string not_open("not open"); + return IsOpen() ? props.path : not_open; + } + +const char* PktSrc::ErrorMsg() const + { + return errbuf.c_str(); + } + +int PktSrc::LinkType() const + { + return IsOpen() ? props.link_type : -1; + } + +int PktSrc::HdrSize() const + { + return IsOpen() ? props.hdr_size : -1; + } + +int PktSrc::SnapLen() const + { + return snaplen; // That's a global. Change? + } + +bool PktSrc::IsLive() const + { + return props.is_live; + } + +double PktSrc::CurrentPacketTimestamp() + { + return current_pseudo; + } + +double PktSrc::CurrentPacketWallClock() + { + // We stop time when we are suspended. + if ( net_is_processing_suspended() ) + current_wallclock = current_time(true); + + return current_wallclock; + } + +void PktSrc::Opened(const Properties& arg_props) + { + props = arg_props; + SetClosed(false); + + DBG_LOG(DBG_PKTIO, "Opened source %s", props.path.c_str()); + } + +void PktSrc::Closed() + { + SetClosed(true); + + DBG_LOG(DBG_PKTIO, "Closed source %s", props.path.c_str()); + } + +void PktSrc::Error(const std::string& msg) + { + // We don't report this immediately, Bro will ask us for the error + // once it notices we aren't open. + errbuf = msg; + DBG_LOG(DBG_PKTIO, "Error with source %s: %s", + IsOpen() ? 
props.path.c_str() : "", + msg.c_str()); + } + +void PktSrc::Info(const std::string& msg) + { + reporter->Info("%s", msg.c_str()); + } + +void PktSrc::Weird(const std::string& msg, const Packet* p) + { + sessions->Weird(msg.c_str(), p->hdr, p->data, 0); + } + +void PktSrc::InternalError(const std::string& msg) + { + reporter->InternalError("%s", msg.c_str()); + } + +void PktSrc::ContinueAfterSuspend() + { + current_wallclock = current_time(true); + } + +int PktSrc::GetLinkHeaderSize(int link_type) + { + switch ( link_type ) { + case DLT_NULL: + return 4; + + case DLT_EN10MB: + return 14; + + case DLT_FDDI: + return 13 + 8; // fddi_header + LLC + +#ifdef DLT_LINUX_SLL + case DLT_LINUX_SLL: + return 16; +#endif + + case DLT_PPP_SERIAL: // PPP_SERIAL + return 4; + + case DLT_RAW: + return 0; + } + + return -1; + } + +double PktSrc::CheckPseudoTime() + { + if ( ! IsOpen() ) + return 0; + + if ( ! ExtractNextPacketInternal() ) + return 0; + + if ( remote_trace_sync_interval ) + { + if ( next_sync_point == 0 || current_packet.ts >= next_sync_point ) + { + int n = remote_serializer->SendSyncPoint(); + next_sync_point = first_timestamp + + n * remote_trace_sync_interval; + remote_serializer->Log(RemoteSerializer::LogInfo, + fmt("stopping at packet %.6f, next sync-point at %.6f", + current_packet.ts, next_sync_point)); + + return 0; + } + } + + double pseudo_time = current_packet.ts - first_timestamp; + double ct = (current_time(true) - first_wallclock) * pseudo_realtime; + + return pseudo_time <= ct ? bro_start_time + pseudo_time : 0; + } + +void PktSrc::Init() + { + Open(); + } + +void PktSrc::Done() + { + Close(); + } + +void PktSrc::GetFds(int* read, int* write, int* except) + { + if ( pseudo_realtime ) + { + // Select would give erroneous results. But we simulate it + // by setting idle accordingly. 
+ SetIdle(CheckPseudoTime() == 0); + return; + } + + if ( IsOpen() && props.selectable_fd >= 0 ) + *read = props.selectable_fd; + } + +double PktSrc::NextTimestamp(double* local_network_time) + { + if ( ! IsOpen() ) + return -1.0; + + if ( ! ExtractNextPacketInternal() ) + return -1.0; + + if ( pseudo_realtime ) + { + // Delay packet if necessary. + double packet_time = CheckPseudoTime(); + if ( packet_time ) + return packet_time; + + SetIdle(true); + return -1.0; + } + + return current_packet.ts; + } + +void PktSrc::Process() + { + if ( ! IsOpen() ) + return; + + if ( ! ExtractNextPacketInternal() ) + return; + + int pkt_hdr_size = props.hdr_size; + + // Unfortunately some packets on the link might have MPLS labels + // while others don't. That means we need to ask the link-layer if + // labels are in place. + bool have_mpls = false; + + int protocol = 0; + const u_char* data = current_packet.data; + + switch ( props.link_type ) { + case DLT_NULL: + { + protocol = (data[3] << 24) + (data[2] << 16) + (data[1] << 8) + data[0]; + + // From the Wireshark Wiki: "AF_INET6, unfortunately, has + // different values in {NetBSD,OpenBSD,BSD/OS}, + // {FreeBSD,DragonFlyBSD}, and {Darwin/Mac OS X}, so an IPv6 + // packet might have a link-layer header with 24, 28, or 30 + // as the AF_ value." As we may be reading traces captured on + // platforms other than what we're running on, we accept them + // all here. + if ( protocol != AF_INET + && protocol != AF_INET6 + && protocol != 24 + && protocol != 28 + && protocol != 30 ) + { + Weird("non_ip_packet_in_null_transport", ¤t_packet); + data = 0; + return; + } + + break; + } + + case DLT_EN10MB: + { + // Get protocol being carried from the ethernet frame. + protocol = (data[12] << 8) + data[13]; + + switch ( protocol ) + { + // MPLS carried over the ethernet frame. + case 0x8847: + have_mpls = true; + break; + + // VLAN carried over the ethernet frame. 
+ case 0x8100: + data += GetLinkHeaderSize(props.link_type); + data += 4; // Skip the vlan header + pkt_hdr_size = 0; + + // Check for 802.1ah (Q-in-Q) containing IP. + // Only do a second layer of vlan tag + // stripping because there is no + // specification that allows for deeper + // nesting. + if ( ((data[2] << 8) + data[3]) == 0x0800 ) + data += 4; + + break; + + // PPPoE carried over the ethernet frame. + case 0x8864: + data += GetLinkHeaderSize(props.link_type); + protocol = (data[6] << 8) + data[7]; + data += 8; // Skip the PPPoE session and PPP header + pkt_hdr_size = 0; + + if ( protocol != 0x0021 && protocol != 0x0057 ) + { + // Neither IPv4 nor IPv6. + Weird("non_ip_packet_in_pppoe_encapsulation", ¤t_packet); + data = 0; + return; + } + break; + } + + break; + } + + case DLT_PPP_SERIAL: + { + // Get PPP protocol. + protocol = (data[2] << 8) + data[3]; + + if ( protocol == 0x0281 ) + // MPLS Unicast + have_mpls = true; + + else if ( protocol != 0x0021 && protocol != 0x0057 ) + { + // Neither IPv4 nor IPv6. + Weird("non_ip_packet_in_ppp_encapsulation", ¤t_packet); + data = 0; + return; + } + break; + } + } + + if ( have_mpls ) + { + // Remove the data link layer + data += GetLinkHeaderSize(props.link_type); + + // Denote a header size of zero before the IP header + pkt_hdr_size = 0; + + // Skip the MPLS label stack. + bool end_of_stack = false; + + while ( ! end_of_stack ) + { + end_of_stack = *(data + 2) & 0x01; + data += 4; + } + } + + if ( pseudo_realtime ) + { + current_pseudo = CheckPseudoTime(); + net_packet_arrival(current_pseudo, current_packet.hdr, current_packet.data, pkt_hdr_size, this); + if ( ! 
first_wallclock ) + first_wallclock = current_time(true); + } + + else + net_packet_arrival(current_packet.ts, current_packet.hdr, current_packet.data, pkt_hdr_size, this); + + have_packet = 0; + DoneWithPacket(¤t_packet); + } + +const char* PktSrc::Tag() + { + return "PktSrc"; + } + +int PktSrc::ExtractNextPacketInternal() + { + if ( have_packet ) + return true; + + have_packet = false; + + // Don't return any packets if processing is suspended (except for the + // very first packet which we need to set up times). + if ( net_is_processing_suspended() && first_timestamp ) + { + SetIdle(true); + return 0; + } + + if ( pseudo_realtime ) + current_wallclock = current_time(true); + + if ( ExtractNextPacket(¤t_packet) ) + { + if ( ! first_timestamp ) + first_timestamp = current_packet.ts; + + have_packet = true; + return 1; + } + + if ( pseudo_realtime && using_communication && ! IsOpen() ) + { + // Source has gone dry, we're done. + if ( remote_trace_sync_interval ) + remote_serializer->SendFinalSyncPoint(); + else + remote_serializer->Terminate(); + } + + SetIdle(true); + return 0; + } + diff --git a/src/iosource/pktsrc/PktSrc.h b/src/iosource/pktsrc/PktSrc.h new file mode 100644 index 0000000000..3c3436bb19 --- /dev/null +++ b/src/iosource/pktsrc/PktSrc.h @@ -0,0 +1,140 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#ifndef IOSOURCE_PKTSRC_PKTSRC_H +#define IOSOURCE_PKTSRC_PKTSRC_H + +#include "../IOSource.h" + +struct pcap_pkthdr; + +namespace iosource { + +class PktSrc : public IOSource { +public: + struct Stats { + unsigned int received; // pkts received (w/o drops) + unsigned int dropped; // pkts dropped + unsigned int link; // total packets on link + // (not always not available) + // + Stats() { received = dropped = link = 0; } + }; + + PktSrc(); + virtual ~PktSrc(); + + const std::string& Path() const; + const std::string& Filter() const; + bool IsLive() const; + int LinkType() const; + const char* ErrorMsg() const; + int HdrSize() const; + int SnapLen() const; + + // Only valid in pseudo-realtime mode. + double CurrentPacketTimestamp(); + double CurrentPacketWallClock(); + + // Signal packet source that processing was suspended and is now + // going to be continued. + void ContinueAfterSuspend(); + + virtual void Statistics(Stats* stats) = 0; + + // Returns the packet last processed; false if there is no + // current packet available. + virtual bool GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt) = 0; + + // Precompiles a filter and associates the given index with it. + // Returns true on success, 0 if a problem occurred or filtering is + // not supported. + virtual int PrecompileFilter(int index, const std::string& filter) = 0; + + // Activates the filter with the given index. Returns true on + // success, 0 if a problem occurred or the filtering is not + // supported. + virtual int SetFilter(int index) = 0; + + static int GetLinkHeaderSize(int link_type); + +#if 0 + PktSrc_Filter_Type FilterType() const { return filter_type; } + + void AddSecondaryTablePrograms(); + const secondary_program_list& ProgramTable() const + { return program_list; } +#endif + +protected: + // Methods to use by derived classes. + + struct Properties { + std::string path; + std::string filter; // Maybe different than what's passed in if not (directly) supported. 
+ int selectable_fd; + int link_type; + int hdr_size; + bool is_live; + }; + + struct Packet { + double ts; + const struct ::pcap_pkthdr* hdr; + const u_char* data; + }; + + void Opened(const Properties& props); + void Closed(); + void Info(const std::string& msg); + void Error(const std::string& msg); + void Weird(const std::string& msg, const Packet* pkt); + void InternalError(const std::string& msg); + + // PktSrc interface for derived classes to implement. + + virtual void Open() = 0; + virtual void Close() = 0; + // Returns 1 on success, 0 on time-out/gone dry. + virtual int ExtractNextPacket(Packet* pkt) = 0; + virtual void DoneWithPacket(Packet* pkt) = 0; + +private: + // Checks if the current packet has a pseudo-time <= current_time. + // If yes, returns pseudo-time, otherwise 0. + double CheckPseudoTime(); + + // XXX + int ExtractNextPacketInternal(); + + // IOSource interface implementation. + virtual void Init(); + virtual void Done(); + virtual void GetFds(int* read, int* write, int* except); + virtual double NextTimestamp(double* local_network_time); + virtual void Process(); + virtual const char* Tag(); + + Properties props; + + bool have_packet; + Packet current_packet; + + // Only set in pseudo-realtime mode. + double first_timestamp; + double first_wallclock; + double current_wallclock; + double current_pseudo; + double next_sync_point; // For trace synchronziation in pseudo-realtime + + std::string errbuf; + +#if 0 + PktSrc_Filter_Type filter_type; // normal path or secondary path + secondary_program_list program_list; +#endif +}; + +} + + +#endif diff --git a/src/iosource/pktsrc/old-2ndary-code.h b/src/iosource/pktsrc/old-2ndary-code.h new file mode 100644 index 0000000000..0b47cccdc5 --- /dev/null +++ b/src/iosource/pktsrc/old-2ndary-code.h @@ -0,0 +1,69 @@ +// Whether a PktSrc object is used by the normal filter structure or the +// secondary-path structure. 
+typedef enum { + TYPE_FILTER_NORMAL, // the normal filter + TYPE_FILTER_SECONDARY, // the secondary-path filter +} PktSrc_Filter_Type; + +// {filter,event} tuples conforming the secondary path. +class SecondaryEvent { +public: + SecondaryEvent(const char* arg_filter, Func* arg_event) + { + filter = arg_filter; + event = arg_event; + } + + const char* Filter() { return filter; } + Func* Event() { return event; } + +private: + const char* filter; + Func* event; +}; + +declare(PList,SecondaryEvent); +typedef PList(SecondaryEvent) secondary_event_list; + +class SecondaryPath { +public: + SecondaryPath(); + ~SecondaryPath(); + + secondary_event_list& EventTable() { return event_list; } + const char* Filter() { return filter; } + +private: + secondary_event_list event_list; + // OR'ed union of all SecondaryEvent filters + char* filter; +}; + +// Main secondary-path object. +extern SecondaryPath* secondary_path; + +// {program, {filter,event}} tuple table. +class SecondaryProgram { +public: + SecondaryProgram(BPF_Program* arg_program, SecondaryEvent* arg_event) + { + program = arg_program; + event = arg_event; + } + + ~SecondaryProgram(); + + BPF_Program* Program() { return program; } + SecondaryEvent* Event() { return event; } + +private: + // Associated program. + BPF_Program *program; + + // Event that is run in case the program is matched. 
+ SecondaryEvent* event; +}; + +declare(PList,SecondaryProgram); +typedef PList(SecondaryProgram) secondary_program_list; + diff --git a/src/BPF_Program.cc b/src/iosource/pktsrc/pcap/BPF_Program.cc similarity index 100% rename from src/BPF_Program.cc rename to src/iosource/pktsrc/pcap/BPF_Program.cc diff --git a/src/BPF_Program.h b/src/iosource/pktsrc/pcap/BPF_Program.h similarity index 100% rename from src/BPF_Program.h rename to src/iosource/pktsrc/pcap/BPF_Program.h diff --git a/src/iosource/pktsrc/pcap/CMakeLists.txt b/src/iosource/pktsrc/pcap/CMakeLists.txt new file mode 100644 index 0000000000..b43d51b0ca --- /dev/null +++ b/src/iosource/pktsrc/pcap/CMakeLists.txt @@ -0,0 +1,8 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro Pcap) +bro_plugin_cc(Source.cc Dumper.cc BPF_Program.cc Plugin.cc) +bro_plugin_end() diff --git a/src/iosource/pktsrc/pcap/Dumper.cc b/src/iosource/pktsrc/pcap/Dumper.cc new file mode 100644 index 0000000000..ad1ac2fa73 --- /dev/null +++ b/src/iosource/pktsrc/pcap/Dumper.cc @@ -0,0 +1,111 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include +#include + +#include "Dumper.h" +#include "../PktSrc.h" + +using namespace iosource::pktsrc; + +PcapDumper::PcapDumper(const std::string& path, bool arg_append) + { + append = arg_append; + props.path = path; + dumper = 0; + pd = 0; + } + +PcapDumper::~PcapDumper() + { + } + +void PcapDumper::Open() + { + int linktype = -1; + + pd = pcap_open_dead(DLT_EN10MB, 8192); + if ( ! pd ) + { + Error("error for pcap_open_dead"); + return; + } + + if ( props.path.empty() ) + { + Error("no filename given"); + return; + } + + struct stat s; + int exists = 0; + + if ( append ) + { + // See if output file already exists (and is non-empty). 
+ exists = stat(props.path.c_str(), &s); ; + + if ( exists < 0 && errno != ENOENT ) + { + Error(fmt("can't stat file %s: %s", props.path.c_str(), strerror(errno))); + return; + } + } + + if ( ! append || exists < 0 || s.st_size == 0 ) + { + // Open new file. + dumper = pcap_dump_open(pd, props.path.c_str()); + if ( ! dumper ) + { + Error(pcap_geterr(pd)); + return; + } + } + + else + { + // Old file and we need to append, which, unfortunately, + // is not supported by libpcap. So, we have to hack a + // little bit, knowing that pcap_dumpter_t is, in fact, + // a FILE ... :-( + dumper = (pcap_dumper_t*) fopen(props.path.c_str(), "a"); + if ( ! dumper ) + { + Error(fmt("can't open dump %s: %s", props.path.c_str(), strerror(errno))); + return; + } + } + + props.open_time = network_time; + props.hdr_size = PktSrc::GetLinkHeaderSize(pcap_datalink(pd)); + Opened(props); + } + +void PcapDumper::Close() + { + if ( ! dumper ) + return; + + pcap_dump_close(dumper); + pcap_close(pd); + dumper = 0; + pd = 0; + + Closed(); + } + +bool PcapDumper::Dump(const Packet* pkt) + { + if ( ! dumper ) + return false; + + pcap_dump((u_char*) dumper, pkt->hdr, pkt->data); + + return true; + } + +iosource::PktDumper* PcapDumper::Instantiate(const std::string& path, bool append) + { + return new PcapDumper(path, append); + } diff --git a/src/iosource/pktsrc/pcap/Dumper.h b/src/iosource/pktsrc/pcap/Dumper.h new file mode 100644 index 0000000000..c2762a2b04 --- /dev/null +++ b/src/iosource/pktsrc/pcap/Dumper.h @@ -0,0 +1,40 @@ + +#ifndef IOSOURCE_PKTSRC_PCAP_DUMPER_H +#define IOSOURCE_PKTSRC_PCAP_DUMPER_H + +extern "C" { +#include +} + +#include "../PktDumper.h" + +namespace iosource { +namespace pktsrc { + +class PcapDumper : public PktDumper { +public: + PcapDumper(const std::string& path, bool append); + virtual ~PcapDumper(); + + static PktDumper* Instantiate(const std::string& path, bool appen); + +protected: + // PktDumper interface. 
+ virtual void Open(); + virtual void Close(); + virtual bool Dump(const Packet* pkt); + +private: + Properties props; + + bool append; + pcap_dumper_t* dumper; + pcap_t* pd; +}; + +} +} + +#endif + + diff --git a/src/iosource/pktsrc/pcap/Plugin.cc b/src/iosource/pktsrc/pcap/Plugin.cc new file mode 100644 index 0000000000..307f2da99f --- /dev/null +++ b/src/iosource/pktsrc/pcap/Plugin.cc @@ -0,0 +1,12 @@ + +#include "plugin/Plugin.h" +#include "iosource/pktsrc/Component.h" + +#include "Source.h" +#include "Dumper.h" + +BRO_PLUGIN_BEGIN(Bro, Pcap) + BRO_PLUGIN_DESCRIPTION("Packet I/O via libpcap"); + BRO_PLUGIN_PKTSRC("PcapReader", "pcap", SourceComponent::BOTH, PcapSource); + BRO_PLUGIN_PKTDUMPER("PcapTraceWriter", "pcap", PcapDumper); +BRO_PLUGIN_END diff --git a/src/iosource/pktsrc/pcap/Source.cc b/src/iosource/pktsrc/pcap/Source.cc new file mode 100644 index 0000000000..86c0273adf --- /dev/null +++ b/src/iosource/pktsrc/pcap/Source.cc @@ -0,0 +1,343 @@ + +#include "config.h" + +#include "Source.h" + +#ifdef HAVE_PCAP_INT_H +#include +#endif + +using namespace iosource::pktsrc; + +PcapSource::~PcapSource() + { + Close(); + } + +PcapSource::PcapSource(const std::string& path, const std::string& filter, bool is_live) + { + props.path = path; + props.filter = filter; + props.is_live = is_live; + last_data = 0; + } + +void PcapSource::Open() + { + if ( props.is_live ) + OpenLive(); + else + OpenOffline(); + } + +void PcapSource::Close() + { + if ( ! pd ) + return; + + BPF_Program* code; + IterCookie* cookie = filters.InitForIteration(); + while ( (code = filters.NextEntry(cookie)) ) + delete code; + + filters.Clear(); + + pcap_close(pd); + pd = 0; + last_data = 0; + + Closed(); + } + +void PcapSource::OpenLive() + { + char errbuf[PCAP_ERRBUF_SIZE]; + char tmp_errbuf[PCAP_ERRBUF_SIZE]; + +#if 0 + filter_type = ft; +#endif + + // Determine interface if not specified. 
+ if ( props.path.empty() ) + props.path = pcap_lookupdev(tmp_errbuf); + + if ( props.path.empty() ) + { + safe_snprintf(errbuf, sizeof(errbuf), + "pcap_lookupdev: %s", tmp_errbuf); + Error(errbuf); + return; + } + + // Determine network and netmask. + uint32 net; + if ( pcap_lookupnet(props.path.c_str(), &net, &netmask, tmp_errbuf) < 0 ) + { + // ### The lookup can fail if no address is assigned to + // the interface; and libpcap doesn't have any useful notion + // of error codes, just error std::strings - how bogus - so we + // just kludge around the error :-(. + // sprintf(errbuf, "pcap_lookupnet %s", tmp_errbuf); + // return; + netmask = 0xffffff00; + } + + // We use the smallest time-out possible to return almost immediately if + // no packets are available. (We can't use set_nonblocking() as it's + // broken on FreeBSD: even when select() indicates that we can read + // something, we may get nothing if the store buffer hasn't filled up + // yet.) + pd = pcap_open_live(props.path.c_str(), SnapLen(), 1, 1, tmp_errbuf); + + if ( ! pd ) + { + safe_snprintf(errbuf, sizeof(errbuf), + "pcap_open_live: %s", tmp_errbuf); + Error(errbuf); + return; + } + + // ### This needs autoconf'ing. +#ifdef HAVE_PCAP_INT_H + Info("pcap bufsize = %d\n", ((struct pcap *) pd)->bufsize); +#endif + +#ifdef HAVE_LINUX + if ( pcap_setnonblock(pd, 1, tmp_errbuf) < 0 ) + { + safe_snprintf(errbuf, sizeof(errbuf), + "pcap_setnonblock: %s", tmp_errbuf); + Error(errbuf); + pcap_close(pd); + return; + } +#endif + + props.selectable_fd = pcap_fileno(pd); + + if ( PrecompileFilter(0, props.filter) && SetFilter(0) ) + { + SetHdrSize(); + + if ( ! pd ) + // Was closed, couldn't get header size. 
+ return; + + Info(fmt("listening on %s, capture length %d bytes\n", props.path.c_str(), SnapLen())); + } + else + Close(); + + props.is_live = true; + Opened(props); + } + +void PcapSource::OpenOffline() + { + char errbuf[PCAP_ERRBUF_SIZE]; + char tmp_errbuf[PCAP_ERRBUF_SIZE]; + +#if 0 + filter_type = ft; +#endif + + pd = pcap_open_offline(props.path.c_str(), errbuf); + + if ( ! pd ) + { + safe_snprintf(errbuf, sizeof(errbuf), + "pcap_open_offline: %s", tmp_errbuf); + Error(errbuf); + return; + } + + if ( PrecompileFilter(0, props.filter) && SetFilter(0) ) + { + SetHdrSize(); + + if ( ! pd ) + // Was closed, unknown link layer type. + return; + + // We don't put file sources into non-blocking mode as + // otherwise we would not be able to identify the EOF. + + props.selectable_fd = fileno(pcap_file(pd)); + + if ( props.selectable_fd < 0 ) + InternalError("OS does not support selectable pcap fd"); + } + + else + Close(); + + props.is_live = false; + Opened(props); + } + +int PcapSource::ExtractNextPacket(Packet* pkt) + { + const u_char* data = pcap_next(pd, ¤t_hdr); + + if ( ! data ) + { + // Source has gone dry. If it's a network interface, this just means + // it's timed out. If it's a file, though, then the file has been + // exhausted. + if ( ! props.is_live ) + Close(); + + return 0; + } + + pkt->ts = current_hdr.ts.tv_sec + double(current_hdr.ts.tv_usec) / 1e6; + pkt->hdr = ¤t_hdr; + pkt->data = last_data = data; + + if ( current_hdr.len == 0 || current_hdr.caplen == 0 ) + { + Weird("empty_pcap_header", pkt); + return 0; + } + + last_hdr = current_hdr; + last_data = data; + ++stats.received; + return 1; + } + +void PcapSource::DoneWithPacket(Packet* pkt) + { + // Nothing to do. + } + +int PcapSource::PrecompileFilter(int index, const std::string& filter) + { + char errbuf[PCAP_ERRBUF_SIZE]; + + // Compile filter. + BPF_Program* code = new BPF_Program(); + + if ( ! 
code->Compile(pd, filter.c_str(), netmask, errbuf, sizeof(errbuf)) ) + { + PcapError(); + delete code; + return 0; + } + + // Store it in hash. + HashKey* hash = new HashKey(HashKey(bro_int_t(index))); + BPF_Program* oldcode = filters.Lookup(hash); + if ( oldcode ) + delete oldcode; + + filters.Insert(hash, code); + delete hash; + + return 1; + } + +int PcapSource::SetFilter(int index) + { + char errbuf[PCAP_ERRBUF_SIZE]; + +#if 0 + // We don't want load-level filters for the secondary path. + if ( filter_type == TYPE_FILTER_SECONDARY && index > 0 ) + return 1; +#endif + + HashKey* hash = new HashKey(HashKey(bro_int_t(index))); + BPF_Program* code = filters.Lookup(hash); + delete hash; + + if ( ! code ) + { + safe_snprintf(errbuf, sizeof(errbuf), + "No precompiled pcap filter for index %d", + index); + Error(errbuf); + return 0; + } + + if ( pcap_setfilter(pd, code->GetProgram()) < 0 ) + { + PcapError(); + return 0; + } + +#ifndef HAVE_LINUX + // Linux doesn't clear counters when resetting filter. + stats.received = stats.dropped = stats.link = 0; +#endif + + return 1; + } + +void PcapSource::Statistics(Stats* s) + { + char errbuf[PCAP_ERRBUF_SIZE]; + + if ( ! props.is_live ) + s->received = s->dropped = s->link = 0; + + else + { + struct pcap_stat pstat; + if ( pcap_stats(pd, &pstat) < 0 ) + { + PcapError(); + s->received = s->dropped = s->link = 0; + } + + else + { + s->dropped = pstat.ps_drop; + s->link = pstat.ps_recv; + } + } + + s->received = stats.received; + + if ( ! props.is_live ) + s->dropped = 0; + } + +bool PcapSource::GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt) + { + if ( ! 
last_data ) + return false; + + *hdr = &last_hdr; + *pkt = last_data; + return true; + } + +void PcapSource::PcapError() + { + assert(pd); + Error(fmt("pcap_error: %s", pcap_geterr(pd))); + Close(); + } + +void PcapSource::SetHdrSize() + { + char errbuf[PCAP_ERRBUF_SIZE]; + + props.link_type = pcap_datalink(pd); + props.hdr_size = GetLinkHeaderSize(props.link_type); + + if ( props.hdr_size < 0 ) + { + safe_snprintf(errbuf, sizeof(errbuf), + "unknown data link type 0x%x", props.link_type); + Error(errbuf); + Close(); + } + } + +iosource::PktSrc* PcapSource::Instantiate(const std::string& path, const std::string& filter, bool is_live) + { + return new PcapSource(path, filter, is_live); + } diff --git a/src/iosource/pktsrc/pcap/Source.h b/src/iosource/pktsrc/pcap/Source.h new file mode 100644 index 0000000000..9f1d7c7eb8 --- /dev/null +++ b/src/iosource/pktsrc/pcap/Source.h @@ -0,0 +1,60 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef IOSOURCE_PKTSRC_PCAP_SOURCE_H +#define IOSOURCE_PKTSRC_PCAP_SOURCE_H + +extern "C" { +#include +} + +#include "../PktSrc.h" +#include "BPF_Program.h" +#include "Dict.h" + +declare(PDict,BPF_Program); + +namespace iosource { +namespace pktsrc { + +class PcapSource : public iosource::PktSrc { +public: + // XXX + PcapSource(const std::string& path, const std::string& filter, bool is_live); + + virtual ~PcapSource(); + + static PktSrc* Instantiate(const std::string& path, const std::string& filter, bool is_live); + +protected: + // PktSrc interface. 
+ virtual void Open(); + virtual void Close(); + virtual int ExtractNextPacket(Packet* pkt); + virtual void DoneWithPacket(Packet* pkt); + virtual int PrecompileFilter(int index, const std::string& filter); + virtual int SetFilter(int index); + virtual void Statistics(Stats* stats); + virtual bool GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt); + +private: + void OpenLive(); + void OpenOffline(); + void PcapError(); + void SetHdrSize(); + + Properties props; + Stats stats; + + pcap_t *pd; + uint32 netmask; + PDict(BPF_Program) filters; + + struct pcap_pkthdr current_hdr; + struct pcap_pkthdr last_hdr; + const u_char* last_data; +}; + +} +} + +#endif diff --git a/src/main.cc b/src/main.cc index 53a0cb20ee..cf4bd423b6 100644 --- a/src/main.cc +++ b/src/main.cc @@ -62,6 +62,7 @@ extern "C" void OPENSSL_add_all_algorithms_conf(void); #include "plugin/Manager.h" #include "file_analysis/Manager.h" #include "broxygen/Manager.h" +#include "iosource/Manager.h" #include "binpac_bro.h" @@ -100,6 +101,7 @@ plugin::Manager* plugin_mgr = 0; analyzer::Manager* analyzer_mgr = 0; file_analysis::Manager* file_mgr = 0; broxygen::Manager* broxygen_mgr = 0; +iosource::Manager* iosource_mgr = 0; Stmt* stmts; EventHandlerPtr net_done = 0; RuleMatcher* rule_matcher = 0; @@ -116,7 +118,10 @@ int signal_val = 0; int optimize = 0; int do_notice_analysis = 0; int rule_bench = 0; +int generate_documentation = 0; +#if 0 SecondaryPath* secondary_path = 0; +#endif extern char version[]; char* command_line_policy = 0; vector params; @@ -379,7 +384,9 @@ void terminate_bro() delete event_serializer; delete state_serializer; delete event_registry; +#if 0 delete secondary_path; +#endif delete remote_serializer; delete analyzer_mgr; delete log_mgr; @@ -845,6 +852,7 @@ int main(int argc, char** argv) input_mgr = new input::Manager(); plugin_mgr = new plugin::Manager(); file_mgr = new file_analysis::Manager(); + iosource_mgr = new iosource::Manager(); plugin_mgr->InitPreScript(); 
analyzer_mgr->InitPreScript(); @@ -967,13 +975,15 @@ int main(int argc, char** argv) snaplen = internal_val("snaplen")->AsCount(); +#if 0 // Initialize the secondary path, if it's needed. secondary_path = new SecondaryPath(); +#endif if ( dns_type != DNS_PRIME ) net_init(interfaces, read_files, netflows, flow_files, writefile, "", - secondary_path->Filter(), do_watchdog); + "", do_watchdog); BroFile::SetDefaultRotation(log_rotate_interval, log_max_size); @@ -1132,9 +1142,9 @@ int main(int argc, char** argv) have_pending_timers = ! reading_traces && timer_mgr->Size() > 0; - io_sources.Register(thread_mgr, true); + iosource_mgr->Register(thread_mgr, true); - if ( io_sources.Size() > 0 || + if ( iosource_mgr->Size() > 0 || have_pending_timers || BifConst::exit_only_after_terminate ) { diff --git a/src/plugin/Component.cc b/src/plugin/Component.cc index ee18d55cdf..94de2c5446 100644 --- a/src/plugin/Component.cc +++ b/src/plugin/Component.cc @@ -43,6 +43,18 @@ void Component::Describe(ODesc* d) const d->Add("File Analyzer"); break; + case component::IOSOURCE: + d->Add("I/O Source"); + break; + + case component::PKTSRC: + d->Add("Packet Source"); + break; + + case component::PKTDUMPER: + d->Add("Packet Dumper"); + break; + default: reporter->InternalWarning("unknown component type in plugin::Component::Describe"); d->Add(""); diff --git a/src/plugin/Component.h b/src/plugin/Component.h index ad02dc7e4b..04248f54a9 100644 --- a/src/plugin/Component.h +++ b/src/plugin/Component.h @@ -3,6 +3,8 @@ #ifndef PLUGIN_COMPONENT_H #define PLUGIN_COMPONENT_H +#include + class ODesc; namespace plugin { @@ -16,7 +18,10 @@ enum Type { READER, /// An input reader (not currently used). WRITER, /// An logging writer (not currenly used). ANALYZER, /// A protocol analyzer. - FILE_ANALYZER /// A file analyzer. + FILE_ANALYZER, /// A file analyzer. + IOSOURCE, /// An I/O source, excluding packet sources. + PKTSRC, /// A packet source. + PKTDUMPER /// A packet dumper. 
}; } diff --git a/src/plugin/Macros.h b/src/plugin/Macros.h index 9362642e91..c2f8a2ed7a 100644 --- a/src/plugin/Macros.h +++ b/src/plugin/Macros.h @@ -130,4 +130,31 @@ #define BRO_PLUGIN_SUPPORT_ANALYZER(tag) \ AddComponent(new ::analyzer::Component(tag, 0)); +/** + * Defines a component implementing an IOSource (excluding packet sources). + * + * XXXX + * + */ +#define BRO_PLUGIN_IOSOURCE(tag, cls) \ + AddComponent(new ::iosource::pktsrc::Component(tag, iosource::cls::Instantiate)); + +/** + * Defines a component implementing a PktSrc. + * + * XXXX + * + */ +#define BRO_PLUGIN_PKTSRC(tag, prefix, type, cls) \ + AddComponent(new ::iosource::pktsrc::SourceComponent(tag, prefix, iosource::pktsrc::type, iosource::pktsrc::cls::Instantiate)); + +/** + * Defines a component implementing a PktDumper. + * + * XXXX + * + */ +#define BRO_PLUGIN_PKTDUMPER(tag, prefix, cls) \ + AddComponent(new ::iosource::pktsrc::DumperComponent(tag, prefix, iosource::pktsrc::cls::Instantiate)); + #endif diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 4491cd42b5..fdc422bd1f 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -11,7 +11,7 @@ Manager::Manager() did_process = true; next_beat = 0; terminating = false; - idle = true; + SetIdle(true); } Manager::~Manager() @@ -47,8 +47,8 @@ void Manager::Terminate() all_threads.clear(); msg_threads.clear(); - idle = true; - closed = true; + SetIdle(true); + SetClosed(true); terminating = false; } @@ -56,7 +56,7 @@ void Manager::AddThread(BasicThread* thread) { DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name()); all_threads.push_back(thread); - idle = false; + SetIdle(false); } void Manager::AddMsgThread(MsgThread* thread) diff --git a/src/threading/Manager.h b/src/threading/Manager.h index e839749a91..c94cc41aaa 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -4,7 +4,7 @@ #include -#include "IOSource.h" +#include "iosource/IOSource.h" #include "BasicThread.h" #include 
"MsgThread.h" @@ -21,7 +21,7 @@ namespace threading { * their outgoing message queue on a regular basis and feeds data sent into * the rest of Bro. It also triggers the regular heartbeats. */ -class Manager : public IOSource +class Manager : public iosource::IOSource { public: /** diff --git a/src/util.cc b/src/util.cc index ad55e3f75e..358e88b0e2 100644 --- a/src/util.cc +++ b/src/util.cc @@ -43,6 +43,7 @@ #include "NetVar.h" #include "Net.h" #include "Reporter.h" +#include "iosource/Manager.h" /** * Return IP address without enclosing brackets and any leading 0x. @@ -1351,11 +1352,13 @@ double current_time(bool real) double t = double(tv.tv_sec) + double(tv.tv_usec) / 1e6; - if ( ! pseudo_realtime || real || pkt_srcs.length() == 0 ) + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + if ( ! pseudo_realtime || real || pkt_srcs.empty() ) return t; // This obviously only works for a single source ... - PktSrc* src = pkt_srcs[0]; + iosource::PktSrc* src = pkt_srcs.front(); if ( net_is_processing_suspended() ) return src->CurrentPacketTimestamp(); From 61ee2b9172a10c3f0e483b90a0947fd54f3e989f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 11 Dec 2013 20:55:17 -0800 Subject: [PATCH 002/106] Fixing rebase relicts. 
--- src/CMakeLists.txt | 3 --- src/Net.h | 4 ---- 2 files changed, 7 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1aede44934..516426ea02 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -151,7 +151,6 @@ list(APPEND BINPAC_OUTPUTS "${BINPAC_OUTPUT_CC}") set(bro_SUBDIR_LIBS CACHE INTERNAL "subdir libraries" FORCE) set(bro_PLUGIN_LIBS CACHE INTERNAL "plugin libraries" FORCE) -add_subdirectory(iosource) add_subdirectory(analyzer) add_subdirectory(file_analysis) add_subdirectory(probabilistic) @@ -251,8 +250,6 @@ set(bro_SRCS Anon.cc Attr.cc Base64.cc - BroDoc.cc - BroDocObj.cc Brofiler.cc BroString.cc CCL.cc diff --git a/src/Net.h b/src/Net.h index 421bee5911..b9320181f9 100644 --- a/src/Net.h +++ b/src/Net.h @@ -4,11 +4,7 @@ #define net_h #include "net_util.h" -<<<<<<< HEAD #include "util.h" -#include "BPF_Program.h" -======= ->>>>>>> 5493253... Checkpoint. #include "List.h" #include "FlowSrc.h" #include "Func.h" From 9a9451af00e5243ed29a2c305761e33ad9829577 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 27 Jan 2014 15:19:32 -0800 Subject: [PATCH 003/106] A series of updates and tweaks to the new PktSrc interface. --- configure | 4 ++ src/Net.cc | 91 ++++++---------------------- src/iosource/Manager.cc | 6 +- src/iosource/pktsrc/CMakeLists.txt | 1 + src/iosource/pktsrc/Component.cc | 95 ++++++++++++++++++++++++++---- src/iosource/pktsrc/Component.h | 28 ++++++--- src/iosource/pktsrc/PktSrc.cc | 9 +++ src/iosource/pktsrc/PktSrc.h | 10 ++-- src/iosource/pktsrc/pcap/Plugin.cc | 4 +- src/iosource/pktsrc/pcap/Source.cc | 35 +++++++---- src/iosource/pktsrc/pcap/Source.h | 4 -- 11 files changed, 169 insertions(+), 118 deletions(-) diff --git a/configure b/configure index ba9bf58301..5a4596328c 100755 --- a/configure +++ b/configure @@ -64,6 +64,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]... 
--with-dataseries=PATH path to DataSeries and Lintel libraries --with-xml2=PATH path to libxml2 installation (for DataSeries) --with-curl=PATH path to libcurl install root (for ElasticSearch) + --with-netmap=PATH path to netmap distribution Packaging Options (for developers): --binary-package toggle special logic for binary packaging @@ -247,6 +248,9 @@ while [ $# -ne 0 ]; do --with-curl=*) append_cache_entry LibCURL_ROOT_DIR PATH $optarg ;; + --with-netmap=*) + append_cache_entry NETMAP_ROOT_DIR PATH $optarg + ;; --binary-package) append_cache_entry BINARY_PACKAGING_MODE BOOL true ;; diff --git a/src/Net.cc b/src/Net.cc index 629f2398d0..a1278c709b 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -167,90 +167,33 @@ void net_init(name_list& interfaces, name_list& readfiles, reading_traces = 1; for ( int i = 0; i < readfiles.length(); ++i ) - iosource_mgr->OpenPktSrc(readfiles[i], filter, false); + { + iosource::PktSrc* ps = iosource_mgr->OpenPktSrc(readfiles[i], filter, false); + assert(ps); + + if ( ps->ErrorMsg() ) + reporter->FatalError("%s: problem with trace file %s - %s\n", + prog, readfiles[i], + ps->ErrorMsg()); + } } -#if 0 - if ( secondary_filter ) - { - // We use a second PktFileSrc for the - // secondary path. - PktFileSrc* ps = new PktFileSrc(readfiles[i], - secondary_filter, - TYPE_FILTER_SECONDARY); - - if ( ! ps->IsOpen() ) - reporter->FatalError("%s: problem with trace file %s - %s\n", - prog, readfiles[i], - ps->ErrorMsg()); - else - { - pkt_srcs.append(ps); - io_sources.Register(ps); - } - - ps->AddSecondaryTablePrograms(); - } - - for ( int i = 0; i < flowfiles.length(); ++i ) - { - FlowFileSrc* fs = new FlowFileSrc(flowfiles[i]); - - if ( ! 
fs->IsOpen() ) - reporter->FatalError("%s: problem with netflow file %s - %s\n", - prog, flowfiles[i], fs->ErrorMsg()); - else - { - io_sources.Register(fs); - } - } -#endif - - else if ((interfaces.length() > 0 || netflows.length() > 0)) + else if ( interfaces.length() > 0 ) { reading_live = 1; reading_traces = 0; for ( int i = 0; i < interfaces.length(); ++i ) - iosource_mgr->OpenPktSrc(interfaces[i], filter, true); - } -#if 0 - - if ( secondary_filter ) - { - iosource::PktSrc* ps; - ps = new PktInterfaceSrc(interfaces[i], - filter, TYPE_FILTER_SECONDARY); - - if ( ! ps->IsOpen() ) - reporter->Error("%s: problem with interface %s - %s\n", - prog, interfaces[i], - ps->ErrorMsg()); - else - { - pkt_srcs.append(ps); - io_sources.Register(ps); - } - - ps->AddSecondaryTablePrograms(); - } - } - - for ( int i = 0; i < netflows.length(); ++i ) { - FlowSocketSrc* fs = new FlowSocketSrc(netflows[i]); + iosource::PktSrc* ps = iosource_mgr->OpenPktSrc(interfaces[i], filter, true); + assert(ps); - if ( ! fs->IsOpen() ) - { - reporter->Error("%s: problem with netflow socket %s - %s\n", - prog, netflows[i], fs->ErrorMsg()); - delete fs; - } - - else - io_sources.Register(fs); + if ( ps->ErrorMsg() ) + reporter->FatalError("%s: problem with interface %s - %s\n", + prog, interfaces[i], + ps->ErrorMsg()); } -#endif + } else // have_pending_timers = 1, possibly. We don't set diff --git a/src/iosource/Manager.cc b/src/iosource/Manager.cc index 06608c7afd..6c01e5e57b 100644 --- a/src/iosource/Manager.cc +++ b/src/iosource/Manager.cc @@ -231,7 +231,7 @@ PktSrc* Manager::OpenPktSrc(const std::string& path, const std::string& filter, { pktsrc::SourceComponent* c = *i; - if ( c->Prefix() == prefix && + if ( c->HandlesPrefix(prefix) && (( is_live && c->DoesLive() ) || (! is_live && c->DoesTrace())) ) { @@ -242,7 +242,7 @@ PktSrc* Manager::OpenPktSrc(const std::string& path, const std::string& filter, if ( ! 
component ) - reporter->FatalError("type of packet source '%s' not recognized", prefix.c_str()); + reporter->FatalError("type of packet source '%s' not recognized, or mode not supported", prefix.c_str()); // Instantiate packet source. @@ -279,7 +279,7 @@ PktDumper* Manager::OpenPktDumper(const string& path, bool append) for ( std::list::const_iterator i = all_components.begin(); i != all_components.end(); i++ ) { - if ( (*i)->Prefix() == prefix ) + if ( (*i)->HandlesPrefix(prefix) ) { component = (*i); break; diff --git a/src/iosource/pktsrc/CMakeLists.txt b/src/iosource/pktsrc/CMakeLists.txt index 07303b46a3..9c8a458c54 100644 --- a/src/iosource/pktsrc/CMakeLists.txt +++ b/src/iosource/pktsrc/CMakeLists.txt @@ -1,2 +1,3 @@ add_subdirectory(pcap) +add_subdirectory(netmap) diff --git a/src/iosource/pktsrc/Component.cc b/src/iosource/pktsrc/Component.cc index 7597e1aaa5..6caf743ff9 100644 --- a/src/iosource/pktsrc/Component.cc +++ b/src/iosource/pktsrc/Component.cc @@ -3,13 +3,14 @@ #include "Component.h" #include "../Desc.h" +#include "../Reporter.h" using namespace iosource::pktsrc; SourceComponent::SourceComponent(const std::string& arg_name, const std::string& arg_prefix, InputType arg_type, factory_callback arg_factory) : iosource::Component(plugin::component::PKTSRC, arg_name) { - prefix = arg_prefix; + tokenize_string(arg_prefix, ":", &prefixes); type = arg_type; factory = arg_factory; } @@ -18,9 +19,21 @@ SourceComponent::~SourceComponent() { } -const std::string& SourceComponent::Prefix() const +const std::vector& SourceComponent::Prefixes() const { - return prefix; + return prefixes; + } + +bool SourceComponent::HandlesPrefix(const string& prefix) const + { + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( *i == prefix ) + return true; + } + + return false; } bool SourceComponent::DoesLive() const @@ -43,16 +56,50 @@ void SourceComponent::Describe(ODesc* d) const { iosource::Component::Describe(d); - d->Add(" 
(interface prefix: "); - d->Add(prefix); + string prefs; + + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( prefs.size() ) + prefs += ", "; + + prefs += *i; + } + + d->Add(" (interface prefix"); + if ( prefixes.size() > 1 ) + d->Add("es"); + + d->Add(": "); + d->Add(prefs); + d->Add("; "); + + switch ( type ) { + case LIVE: + d->Add("live input"); + break; + + case TRACE: + d->Add("trace input"); + break; + + case BOTH: + d->Add("live and trace input"); + break; + + default: + reporter->InternalError("unknown PkrSrc type"); + } + d->Add(")"); } DumperComponent::DumperComponent(const std::string& name, const std::string& arg_prefix, factory_callback arg_factory) : plugin::Component(plugin::component::PKTDUMPER, name) { + tokenize_string(arg_prefix, ":", &prefixes); factory = arg_factory; - prefix = arg_prefix; } DumperComponent::~DumperComponent() @@ -64,17 +111,45 @@ DumperComponent::factory_callback DumperComponent::Factory() const return factory; } -const std::string& DumperComponent::Prefix() const +const std::vector& DumperComponent::Prefixes() const { - return prefix; + return prefixes; + } + +bool DumperComponent::HandlesPrefix(const string& prefix) const + { + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( *i == prefix ) + return true; + } + + return false; } void DumperComponent::Describe(ODesc* d) const { plugin::Component::Describe(d); - d->Add(" (dumper prefix: "); - d->Add(prefix); + string prefs; + + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( prefs.size() ) + prefs += ", "; + + prefs += *i; + } + + d->Add(" (dumper prefix"); + + if ( prefixes.size() > 1 ) + d->Add("es"); + + d->Add(": "); + d->Add(prefs); d->Add(")"); } diff --git a/src/iosource/pktsrc/Component.h b/src/iosource/pktsrc/Component.h index 31ed8a7180..0e4755d7b8 100644 --- a/src/iosource/pktsrc/Component.h +++ 
b/src/iosource/pktsrc/Component.h @@ -3,6 +3,8 @@ #ifndef IOSOURCE_PKTSRC_PLUGIN_COMPONENT_H #define IOSOURCE_PKTSRC_PLUGIN_COMPONENT_H +#include + #include "../Component.h" namespace iosource { @@ -24,7 +26,7 @@ public: /** * XXX */ - SourceComponent(const std::string& name, const std::string& prefix, InputType type, factory_callback factory); + SourceComponent(const std::string& name, const std::string& prefixes, InputType type, factory_callback factory); /** * Destructor. @@ -32,9 +34,14 @@ public: virtual ~SourceComponent(); /** - * Returns the prefix passes to the constructor. + * Returns the prefix(es) passed to the constructor. */ - const std::string& Prefix() const; + const std::vector& Prefixes() const; + + /** + * Returns true if the given prefix is among the one specified for the component. + */ + bool HandlesPrefix(const std::string& prefix) const; /** * Returns true if packet source instantiated by the component handle @@ -60,7 +67,7 @@ public: virtual void Describe(ODesc* d) const; private: - std::string prefix; + std::vector prefixes; InputType type; factory_callback factory; }; @@ -78,7 +85,7 @@ public: /** * XXX */ - DumperComponent(const std::string& name, const std::string& prefix, factory_callback factory); + DumperComponent(const std::string& name, const std::string& prefixes, factory_callback factory); /** * Destructor. @@ -86,9 +93,14 @@ public: ~DumperComponent(); /** - * Returns the prefix passes to the constructor. + * Returns the prefix(es) passed to the constructor. */ - const std::string& Prefix() const; + const std::vector& Prefixes() const; + + /** + * Returns true if the given prefix is among the one specified for the component. + */ + bool HandlesPrefix(const std::string& prefix) const; /** * Returns the source's factory function. 
@@ -102,7 +114,7 @@ public: virtual void Describe(ODesc* d) const; private: - std::string prefix; + std::vector prefixes; factory_callback factory; }; diff --git a/src/iosource/pktsrc/PktSrc.cc b/src/iosource/pktsrc/PktSrc.cc index 703a2d634b..c608f5267f 100644 --- a/src/iosource/pktsrc/PktSrc.cc +++ b/src/iosource/pktsrc/PktSrc.cc @@ -409,3 +409,12 @@ int PktSrc::ExtractNextPacketInternal() return 0; } +int PktSrc::PrecompileFilter(int index, const std::string& filter) + { + return 1; + } + +int PktSrc::SetFilter(int index) + { + return 1; + } diff --git a/src/iosource/pktsrc/PktSrc.h b/src/iosource/pktsrc/PktSrc.h index 3c3436bb19..edeecfe6cf 100644 --- a/src/iosource/pktsrc/PktSrc.h +++ b/src/iosource/pktsrc/PktSrc.h @@ -3,9 +3,11 @@ #ifndef IOSOURCE_PKTSRC_PKTSRC_H #define IOSOURCE_PKTSRC_PKTSRC_H -#include "../IOSource.h" +extern "C" { +#include +} -struct pcap_pkthdr; +#include "../IOSource.h" namespace iosource { @@ -48,12 +50,12 @@ public: // Precompiles a filter and associates the given index with it. // Returns true on success, 0 if a problem occurred or filtering is // not supported. - virtual int PrecompileFilter(int index, const std::string& filter) = 0; + virtual int PrecompileFilter(int index, const std::string& filter); // Activates the filter with the given index. Returns true on // success, 0 if a problem occurred or the filtering is not // supported. 
- virtual int SetFilter(int index) = 0; + virtual int SetFilter(int index); static int GetLinkHeaderSize(int link_type); diff --git a/src/iosource/pktsrc/pcap/Plugin.cc b/src/iosource/pktsrc/pcap/Plugin.cc index 884b5b2bfd..a412e4f650 100644 --- a/src/iosource/pktsrc/pcap/Plugin.cc +++ b/src/iosource/pktsrc/pcap/Plugin.cc @@ -12,8 +12,8 @@ class Plugin : public plugin::Plugin { public: plugin::Configuration Configure() { - AddComponent(new ::iosource::pktsrc::SourceComponent("PcapReader", "pcap", ::iosource::pktsrc::SourceComponent::BOTH, ::iosource::pktsrc::PcapSource::Instantiate)); - AddComponent(new ::iosource::pktsrc::DumperComponent("PcapWriter", "pcap", ::iosource::pktsrc::PcapDumper::Instantiate)); + AddComponent(new ::iosource::pktsrc::SourceComponent("PcapReader", "pcap", ::iosource::pktsrc::SourceComponent::BOTH, ::iosource::pktsrc::PcapSource::Instantiate)); + AddComponent(new ::iosource::pktsrc::DumperComponent("PcapWriter", "pcap", ::iosource::pktsrc::PcapDumper::Instantiate)); plugin::Configuration config; config.name = "Bro::Pcap"; diff --git a/src/iosource/pktsrc/pcap/Source.cc b/src/iosource/pktsrc/pcap/Source.cc index 86c0273adf..8165724871 100644 --- a/src/iosource/pktsrc/pcap/Source.cc +++ b/src/iosource/pktsrc/pcap/Source.cc @@ -1,4 +1,6 @@ +#include + #include "config.h" #include "Source.h" @@ -92,9 +94,7 @@ void PcapSource::OpenLive() if ( ! 
pd ) { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_open_live: %s", tmp_errbuf); - Error(errbuf); + Error(tmp_errbuf); return; } @@ -106,10 +106,7 @@ void PcapSource::OpenLive() #ifdef HAVE_LINUX if ( pcap_setnonblock(pd, 1, tmp_errbuf) < 0 ) { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_setnonblock: %s", tmp_errbuf); - Error(errbuf); - pcap_close(pd); + PcapError(); return; } #endif @@ -136,7 +133,6 @@ void PcapSource::OpenLive() void PcapSource::OpenOffline() { char errbuf[PCAP_ERRBUF_SIZE]; - char tmp_errbuf[PCAP_ERRBUF_SIZE]; #if 0 filter_type = ft; @@ -146,8 +142,6 @@ void PcapSource::OpenOffline() if ( ! pd ) { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_open_offline: %s", tmp_errbuf); Error(errbuf); return; } @@ -178,6 +172,9 @@ void PcapSource::OpenOffline() int PcapSource::ExtractNextPacket(Packet* pkt) { + if ( ! pd ) + return 0; + const u_char* data = pcap_next(pd, ¤t_hdr); if ( ! data ) @@ -214,6 +211,9 @@ void PcapSource::DoneWithPacket(Packet* pkt) int PcapSource::PrecompileFilter(int index, const std::string& filter) { + if ( ! pd ) + return 1; // Prevent error message. + char errbuf[PCAP_ERRBUF_SIZE]; // Compile filter. @@ -240,6 +240,9 @@ int PcapSource::PrecompileFilter(int index, const std::string& filter) int PcapSource::SetFilter(int index) { + if ( ! pd ) + return 1; // Prevent error message + char errbuf[PCAP_ERRBUF_SIZE]; #if 0 @@ -279,7 +282,7 @@ void PcapSource::Statistics(Stats* s) { char errbuf[PCAP_ERRBUF_SIZE]; - if ( ! props.is_live ) + if ( ! (props.is_live && pd) ) s->received = s->dropped = s->link = 0; else @@ -316,13 +319,19 @@ bool PcapSource::GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt) void PcapSource::PcapError() { - assert(pd); - Error(fmt("pcap_error: %s", pcap_geterr(pd))); + if ( pd ) + Error(fmt("pcap_error: %s", pcap_geterr(pd))); + else + Error("pcap_error: not open"); + Close(); } void PcapSource::SetHdrSize() { + if ( ! 
pd ) + return; + char errbuf[PCAP_ERRBUF_SIZE]; props.link_type = pcap_datalink(pd); diff --git a/src/iosource/pktsrc/pcap/Source.h b/src/iosource/pktsrc/pcap/Source.h index 9f1d7c7eb8..03b75c1ca7 100644 --- a/src/iosource/pktsrc/pcap/Source.h +++ b/src/iosource/pktsrc/pcap/Source.h @@ -3,10 +3,6 @@ #ifndef IOSOURCE_PKTSRC_PCAP_SOURCE_H #define IOSOURCE_PKTSRC_PCAP_SOURCE_H -extern "C" { -#include -} - #include "../PktSrc.h" #include "BPF_Program.h" #include "Dict.h" From 462fd68931d523a4c712e4531e7386c6e3c2c96c Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 27 Jan 2014 15:20:27 -0800 Subject: [PATCH 004/106] Prototype of a netmap packet source. TODO: Add userland BPF filtering so that our filters work. --- src/iosource/pktsrc/PktSrc.cc | 2 +- src/iosource/pktsrc/netmap/CMakeLists.txt | 12 ++ src/iosource/pktsrc/netmap/FindNetmap.cmake | 33 +++++ src/iosource/pktsrc/netmap/Plugin.cc | 26 ++++ src/iosource/pktsrc/netmap/Source.cc | 127 ++++++++++++++++++++ src/iosource/pktsrc/netmap/Source.h | 48 ++++++++ 6 files changed, 247 insertions(+), 1 deletion(-) create mode 100644 src/iosource/pktsrc/netmap/CMakeLists.txt create mode 100644 src/iosource/pktsrc/netmap/FindNetmap.cmake create mode 100644 src/iosource/pktsrc/netmap/Plugin.cc create mode 100644 src/iosource/pktsrc/netmap/Source.cc create mode 100644 src/iosource/pktsrc/netmap/Source.h diff --git a/src/iosource/pktsrc/PktSrc.cc b/src/iosource/pktsrc/PktSrc.cc index c608f5267f..66d18638b8 100644 --- a/src/iosource/pktsrc/PktSrc.cc +++ b/src/iosource/pktsrc/PktSrc.cc @@ -35,7 +35,7 @@ const std::string& PktSrc::Path() const const char* PktSrc::ErrorMsg() const { - return errbuf.c_str(); + return errbuf.size() ? 
errbuf.c_str() : 0; } int PktSrc::LinkType() const diff --git a/src/iosource/pktsrc/netmap/CMakeLists.txt b/src/iosource/pktsrc/netmap/CMakeLists.txt new file mode 100644 index 0000000000..a8a8a78a16 --- /dev/null +++ b/src/iosource/pktsrc/netmap/CMakeLists.txt @@ -0,0 +1,12 @@ + +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_SOURCE_DIR}) + +find_package(Netmap) + +if ( NETMAP_FOUND ) + include(BroPlugin) + include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${NETMAP_INCLUDE_DIR}/sys) + bro_plugin_begin(Bro Netmap) + bro_plugin_cc(Source.cc Plugin.cc) + bro_plugin_end() +endif () diff --git a/src/iosource/pktsrc/netmap/FindNetmap.cmake b/src/iosource/pktsrc/netmap/FindNetmap.cmake new file mode 100644 index 0000000000..a04da2a6a0 --- /dev/null +++ b/src/iosource/pktsrc/netmap/FindNetmap.cmake @@ -0,0 +1,33 @@ +# - Try to find netmap includes. +# +# +# Variables used by this module, they can change the default behaviour and need +# to be set before calling find_package: +# +# NETMAP_ROOT_DIR Set this variable to the root installation of +# netmap if the module has problems finding the +# proper installation path. +# +# Variables defined by this module: +# +# NETMAP_FOUND System has netmap API files. +# NETMAP_INCLUDE_DIR The netmap include directory. + +find_path(NETMAP_ROOT_DIR + NAMES sys/net/netmap_user.h +) + +find_path(NETMAP_INCLUDE_DIR + NAMES sys/net/netmap_user.h + HINTS ${NETMAP_ROOT_DIR} +) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(Netmap DEFAULT_MSG + NETMAP_INCLUDE_DIR +) + +mark_as_advanced( + NETMAP_ROOT_DIR + NETMAP_INCLUDE_DIR +) diff --git a/src/iosource/pktsrc/netmap/Plugin.cc b/src/iosource/pktsrc/netmap/Plugin.cc new file mode 100644 index 0000000000..05b4434e16 --- /dev/null +++ b/src/iosource/pktsrc/netmap/Plugin.cc @@ -0,0 +1,26 @@ +// See the file in the main distribution directory for copyright. 
+ +#include "plugin/Plugin.h" + +#include "Source.h" + +namespace plugin { +namespace Bro_Netmap { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::iosource::pktsrc::SourceComponent("NetmapReader", "netmap", ::iosource::pktsrc::SourceComponent::LIVE, ::iosource::pktsrc::NetmapSource::InstantiateNetmap)); + AddComponent(new ::iosource::pktsrc::SourceComponent("NetmapReader", "vale", ::iosource::pktsrc::SourceComponent::LIVE, ::iosource::pktsrc::NetmapSource::InstantiateVale)); + + plugin::Configuration config; + config.name = "Bro::Netmap"; + config.description = "Packet aquisition via netmap"; + return config; + } +} plugin; + +} +} + diff --git a/src/iosource/pktsrc/netmap/Source.cc b/src/iosource/pktsrc/netmap/Source.cc new file mode 100644 index 0000000000..b0569a5a0c --- /dev/null +++ b/src/iosource/pktsrc/netmap/Source.cc @@ -0,0 +1,127 @@ + +#include + +#include "config.h" + +#include "Source.h" + +using namespace iosource::pktsrc; + +NetmapSource::~NetmapSource() + { + Close(); + } + +NetmapSource::NetmapSource(const std::string& path, const std::string& filter, bool is_live, const std::string& arg_kind) + { + if ( ! is_live ) + Error("netmap source does not support offline input"); + + kind = arg_kind; + props.path = path; + props.filter = filter; + last_data = 0; + } + +void NetmapSource::Close() + { + if ( ! nd ) + return; + + nm_close(nd); + nd = 0; + last_data = 0; + + Closed(); + } + +void NetmapSource::Open() + { + std::string iface = kind + ":" + props.path; + nd = nm_open(iface.c_str(), getenv("NETMAP_RING_ID"), 0, 0); + + if ( ! nd ) + { + Error(errno ? 
strerror(errno) : "invalid interface"); + return; + } + + props.selectable_fd = NETMAP_FD(nd); + props.is_live = true; + props.link_type = DLT_EN10MB; + props.hdr_size = GetLinkHeaderSize(props.link_type); + assert(props.hdr_size >= 0); + + Info(fmt("netmap listening on %s\n", props.path.c_str())); + + Opened(props); + } + +int NetmapSource::ExtractNextPacket(Packet* pkt) + { + nm_hdr_t hdr; + const u_char* data = nm_nextpkt(nd, &hdr); + + if ( ! data ) + // Source has gone dry. + return 0; + + current_hdr.ts = hdr.ts; + current_hdr.caplen = hdr.caplen; + current_hdr.len = hdr.len; + + pkt->ts = current_hdr.ts.tv_sec + double(current_hdr.ts.tv_usec) / 1e6; + pkt->hdr = ¤t_hdr; + pkt->data = last_data = data; + + if ( current_hdr.len == 0 || current_hdr.caplen == 0 ) + { + Weird("empty_netmap_header", pkt); + return 0; + } + + last_hdr = current_hdr; + last_data = data; + ++stats.received; + return 1; + } + +void NetmapSource::DoneWithPacket(Packet* pkt) + { + // Nothing to do. + } + +void NetmapSource::Statistics(Stats* s) + { + if ( ! nd ) + { + s->received = s->link = s->dropped = 0; + return; + } + + s->received = stats.received; + + // TODO: Seems these counter's aren't actually set? + s->link = nd->st.ps_recv; + s->dropped = nd->st.ps_drop + nd->st.ps_ifdrop; + } + +bool NetmapSource::GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt) + { + if ( ! 
last_data ) + return false; + + *hdr = &last_hdr; + *pkt = last_data; + return true; + } + +iosource::PktSrc* NetmapSource::InstantiateNetmap(const std::string& path, const std::string& filter, bool is_live) + { + return new NetmapSource(path, filter, is_live, "netmap"); + } + +iosource::PktSrc* NetmapSource::InstantiateVale(const std::string& path, const std::string& filter, bool is_live) + { + return new NetmapSource(path, filter, is_live, "value"); + } diff --git a/src/iosource/pktsrc/netmap/Source.h b/src/iosource/pktsrc/netmap/Source.h new file mode 100644 index 0000000000..ff17fe792c --- /dev/null +++ b/src/iosource/pktsrc/netmap/Source.h @@ -0,0 +1,48 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef IOSOURCE_PKTSRC_NETMAP_SOURCE_H +#define IOSOURCE_PKTSRC_NETMAP_SOURCE_H + +extern "C" { +#define NETMAP_WITH_LIBS +#include +} + +#include "../PktSrc.h" + +namespace iosource { +namespace pktsrc { + +class NetmapSource : public iosource::PktSrc { +public: + // XXX + NetmapSource(const std::string& path, const std::string& filter, bool is_live, const std::string& kind); + virtual ~NetmapSource(); + + static PktSrc* InstantiateNetmap(const std::string& path, const std::string& filter, bool is_live); + static PktSrc* InstantiateVale(const std::string& path, const std::string& filter, bool is_live); + +protected: + // PktSrc interface. 
+ virtual void Open(); + virtual void Close(); + virtual int ExtractNextPacket(Packet* pkt); + virtual void DoneWithPacket(Packet* pkt); + virtual void Statistics(Stats* stats); + virtual bool GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt); + +private: + std::string kind; + Properties props; + Stats stats; + + nm_desc_t *nd; + pcap_pkthdr current_hdr; + pcap_pkthdr last_hdr; + const u_char* last_data; +}; + +} +} + +#endif From f4cbcb9b03b175741f6504d9cc97f35626336fdb Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sat, 12 Jul 2014 19:09:46 -0700 Subject: [PATCH 005/106] Converting log writers and input readers to plugins. --- CMakeLists.txt | 27 ---- cmake | 2 +- scripts/base/frameworks/input/main.bro | 11 ++ scripts/base/frameworks/logging/main.bro | 10 +- .../frameworks/logging/writers/dataseries.bro | 2 +- .../base/frameworks/logging/writers/none.bro | 2 +- scripts/base/init-bare.bro | 6 +- src/CMakeLists.txt | 22 +--- src/Func.cc | 8 +- src/Func.h | 2 +- src/NetVar.cc | 4 - src/NetVar.h | 2 - src/RemoteSerializer.cc | 9 +- src/Type.cc | 15 ++- src/Val.cc | 8 ++ src/analyzer/Component.cc | 2 +- src/analyzer/Manager.cc | 2 +- src/analyzer/Manager.h | 4 - src/file_analysis/Component.cc | 2 +- src/file_analysis/Manager.cc | 2 +- src/input/CMakeLists.txt | 22 ++++ src/input/Component.cc | 28 +++++ src/input/Component.h | 59 +++++++++ src/input/Manager.cc | 77 ++---------- src/input/Manager.h | 6 +- src/input/ReaderBackend.cc | 2 +- src/input/ReaderBackend.h | 2 + src/input/ReaderFrontend.cc | 2 +- src/input/Tag.cc | 22 ++++ src/input/Tag.h | 116 ++++++++++++++++++ src/{ => input}/input.bif | 34 +---- src/input/readers/CMakeLists.txt | 6 + src/input/readers/{ => ascii}/Ascii.cc | 10 +- src/input/readers/{ => ascii}/Ascii.h | 2 +- src/input/readers/ascii/CMakeLists.txt | 9 ++ src/input/readers/ascii/Plugin.cc | 24 ++++ src/input/readers/ascii/ascii.bif | 7 ++ .../readers/{ => benchmark}/Benchmark.cc | 11 +- src/input/readers/{ => 
benchmark}/Benchmark.h | 2 +- src/input/readers/benchmark/CMakeLists.txt | 9 ++ src/input/readers/benchmark/Plugin.cc | 24 ++++ src/input/readers/benchmark/benchmark.bif | 9 ++ src/input/readers/{ => binary}/Binary.cc | 4 +- src/input/readers/{ => binary}/Binary.h | 2 +- src/input/readers/binary/CMakeLists.txt | 9 ++ src/input/readers/binary/Plugin.cc | 24 ++++ src/input/readers/binary/binary.bif | 4 + src/input/readers/raw/CMakeLists.txt | 9 ++ src/input/readers/raw/Plugin.cc | 43 +++++++ src/input/readers/raw/Plugin.h | 30 +++++ src/input/readers/{ => raw}/Raw.cc | 21 ++-- src/input/readers/{ => raw}/Raw.h | 4 +- src/input/readers/raw/raw.bif | 4 + src/input/readers/sqlite/CMakeLists.txt | 9 ++ src/input/readers/sqlite/Plugin.cc | 24 ++++ src/input/readers/{ => sqlite}/SQLite.cc | 10 +- src/input/readers/{ => sqlite}/SQLite.h | 3 +- src/input/readers/sqlite/sqlite.bif | 6 + src/logging/CMakeLists.txt | 22 ++++ src/logging/Component.cc | 29 +++++ src/logging/Component.h | 59 +++++++++ src/logging/Manager.cc | 102 ++------------- src/logging/Manager.h | 12 +- src/logging/Tag.cc | 22 ++++ src/logging/Tag.h | 116 ++++++++++++++++++ src/logging/WriterBackend.h | 2 + src/logging/WriterFrontend.cc | 2 +- src/{ => logging}/logging.bif | 54 -------- src/logging/writers/CMakeLists.txt | 6 + src/logging/writers/{ => ascii}/Ascii.cc | 2 +- src/logging/writers/{ => ascii}/Ascii.h | 5 +- src/logging/writers/ascii/CMakeLists.txt | 9 ++ src/logging/writers/ascii/Plugin.cc | 25 ++++ src/logging/writers/ascii/ascii.bif | 14 +++ src/logging/writers/dataseries/CMakeLists.txt | 19 +++ .../writers/{ => dataseries}/DataSeries.cc | 5 +- .../writers/{ => dataseries}/DataSeries.h | 3 +- src/logging/writers/dataseries/Plugin.cc | 25 ++++ src/logging/writers/dataseries/dataseries.bif | 10 ++ .../writers/elasticsearch/CMakeLists.txt | 15 +++ .../{ => elasticsearch}/ElasticSearch.cc | 13 +- .../{ => elasticsearch}/ElasticSearch.h | 6 +- src/logging/writers/elasticsearch/Plugin.cc | 37 ++++++ 
.../writers/elasticsearch/elasticsearch.bif | 14 +++ src/logging/writers/none/CMakeLists.txt | 9 ++ src/logging/writers/{ => none}/None.cc | 2 +- src/logging/writers/{ => none}/None.h | 2 +- src/logging/writers/none/Plugin.cc | 25 ++++ src/logging/writers/none/none.bif | 6 + src/logging/writers/sqlite/CMakeLists.txt | 9 ++ src/logging/writers/sqlite/Plugin.cc | 25 ++++ src/logging/writers/{ => sqlite}/SQLite.cc | 4 +- src/logging/writers/{ => sqlite}/SQLite.h | 3 +- src/logging/writers/sqlite/sqlite.bif | 9 ++ src/main.cc | 37 +----- src/plugin/ComponentManager.h | 18 ++- src/types.bif | 38 ------ .../btest/core/leaks/dataseries-rotate.bro | 2 +- testing/btest/core/leaks/dataseries.bro | 2 +- .../frameworks/logging/dataseries/options.bro | 2 +- .../frameworks/logging/dataseries/rotate.bro | 2 +- .../logging/dataseries/test-logging.bro | 2 +- .../logging/dataseries/time-as-int.bro | 2 +- .../logging/dataseries/wikipedia.bro | 2 +- .../base/frameworks/logging/sqlite/error.bro | 2 +- .../base/frameworks/logging/sqlite/set.bro | 2 +- .../base/frameworks/logging/sqlite/types.bro | 2 +- .../frameworks/logging/sqlite/wikipedia.bro | 2 +- testing/scripts/has-writer | 4 +- 109 files changed, 1177 insertions(+), 495 deletions(-) create mode 100644 src/input/CMakeLists.txt create mode 100644 src/input/Component.cc create mode 100644 src/input/Component.h create mode 100644 src/input/Tag.cc create mode 100644 src/input/Tag.h rename src/{ => input}/input.bif (68%) create mode 100644 src/input/readers/CMakeLists.txt rename src/input/readers/{ => ascii}/Ascii.cc (99%) rename src/input/readers/{ => ascii}/Ascii.h (98%) create mode 100644 src/input/readers/ascii/CMakeLists.txt create mode 100644 src/input/readers/ascii/Plugin.cc create mode 100644 src/input/readers/ascii/ascii.bif rename src/input/readers/{ => benchmark}/Benchmark.cc (98%) rename src/input/readers/{ => benchmark}/Benchmark.h (97%) create mode 100644 src/input/readers/benchmark/CMakeLists.txt create mode 100644 
src/input/readers/benchmark/Plugin.cc create mode 100644 src/input/readers/benchmark/benchmark.bif rename src/input/readers/{ => binary}/Binary.cc (98%) rename src/input/readers/{ => binary}/Binary.h (96%) create mode 100644 src/input/readers/binary/CMakeLists.txt create mode 100644 src/input/readers/binary/Plugin.cc create mode 100644 src/input/readers/binary/binary.bif create mode 100644 src/input/readers/raw/CMakeLists.txt create mode 100644 src/input/readers/raw/Plugin.cc create mode 100644 src/input/readers/raw/Plugin.h rename src/input/readers/{ => raw}/Raw.cc (98%) rename src/input/readers/{ => raw}/Raw.h (96%) create mode 100644 src/input/readers/raw/raw.bif create mode 100644 src/input/readers/sqlite/CMakeLists.txt create mode 100644 src/input/readers/sqlite/Plugin.cc rename src/input/readers/{ => sqlite}/SQLite.cc (97%) rename src/input/readers/{ => sqlite}/SQLite.h (97%) create mode 100644 src/input/readers/sqlite/sqlite.bif create mode 100644 src/logging/CMakeLists.txt create mode 100644 src/logging/Component.cc create mode 100644 src/logging/Component.h create mode 100644 src/logging/Tag.cc create mode 100644 src/logging/Tag.h rename src/{ => logging}/logging.bif (61%) create mode 100644 src/logging/writers/CMakeLists.txt rename src/logging/writers/{ => ascii}/Ascii.cc (99%) rename src/logging/writers/{ => ascii}/Ascii.h (98%) create mode 100644 src/logging/writers/ascii/CMakeLists.txt create mode 100644 src/logging/writers/ascii/Plugin.cc create mode 100644 src/logging/writers/ascii/ascii.bif create mode 100644 src/logging/writers/dataseries/CMakeLists.txt rename src/logging/writers/{ => dataseries}/DataSeries.cc (99%) rename src/logging/writers/{ => dataseries}/DataSeries.h (98%) create mode 100644 src/logging/writers/dataseries/Plugin.cc create mode 100644 src/logging/writers/dataseries/dataseries.bif create mode 100644 src/logging/writers/elasticsearch/CMakeLists.txt rename src/logging/writers/{ => elasticsearch}/ElasticSearch.cc (99%) rename 
src/logging/writers/{ => elasticsearch}/ElasticSearch.h (98%) create mode 100644 src/logging/writers/elasticsearch/Plugin.cc create mode 100644 src/logging/writers/elasticsearch/elasticsearch.bif create mode 100644 src/logging/writers/none/CMakeLists.txt rename src/logging/writers/{ => none}/None.cc (98%) rename src/logging/writers/{ => none}/None.h (96%) create mode 100644 src/logging/writers/none/Plugin.cc create mode 100644 src/logging/writers/none/none.bif create mode 100644 src/logging/writers/sqlite/CMakeLists.txt create mode 100644 src/logging/writers/sqlite/Plugin.cc rename src/logging/writers/{ => sqlite}/SQLite.cc (99%) rename src/logging/writers/{ => sqlite}/SQLite.h (97%) create mode 100644 src/logging/writers/sqlite/sqlite.bif diff --git a/CMakeLists.txt b/CMakeLists.txt index 77aac6c611..4bcfc7e336 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -127,33 +127,6 @@ if (GOOGLEPERFTOOLS_FOUND) endif () endif () -set(USE_DATASERIES false) -find_package(Lintel) -find_package(DataSeries) -find_package(LibXML2) - -if (NOT DISABLE_DATASERIES AND - LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND) - set(USE_DATASERIES true) - include_directories(BEFORE ${Lintel_INCLUDE_DIR}) - include_directories(BEFORE ${DataSeries_INCLUDE_DIR}) - include_directories(BEFORE ${LibXML2_INCLUDE_DIR}) - list(APPEND OPTLIBS ${Lintel_LIBRARIES}) - list(APPEND OPTLIBS ${DataSeries_LIBRARIES}) - list(APPEND OPTLIBS ${LibXML2_LIBRARIES}) -endif() - -set(USE_ELASTICSEARCH false) -set(USE_CURL false) -find_package(LibCURL) - -if (NOT DISABLE_ELASTICSEARCH AND LIBCURL_FOUND) - set(USE_ELASTICSEARCH true) - set(USE_CURL true) - include_directories(BEFORE ${LibCURL_INCLUDE_DIR}) - list(APPEND OPTLIBS ${LibCURL_LIBRARIES}) -endif() - if (ENABLE_PERFTOOLS_DEBUG OR ENABLE_PERFTOOLS) # Just a no op to prevent CMake from complaining about manually-specified # ENABLE_PERFTOOLS_DEBUG or ENABLE_PERFTOOLS not being used if google diff --git a/cmake b/cmake index d09122d327..e9cfa2bec0 160000 
--- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit d09122d3275baab8a3915fc7f87895ba0377241f +Subproject commit e9cfa2bec050d5b9dc9a67a72a9675867211f2f5 diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 7ca6e1c72b..fa766ba27b 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -4,6 +4,17 @@ module Input; export { + type Event: enum { + EVENT_NEW = 0, + EVENT_CHANGED = 1, + EVENT_REMOVED = 2, + }; + + type Mode: enum { + MANUAL = 0, + REREAD = 1, + STREAM = 2 + }; ## The default input reader used. Defaults to `READER_ASCII`. const default_reader = READER_ASCII &redef; diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index c068866f63..d2013ba8dc 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -5,9 +5,15 @@ module Log; -# Log::ID and Log::Writer are defined in types.bif due to circular dependencies. - export { + ## Type that defines a ID unique for each log stream. Scripts creating new log + ## streams need to redef this enum to add their own specific log ID. The log ID + ## implicitly determines the default name of the generated log file. + type Log::ID: enum { + ## Dummy place-holder. + UNKNOWN + }; + ## If true, local logging is by default enabled for all filters. 
const enable_local_logging = T &redef; diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro index b24601d6b9..6fd65debdb 100644 --- a/scripts/base/frameworks/logging/writers/dataseries.bro +++ b/scripts/base/frameworks/logging/writers/dataseries.bro @@ -57,4 +57,4 @@ function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool return Log::run_rotation_postprocessor_cmd(info, dst); } -redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; +# redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; diff --git a/scripts/base/frameworks/logging/writers/none.bro b/scripts/base/frameworks/logging/writers/none.bro index 5763b796a9..3a2e3c0e81 100644 --- a/scripts/base/frameworks/logging/writers/none.bro +++ b/scripts/base/frameworks/logging/writers/none.bro @@ -13,5 +13,5 @@ function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool return T; } -redef Log::default_rotation_postprocessors += { [Log::WRITER_NONE] = default_rotation_postprocessor_func }; +# redef Log::default_rotation_postprocessors += { [Log::WRITER_NONE] = default_rotation_postprocessor_func }; diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 740ec07fca..01e6e6c5df 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -3363,8 +3363,6 @@ const global_hash_seed: string = "" &redef; ## The maximum is currently 128 bits. const bits_per_uid: count = 96 &redef; -# Load BiFs defined by plugins. -@load base/bif/plugins # Load these frameworks here because they use fairly deep integration with # BiFs and script-land defined types. @@ -3374,3 +3372,7 @@ const bits_per_uid: count = 96 &redef; @load base/frameworks/files @load base/bif + +# Load BiFs defined by plugins. 
+@load base/bif/plugins + diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index cd65592c74..d2a272b467 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -118,8 +118,6 @@ include(BifCl) set(BIF_SRCS bro.bif - logging.bif - input.bif event.bif const.bif types.bif @@ -155,6 +153,8 @@ list(APPEND BINPAC_OUTPUTS "${BINPAC_OUTPUT_CC}") set(bro_SUBDIR_LIBS CACHE INTERNAL "subdir libraries" FORCE) set(bro_PLUGIN_LIBS CACHE INTERNAL "plugin libraries" FORCE) +add_subdirectory(input) +add_subdirectory(logging) add_subdirectory(analyzer) add_subdirectory(file_analysis) add_subdirectory(probabilistic) @@ -347,24 +347,6 @@ set(bro_SRCS threading/formatters/Ascii.cc threading/formatters/JSON.cc - logging/Manager.cc - logging/WriterBackend.cc - logging/WriterFrontend.cc - logging/writers/Ascii.cc - logging/writers/DataSeries.cc - logging/writers/SQLite.cc - logging/writers/ElasticSearch.cc - logging/writers/None.cc - - input/Manager.cc - input/ReaderBackend.cc - input/ReaderFrontend.cc - input/readers/Ascii.cc - input/readers/Raw.cc - input/readers/Benchmark.cc - input/readers/Binary.cc - input/readers/SQLite.cc - 3rdparty/sqlite3.c plugin/Component.cc diff --git a/src/Func.cc b/src/Func.cc index 2b1a44f33a..533a561f85 100644 --- a/src/Func.cc +++ b/src/Func.cc @@ -277,7 +277,7 @@ int BroFunc::IsPure() const return 1; } -Val* BroFunc::HandlePluginResult(Val* plugin_resul, tval_list* args) +Val* BroFunc::HandlePluginResult(Val* plugin_result, val_list* args) const { // Helper function factoring out this code from BroFunc:Call() for better // readability. 
@@ -600,14 +600,10 @@ void builtin_error(const char* msg, BroObj* arg) } #include "bro.bif.func_h" -#include "logging.bif.func_h" -#include "input.bif.func_h" #include "reporter.bif.func_h" #include "strings.bif.func_h" #include "bro.bif.func_def" -#include "logging.bif.func_def" -#include "input.bif.func_def" #include "reporter.bif.func_def" #include "strings.bif.func_def" @@ -623,8 +619,6 @@ void init_builtin_funcs() gap_info = internal_type("gap_info")->AsRecordType(); #include "bro.bif.func_init" -#include "logging.bif.func_init" -#include "input.bif.func_init" #include "reporter.bif.func_init" #include "strings.bif.func_init" diff --git a/src/Func.h b/src/Func.h index 280e31be61..4f46957029 100644 --- a/src/Func.h +++ b/src/Func.h @@ -100,7 +100,7 @@ public: protected: BroFunc() : Func(BRO_FUNC) {} Stmt* AddInits(Stmt* body, id_list* inits); - Val* HandlePluginResult(Val* plugin_result, val_list* args); + Val* HandlePluginResult(Val* plugin_result, val_list* args) const; DECLARE_SERIAL(BroFunc); diff --git a/src/NetVar.cc b/src/NetVar.cc index 0a11a754bb..7c66b55bc2 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -245,8 +245,6 @@ bro_uint_t bits_per_uid; #include "const.bif.netvar_def" #include "types.bif.netvar_def" #include "event.bif.netvar_def" -#include "logging.bif.netvar_def" -#include "input.bif.netvar_def" #include "reporter.bif.netvar_def" void init_event_handlers() @@ -311,8 +309,6 @@ void init_net_var() { #include "const.bif.netvar_init" #include "types.bif.netvar_init" -#include "logging.bif.netvar_init" -#include "input.bif.netvar_init" #include "reporter.bif.netvar_init" conn_id = internal_type("conn_id")->AsRecordType(); diff --git a/src/NetVar.h b/src/NetVar.h index c726c793b2..edd70d1ea6 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -255,8 +255,6 @@ extern void init_net_var(); #include "const.bif.netvar_h" #include "types.bif.netvar_h" #include "event.bif.netvar_h" -#include "logging.bif.netvar_h" -#include "input.bif.netvar_h" #include 
"reporter.bif.netvar_h" #endif diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 6cda46cd6c..3e46c5a1d2 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -192,6 +192,7 @@ #include "logging/Manager.h" #include "IPAddr.h" #include "bro_inet_ntop.h" +#include "logging/logging.bif.h" extern "C" { #include "setsignal.h" @@ -2723,8 +2724,8 @@ bool RemoteSerializer::ProcessLogCreateWriter() fmt.EndRead(); - id_val = new EnumVal(id, BifType::Enum::Log::ID); - writer_val = new EnumVal(writer, BifType::Enum::Log::Writer); + id_val = new EnumVal(id, internal_type("Log::ID")->AsEnumType()); + writer_val = new EnumVal(writer, internal_type("Log::Writer")->AsEnumType()); if ( ! log_mgr->CreateWriter(id_val, writer_val, info, num_fields, fields, true, false, true) ) @@ -2796,8 +2797,8 @@ bool RemoteSerializer::ProcessLogWrite() } } - id_val = new EnumVal(id, BifType::Enum::Log::ID); - writer_val = new EnumVal(writer, BifType::Enum::Log::Writer); + id_val = new EnumVal(id, internal_type("Log::ID")->AsEnumType()); + writer_val = new EnumVal(writer, internal_type("Log::Writer")->AsEnumType()); success = log_mgr->Write(id_val, writer_val, path, num_fields, vals); diff --git a/src/Type.cc b/src/Type.cc index 6a0aa35b1b..17a6efa203 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -1476,10 +1476,19 @@ void EnumType::CheckAndAddName(const string& module_name, const char* name, } else { + // We allow double-definitions if matching exactly. This is + // so that we can define an enum both in a *.bif and *.bro to + // avoid cyclic dependencies. 
+ if ( id->Name() != make_full_var_name(module_name.c_str(), name) + || (id->HasVal() && val != id->ID_Val()->AsEnum()) ) + { + Unref(id); + reporter->Error("identifier or enumerator value in enumerated type definition already exists"); + SetError(); + return; + } + Unref(id); - reporter->Error("identifier or enumerator value in enumerated type definition already exists"); - SetError(); - return; } AddNameInternal(module_name, name, val, is_export); diff --git a/src/Val.cc b/src/Val.cc index 5f605a178e..ad7a920010 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -2920,7 +2920,15 @@ void EnumVal::ValDescribe(ODesc* d) const const char* ename = type->AsEnumType()->Lookup(val.int_val); if ( ! ename ) + { + EnumType::enum_name_list l = type->AsEnumType()->Names(); + + for ( EnumType::enum_name_list::const_iterator iter = l.begin(); + iter != l.end(); ++iter ) + fprintf(stderr, "%s -> %lld\n", iter->first.c_str(), iter->second); + ename = ""; + } d->Add(ename); } diff --git a/src/analyzer/Component.cc b/src/analyzer/Component.cc index 932e0c06a3..9f12759640 100644 --- a/src/analyzer/Component.cc +++ b/src/analyzer/Component.cc @@ -28,7 +28,7 @@ void Component::DoDescribe(ODesc* d) const if ( factory ) { d->Add("ANALYZER_"); - d->Add(canon_name); + d->Add(CanonicalName()); d->Add(", "); } diff --git a/src/analyzer/Manager.cc b/src/analyzer/Manager.cc index 6c495a6fd9..bc8fceaf39 100644 --- a/src/analyzer/Manager.cc +++ b/src/analyzer/Manager.cc @@ -60,7 +60,7 @@ bool Manager::ConnIndex::operator<(const ConnIndex& other) const } Manager::Manager() - : plugin::ComponentManager("Analyzer") + : plugin::ComponentManager("Analyzer", "Tag") { } diff --git a/src/analyzer/Manager.h b/src/analyzer/Manager.h index 151e8922ed..2388a36219 100644 --- a/src/analyzer/Manager.h +++ b/src/analyzer/Manager.h @@ -45,10 +45,6 @@ namespace analyzer { * sets up their initial analyzer tree, including adding the right \c PIA, * respecting well-known ports, and tracking any analyzers specifically * 
scheduled for individidual connections. - * - * Note that we keep the public interface of this class free of std::* - * classes. This allows to external analyzer code to potentially use a - * different C++ standard library. */ class Manager : public plugin::ComponentManager { public: diff --git a/src/file_analysis/Component.cc b/src/file_analysis/Component.cc index 52ac8fe894..7869116392 100644 --- a/src/file_analysis/Component.cc +++ b/src/file_analysis/Component.cc @@ -26,6 +26,6 @@ void Component::DoDescribe(ODesc* d) const if ( factory ) { d->Add("ANALYZER_"); - d->Add(canon_name); + d->Add(CanonicalName()); } } diff --git a/src/file_analysis/Manager.cc b/src/file_analysis/Manager.cc index 2a96315dbb..59c0fa0023 100644 --- a/src/file_analysis/Manager.cc +++ b/src/file_analysis/Manager.cc @@ -22,7 +22,7 @@ string Manager::salt; Manager::Manager() : plugin::ComponentManager("Files"), + file_analysis::Component>("Files", "Tag"), id_map(), ignored(), current_file_id(), magic_state() { } diff --git a/src/input/CMakeLists.txt b/src/input/CMakeLists.txt new file mode 100644 index 0000000000..6ef56cc65e --- /dev/null +++ b/src/input/CMakeLists.txt @@ -0,0 +1,22 @@ + +include(BroSubdir) + +include_directories(BEFORE + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} +) + +add_subdirectory(readers) + +set(input_SRCS + Component.cc + Manager.cc + ReaderBackend.cc + ReaderFrontend.cc + Tag.cc +) + +bif_target(input.bif) + +bro_add_subdir_library(input ${input_SRCS} ${BIF_OUTPUT_CC}) + diff --git a/src/input/Component.cc b/src/input/Component.cc new file mode 100644 index 0000000000..fd70c76216 --- /dev/null +++ b/src/input/Component.cc @@ -0,0 +1,28 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include "Component.h" +#include "Manager.h" + +#include "../Desc.h" +#include "../util.h" + +using namespace input; + +Component::Component(const std::string& name, factory_callback arg_factory) + : plugin::Component(plugin::component::WRITER, name) + { + factory = arg_factory; + + input_mgr->RegisterComponent(this, "READER_"); + } + +Component::~Component() + { + } + +void Component::DoDescribe(ODesc* d) const + { + d->Add("Input::READER_"); + d->Add(CanonicalName()); + } + diff --git a/src/input/Component.h b/src/input/Component.h new file mode 100644 index 0000000000..0812aa63cf --- /dev/null +++ b/src/input/Component.h @@ -0,0 +1,59 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef INPUT_COMPONENT_H +#define INPUT_COMPONENT_H + +#include "Tag.h" +#include "plugin/Component.h" +#include "plugin/TaggedComponent.h" + +namespace input { + +class ReaderFrontend; +class ReaderBackend; + +/** + * Component description for plugins providing log readers. + */ +class Component : public plugin::Component, + public plugin::TaggedComponent { +public: + typedef ReaderBackend* (*factory_callback)(ReaderFrontend* frontend); + + /** + * Constructor. + * + * @param name The name of the provided reader. This name is used + * across the system to identify the reader. + * + * @param factory A factory function to instantiate instances of the + * readers's class, which must be derived directly or indirectly from + * input::ReaderBackend. This is typically a static \c Instatiate() + * method inside the class that just allocates and returns a new + * instance. + */ + Component(const std::string& name, factory_callback factory); + + /** + * Destructor. + */ + ~Component(); + + /** + * Returns the reader's factory function. + */ + factory_callback Factory() const { return factory; } + +protected: + /** + * Overriden from plugin::Component. 
+ */ + virtual void DoDescribe(ODesc* d) const; + +private: + factory_callback factory; +}; + +} + +#endif diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 95983faf26..044f9fcae3 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -5,11 +5,7 @@ #include "Manager.h" #include "ReaderFrontend.h" #include "ReaderBackend.h" -#include "readers/Ascii.h" -#include "readers/Raw.h" -#include "readers/Benchmark.h" -#include "readers/Binary.h" -#include "readers/SQLite.h" +#include "input.bif.h" #include "Event.h" #include "EventHandler.h" @@ -24,24 +20,6 @@ using namespace input; using threading::Value; using threading::Field; -struct ReaderDefinition { - bro_int_t type; // The reader type. - const char *name; // Descriptive name for error messages. - bool (*init)(); // Optional one-time initializing function. - ReaderBackend* (*factory)(ReaderFrontend* frontend); // Factory function for creating instances. -}; - -ReaderDefinition input_readers[] = { - { BifEnum::Input::READER_ASCII, "Ascii", 0, reader::Ascii::Instantiate }, - { BifEnum::Input::READER_RAW, "Raw", 0, reader::Raw::Instantiate }, - { BifEnum::Input::READER_BENCHMARK, "Benchmark", 0, reader::Benchmark::Instantiate }, - { BifEnum::Input::READER_BINARY, "Binary", 0, reader::Binary::Instantiate }, - { BifEnum::Input::READER_SQLITE, "SQLite", 0, reader::SQLite::Instantiate }, - - // End marker - { BifEnum::Input::READER_DEFAULT, "None", 0, (ReaderBackend* (*)(ReaderFrontend* frontend))0 } -}; - static void delete_value_ptr_array(Value** vals, int num_fields) { for ( int i = 0; i < num_fields; ++i ) @@ -215,6 +193,7 @@ Manager::AnalysisStream::~AnalysisStream() } Manager::Manager() + : plugin::ComponentManager("Input", "Reader") { end_of_data = internal_handler("Input::end_of_data"); } @@ -229,55 +208,17 @@ Manager::~Manager() } -ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) +ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, EnumVal* tag) { - 
ReaderDefinition* ir = input_readers; + Component* c = Lookup(tag); - while ( true ) + if ( ! c ) { - if ( ir->type == BifEnum::Input::READER_DEFAULT ) - { - reporter->Error("The reader that was requested was not found and could not be initialized."); - return 0; - } - - if ( ir->type != type ) - { - // no, didn't find the right one... - ++ir; - continue; - } - - - // call init function of writer if presnt - if ( ir->init ) - { - if ( (*ir->init)() ) - { - //clear it to be not called again - ir->init = 0; - } - - else { - // ohok. init failed, kill factory for all eternity - ir->factory = 0; - DBG_LOG(DBG_LOGGING, "Failed to init input class %s", ir->name); - return 0; - } - - } - - if ( ! ir->factory ) - // no factory? - return 0; - - // all done. break. - break; + reporter->Error("The reader that was requested was not found and could not be initialized."); + return 0; } - assert(ir->factory); - - ReaderBackend* backend = (*ir->factory)(frontend); + ReaderBackend* backend = (*c->Factory())(frontend); assert(backend); return backend; @@ -286,8 +227,6 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) // Create a new input reader object to be used at whomevers leisure lateron. bool Manager::CreateStream(Stream* info, RecordVal* description) { - ReaderDefinition* ir = input_readers; - RecordType* rtype = description->Type()->AsRecordType(); if ( ! ( same_type(rtype, BifType::Record::Input::TableDescription, 0) || same_type(rtype, BifType::Record::Input::EventDescription, 0) diff --git a/src/input/Manager.h b/src/input/Manager.h index 8156ed5248..cfac803129 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -10,6 +10,8 @@ #include "RemoteSerializer.h" #include "Val.h" +#include "Component.h" + #include namespace input { @@ -20,7 +22,7 @@ class ReaderBackend; /** * Singleton class for managing input streams. */ -class Manager { +class Manager : public plugin::ComponentManager { public: /** * Constructor. 
@@ -131,7 +133,7 @@ protected: // Instantiates a new ReaderBackend of the given type (note that // doing so creates a new thread!). - ReaderBackend* CreateBackend(ReaderFrontend* frontend, bro_int_t type); + ReaderBackend* CreateBackend(ReaderFrontend* frontend, EnumVal* tag); // Function called from the ReaderBackend to notify the manager that // a stream has been removed or a stream has been closed. Used to diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 4c7540609c..72043c5932 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -156,7 +156,7 @@ public: } }; -using namespace logging; +using namespace input; ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() { diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index 84984a3ce5..e87789abbd 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -8,6 +8,8 @@ #include "threading/SerialTypes.h" #include "threading/MsgThread.h" +#include "Component.h" + namespace input { /** diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index d28f410de0..3852a1002a 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -44,7 +44,7 @@ ReaderFrontend::ReaderFrontend(const ReaderBackend::ReaderInfo& arg_info, EnumVa const char* t = type->Type()->AsEnumType()->Lookup(type->InternalInt()); name = copy_string(fmt("%s/%s", arg_info.source, t)); - backend = input_mgr->CreateBackend(this, type->InternalInt()); + backend = input_mgr->CreateBackend(this, type); assert(backend); backend->Start(); } diff --git a/src/input/Tag.cc b/src/input/Tag.cc new file mode 100644 index 0000000000..4f227f7799 --- /dev/null +++ b/src/input/Tag.cc @@ -0,0 +1,22 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include "Tag.h" +#include "Manager.h" + +input::Tag input::Tag::Error; + +input::Tag::Tag(type_t type, subtype_t subtype) + : ::Tag(input_mgr->GetTagEnumType(), type, subtype) + { + } + +input::Tag& input::Tag::operator=(const input::Tag& other) + { + ::Tag::operator=(other); + return *this; + } + +EnumVal* input::Tag::AsEnumVal() const + { + return ::Tag::AsEnumVal(input_mgr->GetTagEnumType()); + } diff --git a/src/input/Tag.h b/src/input/Tag.h new file mode 100644 index 0000000000..1977c49aeb --- /dev/null +++ b/src/input/Tag.h @@ -0,0 +1,116 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef INPUT_TAG_H +#define INPUT_TAG_H + +#include "config.h" +#include "util.h" +#include "../Tag.h" +#include "plugin/TaggedComponent.h" +#include "plugin/ComponentManager.h" + +class EnumVal; + +namespace input { + +class Manager; +class Component; + +/** + * Class to identify a reader type. + * + * The script-layer analogue is Log::Writer. + */ +class Tag : public ::Tag { +public: + /* + * Copy constructor. + */ + Tag(const Tag& other) : ::Tag(other) {} + + /** + * Default constructor. This initializes the tag with an error value + * that will make \c operator \c bool return false. + */ + Tag() : ::Tag() {} + + /** + * Destructor. + */ + ~Tag() {} + + /** + * Returns false if the tag represents an error value rather than a + * legal reader type. + * TODO: make this conversion operator "explicit" (C++11) or use a + * "safe bool" idiom (not necessary if "explicit" is available), + * otherwise this may allow nonsense/undesired comparison operations. + */ + operator bool() const { return *this != Tag(); } + + /** + * Assignment operator. + */ + Tag& operator=(const Tag& other); + + /** + * Compares two tags for equality. + */ + bool operator==(const Tag& other) const + { + return ::Tag::operator==(other); + } + + /** + * Compares two tags for inequality. 
+ */ + bool operator!=(const Tag& other) const + { + return ::Tag::operator!=(other); + } + + /** + * Compares two tags for less-than relationship. + */ + bool operator<(const Tag& other) const + { + return ::Tag::operator<(other); + } + + /** + * Returns the \c Log::Writer enum that corresponds to this tag. + * The returned value does not have its ref-count increased. + * + * @param etype the script-layer enum type associated with the tag. + */ + EnumVal* AsEnumVal() const; + + static Tag Error; + +protected: + friend class plugin::ComponentManager; + friend class plugin::TaggedComponent; + + /** + * Constructor. + * + * @param type The main type. Note that the \a input::Manager + * manages the value space internally, so noone else should assign + * any main types. + * + * @param subtype The sub type, which is left to an reader for + * interpretation. By default it's set to zero. + */ + Tag(type_t type, subtype_t subtype = 0); + + /** + * Constructor. + * + * @param val An enum value of script type \c Log::Writer. 
+ */ + Tag(EnumVal* val) : ::Tag(val) {} +}; + +} + +#endif diff --git a/src/input.bif b/src/input/input.bif similarity index 68% rename from src/input.bif rename to src/input/input.bif index f7c4d37a67..b28ccc00d8 100644 --- a/src/input.bif +++ b/src/input/input.bif @@ -4,9 +4,14 @@ module Input; %%{ #include "input/Manager.h" -#include "NetVar.h" %%} +enum Event %{ + EVENT_NEW = 0, + EVENT_CHANGED = 1, + EVENT_REMOVED = 2, +%} + type TableDescription: record; type EventDescription: record; type AnalysisDescription: record; @@ -45,30 +50,3 @@ function Input::__force_update%(id: string%) : bool const accept_unsupported_types: bool; -# Options for Ascii Reader - -module InputAscii; - -const separator: string; -const set_separator: string; -const empty_field: string; -const unset_field: string; - -module InputRaw; -const record_separator: string; - -module InputBenchmark; -const factor: double; -const spread: count; -const autospread: double; -const addfactor: count; -const stopspreadat: count; -const timedspread: double; - -module InputBinary; -const chunk_size: count; - -module InputSQLite; -const set_separator: string; -const unset_field: string; -const empty_field: string; diff --git a/src/input/readers/CMakeLists.txt b/src/input/readers/CMakeLists.txt new file mode 100644 index 0000000000..36b7439052 --- /dev/null +++ b/src/input/readers/CMakeLists.txt @@ -0,0 +1,6 @@ + +add_subdirectory(ascii) +add_subdirectory(benchmark) +add_subdirectory(binary) +add_subdirectory(raw) +add_subdirectory(sqlite) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/ascii/Ascii.cc similarity index 99% rename from src/input/readers/Ascii.cc rename to src/input/readers/ascii/Ascii.cc index a79121e80a..1bbcaea1d9 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/ascii/Ascii.cc @@ -1,18 +1,18 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "Ascii.h" -#include "NetVar.h" - #include #include -#include "../../threading/SerialTypes.h" - #include #include #include #include +#include "Ascii.h" +#include "ascii.bif.h" + +#include "threading/SerialTypes.h" + using namespace input::reader; using namespace threading; using threading::Value; diff --git a/src/input/readers/Ascii.h b/src/input/readers/ascii/Ascii.h similarity index 98% rename from src/input/readers/Ascii.h rename to src/input/readers/ascii/Ascii.h index 5d6bc71d54..fe9bb95845 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/ascii/Ascii.h @@ -6,7 +6,7 @@ #include #include -#include "../ReaderBackend.h" +#include "input/ReaderBackend.h" #include "threading/formatters/Ascii.h" namespace input { namespace reader { diff --git a/src/input/readers/ascii/CMakeLists.txt b/src/input/readers/ascii/CMakeLists.txt new file mode 100644 index 0000000000..267bb9a7ab --- /dev/null +++ b/src/input/readers/ascii/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro AsciiReader) +bro_plugin_cc(Ascii.cc Plugin.cc) +bro_plugin_bif(ascii.bif) +bro_plugin_end() diff --git a/src/input/readers/ascii/Plugin.cc b/src/input/readers/ascii/Plugin.cc new file mode 100644 index 0000000000..b389cb8602 --- /dev/null +++ b/src/input/readers/ascii/Plugin.cc @@ -0,0 +1,24 @@ +// See the file in the main distribution directory for copyright. 
+ +#include "plugin/Plugin.h" + +#include "Ascii.h" + +namespace plugin { +namespace Bro_AsciiReader { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::input::Component("Ascii", ::input::reader::Ascii::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::AsciiReader"; + config.description = "ASCII input reader"; + return config; + } +} plugin; + +} +} diff --git a/src/input/readers/ascii/ascii.bif b/src/input/readers/ascii/ascii.bif new file mode 100644 index 0000000000..8bb3a96492 --- /dev/null +++ b/src/input/readers/ascii/ascii.bif @@ -0,0 +1,7 @@ + +module InputAscii; + +const separator: string; +const set_separator: string; +const empty_field: string; +const unset_field: string; diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/benchmark/Benchmark.cc similarity index 98% rename from src/input/readers/Benchmark.cc rename to src/input/readers/benchmark/Benchmark.cc index de7eae8cc8..9d962c8c64 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/benchmark/Benchmark.cc @@ -1,16 +1,15 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "Benchmark.h" -#include "NetVar.h" - -#include "../../threading/SerialTypes.h" - #include #include #include #include -#include "../../threading/Manager.h" +#include "Benchmark.h" +#include "benchmark.bif.h" + +#include "threading/SerialTypes.h" +#include "threading/Manager.h" using namespace input::reader; using threading::Value; diff --git a/src/input/readers/Benchmark.h b/src/input/readers/benchmark/Benchmark.h similarity index 97% rename from src/input/readers/Benchmark.h rename to src/input/readers/benchmark/Benchmark.h index 3296f3a85e..42501c1c29 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/benchmark/Benchmark.h @@ -3,7 +3,7 @@ #ifndef INPUT_READERS_BENCHMARK_H #define INPUT_READERS_BENCHMARK_H -#include "../ReaderBackend.h" +#include "input/ReaderBackend.h" #include "threading/formatters/Ascii.h" namespace input { namespace reader { diff --git a/src/input/readers/benchmark/CMakeLists.txt b/src/input/readers/benchmark/CMakeLists.txt new file mode 100644 index 0000000000..3b3a34ae47 --- /dev/null +++ b/src/input/readers/benchmark/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro BenchmarkReader) +bro_plugin_cc(Benchmark.cc Plugin.cc) +bro_plugin_bif(benchmark.bif) +bro_plugin_end() diff --git a/src/input/readers/benchmark/Plugin.cc b/src/input/readers/benchmark/Plugin.cc new file mode 100644 index 0000000000..d5e0975a80 --- /dev/null +++ b/src/input/readers/benchmark/Plugin.cc @@ -0,0 +1,24 @@ +// See the file in the main distribution directory for copyright. 
+ +#include "plugin/Plugin.h" + +#include "Benchmark.h" + +namespace plugin { +namespace Bro_BenchmarkReader { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::input::Component("Benchmark", ::input::reader::Benchmark::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::BenchmarkReader"; + config.description = "Benchmark input reader"; + return config; + } +} plugin; + +} +} diff --git a/src/input/readers/benchmark/benchmark.bif b/src/input/readers/benchmark/benchmark.bif new file mode 100644 index 0000000000..d505f0efaf --- /dev/null +++ b/src/input/readers/benchmark/benchmark.bif @@ -0,0 +1,9 @@ + +module InputBenchmark; + +const factor: double; +const spread: count; +const autospread: double; +const addfactor: count; +const stopspreadat: count; +const timedspread: double; diff --git a/src/input/readers/Binary.cc b/src/input/readers/binary/Binary.cc similarity index 98% rename from src/input/readers/Binary.cc rename to src/input/readers/binary/Binary.cc index 96a9028f7b..560a80f9a0 100644 --- a/src/input/readers/Binary.cc +++ b/src/input/readers/binary/Binary.cc @@ -3,9 +3,9 @@ #include #include "Binary.h" -#include "NetVar.h" +#include "binary.bif.h" -#include "../../threading/SerialTypes.h" +#include "threading/SerialTypes.h" using namespace input::reader; using threading::Value; diff --git a/src/input/readers/Binary.h b/src/input/readers/binary/Binary.h similarity index 96% rename from src/input/readers/Binary.h rename to src/input/readers/binary/Binary.h index a2283d1980..587d56cfa7 100644 --- a/src/input/readers/Binary.h +++ b/src/input/readers/binary/Binary.h @@ -5,7 +5,7 @@ #include -#include "../ReaderBackend.h" +#include "input/ReaderBackend.h" namespace input { namespace reader { diff --git a/src/input/readers/binary/CMakeLists.txt b/src/input/readers/binary/CMakeLists.txt new file mode 100644 index 0000000000..800c3b7567 --- /dev/null +++ 
b/src/input/readers/binary/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro BinaryReader) +bro_plugin_cc(Binary.cc Plugin.cc) +bro_plugin_bif(binary.bif) +bro_plugin_end() diff --git a/src/input/readers/binary/Plugin.cc b/src/input/readers/binary/Plugin.cc new file mode 100644 index 0000000000..7c5dc16b8b --- /dev/null +++ b/src/input/readers/binary/Plugin.cc @@ -0,0 +1,24 @@ +// See the file in the main distribution directory for copyright. + +#include "plugin/Plugin.h" + +#include "Binary.h" + +namespace plugin { +namespace Bro_BinaryReader { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::input::Component("Binary", ::input::reader::Binary::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::BinaryReader"; + config.description = "Binary input reader"; + return config; + } +} plugin; + +} +} diff --git a/src/input/readers/binary/binary.bif b/src/input/readers/binary/binary.bif new file mode 100644 index 0000000000..54e32ff453 --- /dev/null +++ b/src/input/readers/binary/binary.bif @@ -0,0 +1,4 @@ + +module InputBinary; + +const chunk_size: count; diff --git a/src/input/readers/raw/CMakeLists.txt b/src/input/readers/raw/CMakeLists.txt new file mode 100644 index 0000000000..5540d70202 --- /dev/null +++ b/src/input/readers/raw/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro RawReader) +bro_plugin_cc(Raw.cc Plugin.cc) +bro_plugin_bif(raw.bif) +bro_plugin_end() diff --git a/src/input/readers/raw/Plugin.cc b/src/input/readers/raw/Plugin.cc new file mode 100644 index 0000000000..c7af84e34e --- /dev/null +++ b/src/input/readers/raw/Plugin.cc @@ -0,0 +1,43 @@ +// See the file in the main distribution directory for copyright. 
+ +#include "Plugin.h" + +namespace plugin { namespace Bro_RawReader { Plugin plugin; } } + +using namespace plugin::Bro_RawReader; + +Plugin::Plugin() + { + init = false; + } + +plugin::Configuration Plugin::Configure() + { + AddComponent(new ::input::Component("Raw", ::input::reader::Raw::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::RawReader"; + config.description = "Raw input reader"; + return config; + } + +void Plugin::InitPreScript() + { + if ( pthread_mutex_init(&fork_mutex, 0) != 0 ) + reporter->FatalError("cannot initialize raw reader's mutex"); + + init = true; + } + +void Plugin::Done() + { + pthread_mutex_destroy(&fork_mutex); + init = false; + } + +pthread_mutex_t* Plugin::ForkMutex() + { + assert(init); + return &fork_mutex; + } + diff --git a/src/input/readers/raw/Plugin.h b/src/input/readers/raw/Plugin.h new file mode 100644 index 0000000000..59a5dfd2be --- /dev/null +++ b/src/input/readers/raw/Plugin.h @@ -0,0 +1,30 @@ +// See the file in the main distribution directory for copyright. + +#include "plugin/Plugin.h" + +#include "Raw.h" + +namespace plugin { +namespace Bro_RawReader { + +class Plugin : public plugin::Plugin { +public: + Plugin(); + + plugin::Configuration Configure(); + + virtual void InitPreScript(); + virtual void Done(); + + pthread_mutex_t * ForkMutex(); + +private: + bool init; + pthread_mutex_t fork_mutex; + +}; + +extern Plugin plugin; + +} +} diff --git a/src/input/readers/Raw.cc b/src/input/readers/raw/Raw.cc similarity index 98% rename from src/input/readers/Raw.cc rename to src/input/readers/raw/Raw.cc index 11976e2a11..259792cb3f 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/raw/Raw.cc @@ -1,10 +1,5 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "Raw.h" -#include "NetVar.h" - -#include "../../threading/SerialTypes.h" - #include #include #include @@ -14,6 +9,12 @@ #include #include +#include "Raw.h" +#include "Plugin.h" +#include "raw.bif.h" + +#include "threading/SerialTypes.h" + extern "C" { #include "setsignal.h" } @@ -23,12 +24,6 @@ using threading::Value; using threading::Field; const int Raw::block_size = 4096; // how big do we expect our chunks of data to be. -pthread_mutex_t Raw::fork_mutex; - -bool Raw::ClassInit() - { - return pthread_mutex_init(&fork_mutex, 0) == 0; - } Raw::Raw(ReaderFrontend *frontend) : ReaderBackend(frontend) { @@ -109,7 +104,7 @@ bool Raw::SetFDFlags(int fd, int cmd, int flags) bool Raw::LockForkMutex() { - int res = pthread_mutex_lock(&fork_mutex); + int res = pthread_mutex_lock(plugin::Bro_RawReader::plugin.ForkMutex()); if ( res == 0 ) return true; @@ -119,7 +114,7 @@ bool Raw::LockForkMutex() bool Raw::UnlockForkMutex() { - int res = pthread_mutex_unlock(&fork_mutex); + int res = pthread_mutex_unlock(plugin::Bro_RawReader::plugin.ForkMutex()); if ( res == 0 ) return true; diff --git a/src/input/readers/Raw.h b/src/input/readers/raw/Raw.h similarity index 96% rename from src/input/readers/Raw.h rename to src/input/readers/raw/Raw.h index c549125174..06568a6296 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/raw/Raw.h @@ -6,7 +6,7 @@ #include #include -#include "../ReaderBackend.h" +#include "input/ReaderBackend.h" namespace input { namespace reader { @@ -21,8 +21,6 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Raw(frontend); } - static bool ClassInit(); - protected: virtual bool DoInit(const ReaderInfo& info, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); diff --git a/src/input/readers/raw/raw.bif b/src/input/readers/raw/raw.bif new file mode 100644 index 0000000000..becaf47f79 --- /dev/null +++ b/src/input/readers/raw/raw.bif @@ -0,0 +1,4 @@ + +module InputRaw; + +const 
record_separator: string; diff --git a/src/input/readers/sqlite/CMakeLists.txt b/src/input/readers/sqlite/CMakeLists.txt new file mode 100644 index 0000000000..3c513127dc --- /dev/null +++ b/src/input/readers/sqlite/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro SQLiteReader) +bro_plugin_cc(SQLite.cc Plugin.cc) +bro_plugin_bif(sqlite.bif) +bro_plugin_end() diff --git a/src/input/readers/sqlite/Plugin.cc b/src/input/readers/sqlite/Plugin.cc new file mode 100644 index 0000000000..db75d6dc22 --- /dev/null +++ b/src/input/readers/sqlite/Plugin.cc @@ -0,0 +1,24 @@ +// See the file in the main distribution directory for copyright. + +#include "plugin/Plugin.h" + +#include "SQLite.h" + +namespace plugin { +namespace Bro_SQLiteReader { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::input::Component("SQLite", ::input::reader::SQLite::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::SQLiteReader"; + config.description = "SQLite input reader"; + return config; + } +} plugin; + +} +} diff --git a/src/input/readers/SQLite.cc b/src/input/readers/sqlite/SQLite.cc similarity index 97% rename from src/input/readers/SQLite.cc rename to src/input/readers/sqlite/SQLite.cc index d032f934a7..3790e5919d 100644 --- a/src/input/readers/SQLite.cc +++ b/src/input/readers/sqlite/SQLite.cc @@ -2,16 +2,18 @@ #include "config.h" -#include "SQLite.h" -#include "NetVar.h" - #include #include #include #include #include -#include "../../threading/SerialTypes.h" +#include "SQLite.h" +#include "sqlite.bif.h" +#include "logging/writers/sqlite/sqlite.bif.h" +#include "logging/writers/ascii/ascii.bif.h" + +#include "threading/SerialTypes.h" using namespace input::reader; using threading::Value; diff --git a/src/input/readers/SQLite.h b/src/input/readers/sqlite/SQLite.h similarity index 97% rename from 
src/input/readers/SQLite.h rename to src/input/readers/sqlite/SQLite.h index a98b3e06b8..f4cae7d01f 100644 --- a/src/input/readers/SQLite.h +++ b/src/input/readers/sqlite/SQLite.h @@ -8,8 +8,7 @@ #include #include -#include "../ReaderBackend.h" - +#include "input/ReaderBackend.h" #include "threading/formatters/Ascii.h" #include "3rdparty/sqlite3.h" diff --git a/src/input/readers/sqlite/sqlite.bif b/src/input/readers/sqlite/sqlite.bif new file mode 100644 index 0000000000..60ea4e3051 --- /dev/null +++ b/src/input/readers/sqlite/sqlite.bif @@ -0,0 +1,6 @@ + +module InputSQLite; + +const set_separator: string; +const unset_field: string; +const empty_field: string; diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt new file mode 100644 index 0000000000..24dbc9860b --- /dev/null +++ b/src/logging/CMakeLists.txt @@ -0,0 +1,22 @@ + +include(BroSubdir) + +include_directories(BEFORE + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} +) + +add_subdirectory(writers) + +set(logging_SRCS + Component.cc + Manager.cc + WriterBackend.cc + WriterFrontend.cc + Tag.cc +) + +bif_target(logging.bif) + +bro_add_subdir_library(logging ${logging_SRCS} ${BIF_OUTPUT_CC}) + diff --git a/src/logging/Component.cc b/src/logging/Component.cc new file mode 100644 index 0000000000..90bc9be819 --- /dev/null +++ b/src/logging/Component.cc @@ -0,0 +1,29 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include "Component.h" +#include "Manager.h" + +#include "../Desc.h" +#include "../util.h" + +using namespace logging; + +Component::Component(const std::string& name, factory_callback arg_factory) + : plugin::Component(plugin::component::WRITER, name) + { + factory = arg_factory; + + log_mgr->RegisterComponent(this, "WRITER_"); + } + +Component::~Component() + { + } + +void Component::DoDescribe(ODesc* d) const + { + d->Add("Log::WRITER_"); + d->Add(CanonicalName()); + } + + diff --git a/src/logging/Component.h b/src/logging/Component.h new file mode 100644 index 0000000000..21e114b36c --- /dev/null +++ b/src/logging/Component.h @@ -0,0 +1,59 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef LOGGING_COMPONENT_H +#define LOGGING_COMPONENT_H + +#include "Tag.h" +#include "plugin/Component.h" +#include "plugin/TaggedComponent.h" + +namespace logging { + +class WriterFrontend; +class WriterBackend; + +/** + * Component description for plugins providing log writers. + */ +class Component : public plugin::Component, + public plugin::TaggedComponent { +public: + typedef WriterBackend* (*factory_callback)(WriterFrontend* frontend); + + /** + * Constructor. + * + * @param name The name of the provided writer. This name is used + * across the system to identify the writer. + * + * @param factory A factory function to instantiate instances of the + * writers's class, which must be derived directly or indirectly from + * logging::WriterBackend. This is typically a static \c Instatiate() + * method inside the class that just allocates and returns a new + * instance. + */ + Component(const std::string& name, factory_callback factory); + + /** + * Destructor. + */ + ~Component(); + + /** + * Returns the writer's factory function. + */ + factory_callback Factory() const { return factory; } + +protected: + /** + * Overriden from plugin::Component. 
+ */ + virtual void DoDescribe(ODesc* d) const; + +private: + factory_callback factory; +}; + +} + +#endif diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 55e0fddb5a..1fe5db3b26 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -14,48 +14,10 @@ #include "Manager.h" #include "WriterFrontend.h" #include "WriterBackend.h" - -#include "writers/Ascii.h" -#include "writers/None.h" - -#ifdef USE_ELASTICSEARCH -#include "writers/ElasticSearch.h" -#endif - -#ifdef USE_DATASERIES -#include "writers/DataSeries.h" -#endif - -#include "writers/SQLite.h" +#include "logging.bif.h" using namespace logging; -// Structure describing a log writer type. -struct WriterDefinition { - bro_int_t type; // The type. - const char *name; // Descriptive name for error messages. - bool (*init)(); // An optional one-time initialization function. - WriterBackend* (*factory)(WriterFrontend* frontend); // A factory function creating instances. -}; - -// Static table defining all availabel log writers. -WriterDefinition log_writers[] = { - { BifEnum::Log::WRITER_NONE, "None", 0, writer::None::Instantiate }, - { BifEnum::Log::WRITER_ASCII, "Ascii", 0, writer::Ascii::Instantiate }, - { BifEnum::Log::WRITER_SQLITE, "SQLite", 0, writer::SQLite::Instantiate }, - -#ifdef USE_ELASTICSEARCH - { BifEnum::Log::WRITER_ELASTICSEARCH, "ElasticSearch", 0, writer::ElasticSearch::Instantiate }, -#endif - -#ifdef USE_DATASERIES - { BifEnum::Log::WRITER_DATASERIES, "DataSeries", 0, writer::DataSeries::Instantiate }, -#endif - - // End marker, don't touch. 
- { BifEnum::Log::WRITER_DEFAULT, "None", 0, (WriterBackend* (*)(WriterFrontend* frontend))0 } -}; - struct Manager::Filter { string name; EnumVal* id; @@ -142,6 +104,7 @@ Manager::Stream::~Stream() } Manager::Manager() + : plugin::ComponentManager("Log", "Writer") { rotations_pending = 0; } @@ -152,64 +115,17 @@ Manager::~Manager() delete *s; } -list Manager::SupportedFormats() +WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, EnumVal* tag) { - list formats; + Component* c = Lookup(tag); - for ( WriterDefinition* ld = log_writers; ld->type != BifEnum::Log::WRITER_DEFAULT; ++ld ) - formats.push_back(ld->name); - - return formats; - } - -WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) - { - WriterDefinition* ld = log_writers; - - while ( true ) + if ( ! c ) { - if ( ld->type == BifEnum::Log::WRITER_DEFAULT ) - { - reporter->Error("unknown writer type requested"); - return 0; - } - - if ( ld->type != type ) - { - // Not the right one. - ++ld; - continue; - } - - // If the writer has an init function, call it. - if ( ld->init ) - { - if ( (*ld->init)() ) - // Clear the init function so that we won't - // call it again later. - ld->init = 0; - else - { - // Init failed, disable by deleting factory - // function. - ld->factory = 0; - - reporter->Error("initialization of writer %s failed", ld->name); - return 0; - } - } - - if ( ! ld->factory ) - // Oops, we can't instantiate this guy. - return 0; - - // All done. 
- break; + reporter->Error("unknown writer type requested"); + return 0; } - assert(ld->factory); - - WriterBackend* backend = (*ld->factory)(frontend); + WriterBackend* backend = (*c->Factory())(frontend); assert(backend); return backend; @@ -1234,7 +1150,7 @@ void Manager::SendAllWritersTo(RemoteSerializer::PeerID peer) { WriterFrontend* writer = i->second->writer; - EnumVal writer_val(i->first.first, BifType::Enum::Log::Writer); + EnumVal writer_val(i->first.first, internal_type("Log::Writer")->AsEnumType()); remote_serializer->SendLogCreateWriter(peer, (*s)->id, &writer_val, *i->second->info, diff --git a/src/logging/Manager.h b/src/logging/Manager.h index 61f6dcd8a7..b8264927a3 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -6,9 +6,12 @@ #define LOGGING_MANAGER_H #include "../Val.h" +#include "../Tag.h" #include "../EventHandler.h" #include "../RemoteSerializer.h" +#include "../plugin/ComponentManager.h" +#include "Component.h" #include "WriterBackend.h" class SerializationFormat; @@ -23,7 +26,7 @@ class RotationFinishedMessage; /** * Singleton class for managing log streams. */ -class Manager { +class Manager : public plugin::ComponentManager { public: /** * Constructor. @@ -154,11 +157,6 @@ public: */ void Terminate(); - /** - * Returns a list of supported output formats. - */ - static list SupportedFormats(); - protected: friend class WriterFrontend; friend class RotationFinishedMessage; @@ -168,7 +166,7 @@ protected: // Instantiates a new WriterBackend of the given type (note that // doing so creates a new thread!). - WriterBackend* CreateBackend(WriterFrontend* frontend, bro_int_t type); + WriterBackend* CreateBackend(WriterFrontend* frontend, EnumVal* tag); //// Function also used by the RemoteSerializer. 
diff --git a/src/logging/Tag.cc b/src/logging/Tag.cc new file mode 100644 index 0000000000..dea3b41819 --- /dev/null +++ b/src/logging/Tag.cc @@ -0,0 +1,22 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "Tag.h" +#include "Manager.h" + +logging::Tag logging::Tag::Error; + +logging::Tag::Tag(type_t type, subtype_t subtype) + : ::Tag(log_mgr->GetTagEnumType(), type, subtype) + { + } + +logging::Tag& logging::Tag::operator=(const logging::Tag& other) + { + ::Tag::operator=(other); + return *this; + } + +EnumVal* logging::Tag::AsEnumVal() const + { + return ::Tag::AsEnumVal(log_mgr->GetTagEnumType()); + } diff --git a/src/logging/Tag.h b/src/logging/Tag.h new file mode 100644 index 0000000000..b5b235154a --- /dev/null +++ b/src/logging/Tag.h @@ -0,0 +1,116 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef LOGGING_TAG_H +#define LOGGING_TAG_H + +#include "config.h" +#include "util.h" +#include "../Tag.h" +#include "plugin/TaggedComponent.h" +#include "plugin/ComponentManager.h" + +class EnumVal; + +namespace logging { + +class Manager; +class Component; + +/** + * Class to identify a writer type. + * + * The script-layer analogue is Log::Writer. + */ +class Tag : public ::Tag { +public: + /* + * Copy constructor. + */ + Tag(const Tag& other) : ::Tag(other) {} + + /** + * Default constructor. This initializes the tag with an error value + * that will make \c operator \c bool return false. + */ + Tag() : ::Tag() {} + + /** + * Destructor. + */ + ~Tag() {} + + /** + * Returns false if the tag represents an error value rather than a + * legal writer type. + * TODO: make this conversion operator "explicit" (C++11) or use a + * "safe bool" idiom (not necessary if "explicit" is available), + * otherwise this may allow nonsense/undesired comparison operations. + */ + operator bool() const { return *this != Tag(); } + + /** + * Assignment operator. 
+ */ + Tag& operator=(const Tag& other); + + /** + * Compares two tags for equality. + */ + bool operator==(const Tag& other) const + { + return ::Tag::operator==(other); + } + + /** + * Compares two tags for inequality. + */ + bool operator!=(const Tag& other) const + { + return ::Tag::operator!=(other); + } + + /** + * Compares two tags for less-than relationship. + */ + bool operator<(const Tag& other) const + { + return ::Tag::operator<(other); + } + + /** + * Returns the \c Log::Writer enum that corresponds to this tag. + * The returned value does not have its ref-count increased. + * + * @param etype the script-layer enum type associated with the tag. + */ + EnumVal* AsEnumVal() const; + + static Tag Error; + +protected: + friend class plugin::ComponentManager; + friend class plugin::TaggedComponent; + + /** + * Constructor. + * + * @param type The main type. Note that the \a logging::Manager + * manages the value space internally, so noone else should assign + * any main types. + * + * @param subtype The sub type, which is left to an writer for + * interpretation. By default it's set to zero. + */ + Tag(type_t type, subtype_t subtype = 0); + + /** + * Constructor. + * + * @param val An enum value of script type \c Log::Writer. 
+ */ + Tag(EnumVal* val) : ::Tag(val) {} +}; + +} + +#endif diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index f5c74e582c..783a497823 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -7,6 +7,8 @@ #include "threading/MsgThread.h" +#include "Component.h" + class RemoteSerializer; namespace logging { diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 09490ce3d1..a075701151 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -120,7 +120,7 @@ WriterFrontend::WriterFrontend(const WriterBackend::WriterInfo& arg_info, EnumVa if ( local ) { - backend = log_mgr->CreateBackend(this, writer->AsEnum()); + backend = log_mgr->CreateBackend(this, writer); if ( backend ) backend->Start(); diff --git a/src/logging.bif b/src/logging/logging.bif similarity index 61% rename from src/logging.bif rename to src/logging/logging.bif index 062e4dbe31..87323ef789 100644 --- a/src/logging.bif +++ b/src/logging/logging.bif @@ -3,8 +3,6 @@ module Log; %%{ -#include "NetVar.h" - #include "logging/Manager.h" %%} @@ -65,55 +63,3 @@ function Log::__flush%(id: Log::ID%): bool bool result = log_mgr->Flush(id->AsEnumVal()); return new Val(result, TYPE_BOOL); %} - -# Options for the ASCII writer. - -module LogAscii; - -const output_to_stdout: bool; -const include_meta: bool; -const meta_prefix: string; -const separator: string; -const set_separator: string; -const empty_field: string; -const unset_field: string; -const use_json: bool; -const json_timestamps: JSON::TimestampFormat; - -# Options for the DataSeries writer. - -module LogDataSeries; - -const compression: string; -const extent_size: count; -const dump_schema: bool; -const use_integer_for_time: bool; -const num_threads: count; - -# Options for the SQLite writer - -module LogSQLite; - -const set_separator: string; -const empty_field: string; -const unset_field: string; - -# Options for the ElasticSearch writer. 
- -module LogElasticSearch; - -const cluster_name: string; -const server_host: string; -const server_port: count; -const index_prefix: string; -const type_prefix: string; -const transfer_timeout: interval; -const max_batch_size: count; -const max_batch_interval: interval; -const max_byte_size: count; - -# Options for the None writer. - -module LogNone; - -const debug: bool; diff --git a/src/logging/writers/CMakeLists.txt b/src/logging/writers/CMakeLists.txt new file mode 100644 index 0000000000..9718a412d0 --- /dev/null +++ b/src/logging/writers/CMakeLists.txt @@ -0,0 +1,6 @@ + +add_subdirectory(ascii) +add_subdirectory(dataseries) +add_subdirectory(elasticsearch) +add_subdirectory(none) +add_subdirectory(sqlite) diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/ascii/Ascii.cc similarity index 99% rename from src/logging/writers/Ascii.cc rename to src/logging/writers/ascii/Ascii.cc index fe79089b04..a27553916f 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/ascii/Ascii.cc @@ -5,10 +5,10 @@ #include #include -#include "NetVar.h" #include "threading/SerialTypes.h" #include "Ascii.h" +#include "ascii.bif.h" using namespace logging::writer; using namespace threading; diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/ascii/Ascii.h similarity index 98% rename from src/logging/writers/Ascii.h rename to src/logging/writers/ascii/Ascii.h index 54402cc141..8648070111 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/ascii/Ascii.h @@ -5,7 +5,7 @@ #ifndef LOGGING_WRITER_ASCII_H #define LOGGING_WRITER_ASCII_H -#include "../WriterBackend.h" +#include "logging/WriterBackend.h" #include "threading/formatters/Ascii.h" #include "threading/formatters/JSON.h" @@ -16,9 +16,10 @@ public: Ascii(WriterFrontend* frontend); ~Ascii(); + static string LogExt(); + static WriterBackend* Instantiate(WriterFrontend* frontend) { return new Ascii(frontend); } - static string LogExt(); protected: virtual bool DoInit(const WriterInfo& 
info, int num_fields, diff --git a/src/logging/writers/ascii/CMakeLists.txt b/src/logging/writers/ascii/CMakeLists.txt new file mode 100644 index 0000000000..0cb0357a0d --- /dev/null +++ b/src/logging/writers/ascii/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro AsciiWriter) +bro_plugin_cc(Ascii.cc Plugin.cc) +bro_plugin_bif(ascii.bif) +bro_plugin_end() diff --git a/src/logging/writers/ascii/Plugin.cc b/src/logging/writers/ascii/Plugin.cc new file mode 100644 index 0000000000..4dcefda47b --- /dev/null +++ b/src/logging/writers/ascii/Plugin.cc @@ -0,0 +1,25 @@ +// See the file in the main distribution directory for copyright. + + +#include "plugin/Plugin.h" + +#include "Ascii.h" + +namespace plugin { +namespace Bro_AsciiWriter { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::logging::Component("Ascii", ::logging::writer::Ascii::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::AsciiWriter"; + config.description = "ASCII log writer"; + return config; + } +} plugin; + +} +} diff --git a/src/logging/writers/ascii/ascii.bif b/src/logging/writers/ascii/ascii.bif new file mode 100644 index 0000000000..2817511152 --- /dev/null +++ b/src/logging/writers/ascii/ascii.bif @@ -0,0 +1,14 @@ + +# Options for the ASCII writer. 
+ +module LogAscii; + +const output_to_stdout: bool; +const include_meta: bool; +const meta_prefix: string; +const separator: string; +const set_separator: string; +const empty_field: string; +const unset_field: string; +const use_json: bool; +const json_timestamps: JSON::TimestampFormat; diff --git a/src/logging/writers/dataseries/CMakeLists.txt b/src/logging/writers/dataseries/CMakeLists.txt new file mode 100644 index 0000000000..0917a092b0 --- /dev/null +++ b/src/logging/writers/dataseries/CMakeLists.txt @@ -0,0 +1,19 @@ + +include(BroPlugin) + +find_package(Lintel) +find_package(DataSeries) +find_package(LibXML2) + +if (NOT DISABLE_DATASERIES AND + LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND) + + include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + + bro_plugin_begin(Bro DataSeriesWriter) + bro_plugin_cc(DataSeries.cc Plugin.cc) + bro_plugin_bif(dataseries.bif) + bro_plugin_end() + +endif() + diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/dataseries/DataSeries.cc similarity index 99% rename from src/logging/writers/DataSeries.cc rename to src/logging/writers/dataseries/DataSeries.cc index 2c14a51e25..5f039ac5d5 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/dataseries/DataSeries.cc @@ -2,8 +2,6 @@ #include "config.h" -#ifdef USE_DATASERIES - #include #include #include @@ -14,6 +12,7 @@ #include "threading/SerialTypes.h" #include "DataSeries.h" +#include "dataseries.bif.h" using namespace logging; using namespace writer; @@ -458,5 +457,3 @@ bool DataSeries::DoHeartbeat(double network_time, double current_time) { return true; } - -#endif /* USE_DATASERIES */ diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/dataseries/DataSeries.h similarity index 98% rename from src/logging/writers/DataSeries.h rename to src/logging/writers/dataseries/DataSeries.h index fe095bcb37..43ad60d291 100644 --- a/src/logging/writers/DataSeries.h +++ 
b/src/logging/writers/dataseries/DataSeries.h @@ -11,8 +11,7 @@ #include #include -#include "../WriterBackend.h" -#include "threading/formatters/Ascii.h" +#include "logging/WriterBackend.h" namespace logging { namespace writer { diff --git a/src/logging/writers/dataseries/Plugin.cc b/src/logging/writers/dataseries/Plugin.cc new file mode 100644 index 0000000000..271f523ffa --- /dev/null +++ b/src/logging/writers/dataseries/Plugin.cc @@ -0,0 +1,25 @@ +// See the file in the main distribution directory for copyright. + + +#include "plugin/Plugin.h" + +#include "DataSeries.h" + +namespace plugin { +namespace Bro_DataSeriesWriter { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::logging::Component("DataSeries", ::logging::writer::DataSeries::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::DataSeriesWriter"; + config.description = "DataSeries log writer"; + return config; + } +} plugin; + +} +} diff --git a/src/logging/writers/dataseries/dataseries.bif b/src/logging/writers/dataseries/dataseries.bif new file mode 100644 index 0000000000..2c83a369f2 --- /dev/null +++ b/src/logging/writers/dataseries/dataseries.bif @@ -0,0 +1,10 @@ + +# Options for the DataSeries writer. 
+ +module LogDataSeries; + +const compression: string; +const extent_size: count; +const dump_schema: bool; +const use_integer_for_time: bool; +const num_threads: count; diff --git a/src/logging/writers/elasticsearch/CMakeLists.txt b/src/logging/writers/elasticsearch/CMakeLists.txt new file mode 100644 index 0000000000..6240b3cf63 --- /dev/null +++ b/src/logging/writers/elasticsearch/CMakeLists.txt @@ -0,0 +1,15 @@ + +include(BroPlugin) + +find_package(LibCURL) + +if (NOT DISABLE_ELASTICSEARCH AND LIBCURL_FOUND) + include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + bro_plugin_begin(Bro ElasticSearchWriter) + bro_plugin_cc(ElasticSearch.cc Plugin.cc) + bro_plugin_bif(elasticsearch.bif) + bro_plugin_link_library(${LibCURL_LIBRARIES}) + bro_plugin_end() +endif() + + diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/elasticsearch/ElasticSearch.cc similarity index 99% rename from src/logging/writers/ElasticSearch.cc rename to src/logging/writers/elasticsearch/ElasticSearch.cc index 0dd1e1097c..8cf052f4ce 100644 --- a/src/logging/writers/ElasticSearch.cc +++ b/src/logging/writers/elasticsearch/ElasticSearch.cc @@ -6,21 +6,18 @@ #include "config.h" -#ifdef USE_ELASTICSEARCH - #include "util.h" // Needs to come first for stdint.h #include #include - -#include "BroString.h" -#include "NetVar.h" -#include "threading/SerialTypes.h" - #include #include +#include "BroString.h" +#include "threading/SerialTypes.h" + #include "ElasticSearch.h" +#include "elasticsearch.bif.h" using namespace logging; using namespace writer; @@ -291,5 +288,3 @@ bool ElasticSearch::HTTPSend(CURL *handle) // The "successful" return happens above return false; } - -#endif diff --git a/src/logging/writers/ElasticSearch.h b/src/logging/writers/elasticsearch/ElasticSearch.h similarity index 98% rename from src/logging/writers/ElasticSearch.h rename to src/logging/writers/elasticsearch/ElasticSearch.h index 283fff2972..5f3d229b5b 100644 --- 
a/src/logging/writers/ElasticSearch.h +++ b/src/logging/writers/elasticsearch/ElasticSearch.h @@ -9,8 +9,9 @@ #define LOGGING_WRITER_ELASTICSEARCH_H #include + +#include "logging/WriterBackend.h" #include "threading/formatters/JSON.h" -#include "../WriterBackend.h" namespace logging { namespace writer { @@ -19,9 +20,10 @@ public: ElasticSearch(WriterFrontend* frontend); ~ElasticSearch(); + static string LogExt(); + static WriterBackend* Instantiate(WriterFrontend* frontend) { return new ElasticSearch(frontend); } - static string LogExt(); protected: // Overidden from WriterBackend. diff --git a/src/logging/writers/elasticsearch/Plugin.cc b/src/logging/writers/elasticsearch/Plugin.cc new file mode 100644 index 0000000000..2abb7080e4 --- /dev/null +++ b/src/logging/writers/elasticsearch/Plugin.cc @@ -0,0 +1,37 @@ +// See the file in the main distribution directory for copyright. + +#include + +#include "plugin/Plugin.h" + +#include "ElasticSearch.h" + +namespace plugin { +namespace Bro_ElasticSearchWriter { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::logging::Component("ElasticSearch", ::logging::writer::ElasticSearch::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::ElasticSearchWriter"; + config.description = "ElasticSearch log writer"; + return config; + } + + virtual void InitPreScript() + { + curl_global_init(CURL_GLOBAL_ALL); + } + + virtual void Done() + { + curl_global_cleanup(); + } + +} plugin; + +} +} diff --git a/src/logging/writers/elasticsearch/elasticsearch.bif b/src/logging/writers/elasticsearch/elasticsearch.bif new file mode 100644 index 0000000000..3d56dd7dd4 --- /dev/null +++ b/src/logging/writers/elasticsearch/elasticsearch.bif @@ -0,0 +1,14 @@ + +# Options for the ElasticSearch writer. 
+ +module LogElasticSearch; + +const cluster_name: string; +const server_host: string; +const server_port: count; +const index_prefix: string; +const type_prefix: string; +const transfer_timeout: interval; +const max_batch_size: count; +const max_batch_interval: interval; +const max_byte_size: count; diff --git a/src/logging/writers/none/CMakeLists.txt b/src/logging/writers/none/CMakeLists.txt new file mode 100644 index 0000000000..f6e1265772 --- /dev/null +++ b/src/logging/writers/none/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro NoneWriter) +bro_plugin_cc(None.cc Plugin.cc) +bro_plugin_bif(none.bif) +bro_plugin_end() diff --git a/src/logging/writers/None.cc b/src/logging/writers/none/None.cc similarity index 98% rename from src/logging/writers/None.cc rename to src/logging/writers/none/None.cc index 9b91b82199..0bd507e1f8 100644 --- a/src/logging/writers/None.cc +++ b/src/logging/writers/none/None.cc @@ -2,7 +2,7 @@ #include #include "None.h" -#include "NetVar.h" +#include "none.bif.h" using namespace logging; using namespace writer; diff --git a/src/logging/writers/None.h b/src/logging/writers/none/None.h similarity index 96% rename from src/logging/writers/None.h rename to src/logging/writers/none/None.h index 2a6f71a06a..fda9a35330 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/none/None.h @@ -5,7 +5,7 @@ #ifndef LOGGING_WRITER_NONE_H #define LOGGING_WRITER_NONE_H -#include "../WriterBackend.h" +#include "logging/WriterBackend.h" namespace logging { namespace writer { diff --git a/src/logging/writers/none/Plugin.cc b/src/logging/writers/none/Plugin.cc new file mode 100644 index 0000000000..f712e7408c --- /dev/null +++ b/src/logging/writers/none/Plugin.cc @@ -0,0 +1,25 @@ +// See the file in the main distribution directory for copyright. 
+ + +#include "plugin/Plugin.h" + +#include "None.h" + +namespace plugin { +namespace Bro_NoneWriter { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::logging::Component("None", ::logging::writer::None::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::NoneWriter"; + config.description = "None log writer (primarily for debugging)"; + return config; + } +} plugin; + +} +} diff --git a/src/logging/writers/none/none.bif b/src/logging/writers/none/none.bif new file mode 100644 index 0000000000..2225851c55 --- /dev/null +++ b/src/logging/writers/none/none.bif @@ -0,0 +1,6 @@ + +# Options for the None writer. + +module LogNone; + +const debug: bool; diff --git a/src/logging/writers/sqlite/CMakeLists.txt b/src/logging/writers/sqlite/CMakeLists.txt new file mode 100644 index 0000000000..ce25251679 --- /dev/null +++ b/src/logging/writers/sqlite/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro SQLiteWriter) +bro_plugin_cc(SQLite.cc Plugin.cc) +bro_plugin_bif(sqlite.bif) +bro_plugin_end() diff --git a/src/logging/writers/sqlite/Plugin.cc b/src/logging/writers/sqlite/Plugin.cc new file mode 100644 index 0000000000..75e6497c99 --- /dev/null +++ b/src/logging/writers/sqlite/Plugin.cc @@ -0,0 +1,25 @@ +// See the file in the main distribution directory for copyright. 
+ + +#include "plugin/Plugin.h" + +#include "SQLite.h" + +namespace plugin { +namespace Bro_SQLiteWriter { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::logging::Component("SQLite", ::logging::writer::SQLite::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::SQLiteWriter"; + config.description = "SQLite log writer"; + return config; + } +} plugin; + +} +} diff --git a/src/logging/writers/SQLite.cc b/src/logging/writers/sqlite/SQLite.cc similarity index 99% rename from src/logging/writers/SQLite.cc rename to src/logging/writers/sqlite/SQLite.cc index 44d01ec73f..090810055d 100644 --- a/src/logging/writers/SQLite.cc +++ b/src/logging/writers/sqlite/SQLite.cc @@ -6,10 +6,10 @@ #include #include -#include "../../NetVar.h" -#include "../../threading/SerialTypes.h" +#include "threading/SerialTypes.h" #include "SQLite.h" +#include "sqlite.bif.h" using namespace logging; using namespace writer; diff --git a/src/logging/writers/SQLite.h b/src/logging/writers/sqlite/SQLite.h similarity index 97% rename from src/logging/writers/SQLite.h rename to src/logging/writers/sqlite/SQLite.h index a962e903ff..a820530456 100644 --- a/src/logging/writers/SQLite.h +++ b/src/logging/writers/sqlite/SQLite.h @@ -7,8 +7,7 @@ #include "config.h" -#include "../WriterBackend.h" - +#include "logging/WriterBackend.h" #include "threading/formatters/Ascii.h" #include "3rdparty/sqlite3.h" diff --git a/src/logging/writers/sqlite/sqlite.bif b/src/logging/writers/sqlite/sqlite.bif new file mode 100644 index 0000000000..29b93f3a0c --- /dev/null +++ b/src/logging/writers/sqlite/sqlite.bif @@ -0,0 +1,9 @@ + +# Options for the SQLite writer + +module LogSQLite; + +const set_separator: string; +const empty_field: string; +const unset_field: string; + diff --git a/src/main.cc b/src/main.cc index d1d60218f3..cdac3ec088 100644 --- a/src/main.cc +++ b/src/main.cc @@ -12,10 +12,6 @@ #include #endif -#ifdef USE_CURL -#include 
-#endif - #ifdef USE_IDMEF extern "C" { #include @@ -54,8 +50,8 @@ extern "C" void OPENSSL_add_all_algorithms_conf(void); #include "threading/Manager.h" #include "input/Manager.h" #include "logging/Manager.h" -#include "logging/writers/Ascii.h" -#include "input/readers/Raw.h" +#include "logging/writers/ascii/Ascii.h" +#include "input/readers/raw/Raw.h" #include "analyzer/Manager.h" #include "analyzer/Tag.h" #include "plugin/Manager.h" @@ -227,25 +223,6 @@ void usage() fprintf(stderr, " $BRO_PROFILER_FILE | Output file for script execution statistics (not set)\n"); fprintf(stderr, " $BRO_DISABLE_BROXYGEN | Disable Broxygen documentation support (%s)\n", getenv("BRO_DISABLE_BROXYGEN") ? "set" : "not set"); - fprintf(stderr, "\n"); - fprintf(stderr, " Supported log formats: "); - - bool first = true; - list fmts = logging::Manager::SupportedFormats(); - - for ( list::const_iterator i = fmts.begin(); i != fmts.end(); ++i ) - { - if ( *i == "None" ) - // Skip, it's uninteresting. - continue; - - if ( ! 
first ) - fprintf(stderr, ","); - - fprintf(stderr, "%s", (*i).c_str()); - first = false; - } - fprintf(stderr, "\n"); exit(1); @@ -813,10 +790,6 @@ int main(int argc, char** argv) SSL_library_init(); SSL_load_error_strings(); -#ifdef USE_CURL - curl_global_init(CURL_GLOBAL_ALL); -#endif - int r = sqlite3_initialize(); if ( r != SQLITE_OK ) @@ -897,8 +870,6 @@ int main(int argc, char** argv) init_event_handlers(); - input::reader::Raw::ClassInit(); - md5_type = new OpaqueType("md5"); sha1_type = new OpaqueType("sha1"); sha256_type = new OpaqueType("sha256"); @@ -1252,10 +1223,6 @@ int main(int argc, char** argv) done_with_network(); net_delete(); -#ifdef USE_CURL - curl_global_cleanup(); -#endif - terminate_bro(); sqlite3_shutdown(); diff --git a/src/plugin/ComponentManager.h b/src/plugin/ComponentManager.h index 0427c1d919..1d9bd8bcda 100644 --- a/src/plugin/ComponentManager.h +++ b/src/plugin/ComponentManager.h @@ -27,13 +27,16 @@ class ComponentManager { public: /** - * Constructor creates a new enum type called a "Tag" to associate with + * Constructor creates a new enum type to associate with * a component. * - * @param module The script-layer module in which to install the "Tag" ID + * @param module The script-layer module in which to install the ID * representing an enum type. + * + * @param local_id The local part of the ID of the new enum type + * (e.g., "Tag"). */ - ComponentManager(const string& module); + ComponentManager(const string& module, const string& local_id); /** * @return The script-layer module in which the component's "Tag" ID lives. 
@@ -125,13 +128,15 @@ private: }; template -ComponentManager::ComponentManager(const string& arg_module) +ComponentManager::ComponentManager(const string& arg_module, const string& local_id) : module(arg_module) { - tag_enum_type = new EnumType(module + "::Tag"); - ::ID* id = install_ID("Tag", module.c_str(), true, true); + tag_enum_type = new EnumType(module + "::" + local_id); + ::ID* id = install_ID(local_id.c_str(), module.c_str(), true, true); add_type(id, tag_enum_type, 0); broxygen_mgr->Identifier(id); + + // fprintf(stderr, "Enum: %s\n", id->Name()); } template @@ -241,6 +246,7 @@ void ComponentManager::RegisterComponent(C* component, string id = fmt("%s%s", prefix.c_str(), cname.c_str()); tag_enum_type->AddName(module, id.c_str(), component->Tag().AsEnumVal()->InternalInt(), true); + // fprintf(stderr, "Enum item: %s/%s\n", module.c_str(), id.c_str()); } } // namespace plugin diff --git a/src/types.bif b/src/types.bif index a44c3c1615..99df67c9d5 100644 --- a/src/types.bif +++ b/src/types.bif @@ -163,21 +163,6 @@ type ModbusHeaders: record; type ModbusCoils: vector; type ModbusRegisters: vector; -module Log; - -enum Writer %{ - WRITER_DEFAULT, - WRITER_NONE, - WRITER_ASCII, - WRITER_DATASERIES, - WRITER_SQLITE, - WRITER_ELASTICSEARCH, -%} - -enum ID %{ - Unknown, -%} - module Tunnel; enum Type %{ NONE, @@ -191,29 +176,6 @@ enum Type %{ type EncapsulatingConn: record; -module Input; - -enum Reader %{ - READER_DEFAULT, - READER_ASCII, - READER_RAW, - READER_BENCHMARK, - READER_BINARY, - READER_SQLITE, -%} - -enum Event %{ - EVENT_NEW, - EVENT_CHANGED, - EVENT_REMOVED, -%} - -enum Mode %{ - MANUAL = 0, - REREAD = 1, - STREAM = 2, -%} - module GLOBAL; type gtpv1_hdr: record; diff --git a/testing/btest/core/leaks/dataseries-rotate.bro b/testing/btest/core/leaks/dataseries-rotate.bro index 0d4b5d6f95..f43823cd5f 100644 --- a/testing/btest/core/leaks/dataseries-rotate.bro +++ b/testing/btest/core/leaks/dataseries-rotate.bro @@ -1,5 +1,5 @@ # -# @TEST-REQUIRES: 
has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks diff --git a/testing/btest/core/leaks/dataseries.bro b/testing/btest/core/leaks/dataseries.bro index fcb5782f4e..c5556accf2 100644 --- a/testing/btest/core/leaks/dataseries.bro +++ b/testing/btest/core/leaks/dataseries.bro @@ -1,6 +1,6 @@ # Needs perftools support. # -# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro index fc3752a168..1b05ca9b8f 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro @@ -1,5 +1,5 @@ # -# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt # @TEST-GROUP: dataseries # # @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro index 7b708473e3..ebc7a15002 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro @@ -1,5 +1,5 @@ # -# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt # @TEST-GROUP: dataseries # # @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT 2>&1 Log::default_writer=Log::WRITER_DATASERIES | grep "test" >out diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro 
b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro index ee0426ae55..c030c58861 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro @@ -1,5 +1,5 @@ # -# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt # @TEST-GROUP: dataseries # # @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro index 5e3f864b33..87d33a46d1 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro @@ -1,5 +1,5 @@ # -# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt # @TEST-GROUP: dataseries # # @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro index ee1342c470..15e5ba16f1 100644 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro +++ b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro @@ -1,5 +1,5 @@ # -# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt # @TEST-GROUP: dataseries # # @TEST-EXEC: bro -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/error.bro b/testing/btest/scripts/base/frameworks/logging/sqlite/error.bro index 2e5d22f188..e48e066c6c 100644 --- a/testing/btest/scripts/base/frameworks/logging/sqlite/error.bro +++ 
b/testing/btest/scripts/base/frameworks/logging/sqlite/error.bro @@ -1,6 +1,6 @@ # # @TEST-REQUIRES: which sqlite3 -# @TEST-REQUIRES: has-writer SQLite +# @TEST-REQUIRES: has-writer Bro::SQLiteWriter # @TEST-GROUP: sqlite # # @TEST-EXEC: cat ssh.sql | sqlite3 ssh.sqlite diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/set.bro b/testing/btest/scripts/base/frameworks/logging/sqlite/set.bro index 7507316996..0cceb7af08 100644 --- a/testing/btest/scripts/base/frameworks/logging/sqlite/set.bro +++ b/testing/btest/scripts/base/frameworks/logging/sqlite/set.bro @@ -3,7 +3,7 @@ # chance of being off by one if someone changes it). # # @TEST-REQUIRES: which sqlite3 -# @TEST-REQUIRES: has-writer SQLite +# @TEST-REQUIRES: has-writer Bro::SQLiteWriter # @TEST-GROUP: sqlite # # @TEST-EXEC: bro -b %INPUT diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/types.bro b/testing/btest/scripts/base/frameworks/logging/sqlite/types.bro index 7c896a7192..6c088e9f2f 100644 --- a/testing/btest/scripts/base/frameworks/logging/sqlite/types.bro +++ b/testing/btest/scripts/base/frameworks/logging/sqlite/types.bro @@ -1,6 +1,6 @@ # # @TEST-REQUIRES: which sqlite3 -# @TEST-REQUIRES: has-writer SQLite +# @TEST-REQUIRES: has-writer Bro::SQLiteWriter # @TEST-GROUP: sqlite # # @TEST-EXEC: bro -b %INPUT diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/wikipedia.bro b/testing/btest/scripts/base/frameworks/logging/sqlite/wikipedia.bro index b48520440a..e45c42d7e2 100644 --- a/testing/btest/scripts/base/frameworks/logging/sqlite/wikipedia.bro +++ b/testing/btest/scripts/base/frameworks/logging/sqlite/wikipedia.bro @@ -1,6 +1,6 @@ # # @TEST-REQUIRES: which sqlite3 -# @TEST-REQUIRES: has-writer SQLite +# @TEST-REQUIRES: has-writer Bro::SQLiteWriter # @TEST-GROUP: sqlite # # @TEST-EXEC: bro -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_SQLITE diff --git a/testing/scripts/has-writer b/testing/scripts/has-writer index 683d31041f..4c5f38a6bb 100755 
--- a/testing/scripts/has-writer +++ b/testing/scripts/has-writer @@ -1,6 +1,6 @@ #! /usr/bin/env bash # # Returns true if Bro has been compiled with support for writer type -# $1. The type name must match what "bro --help" prints. +# $1. The type name must match the plugin name that "bro -N" prints. -bro --helper 2>&1 | grep -qi "Supported log formats:.*$1" +bro -N $1 >/dev/null From 22aa821506b094134cb814a0c898bea8d7632649 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 31 Jul 2014 10:49:33 -0500 Subject: [PATCH 006/106] Split the types and attributes reference doc into two docs Also moved them up in the index so that the more fundamental material comes before the more advanced material in the table of contents. --- doc/script-reference/attributes.rst | 139 +++++++++++++++++ doc/script-reference/index.rst | 3 +- .../{builtins.rst => types.rst} | 144 +----------------- 3 files changed, 142 insertions(+), 144 deletions(-) create mode 100644 doc/script-reference/attributes.rst rename doc/script-reference/{builtins.rst => types.rst} (84%) diff --git a/doc/script-reference/attributes.rst b/doc/script-reference/attributes.rst new file mode 100644 index 0000000000..ca66ab2112 --- /dev/null +++ b/doc/script-reference/attributes.rst @@ -0,0 +1,139 @@ +Attributes +========== + +Attributes occur at the end of type or event declarations and change their +behavior. The syntax is ``&key`` or ``&key=val``, e.g., ``type T: +set[count] &read_expire=5min`` or ``event foo() &priority=-3``. The Bro +scripting language supports the following attributes. + +.. bro:attr:: &optional + + Allows a record field to be missing. For example the type ``record { + a: addr; b: port &optional; }`` could be instantiated both as + singleton ``[$a=127.0.0.1]`` or pair ``[$a=127.0.0.1, $b=80/tcp]``. + +.. bro:attr:: &default + + Uses a default value for a record field, a function/hook/event + parameter, or container elements. 
For example, ``table[int] of + string &default="foo"`` would create a table that returns the + :bro:type:`string` ``"foo"`` for any non-existing index. + +.. bro:attr:: &redef + + Allows for redefinition of initial object values. This is typically + used with constants, for example, ``const clever = T &redef;`` would + allow the constant to be redefined at some later point during script + execution. + +.. bro:attr:: &rotate_interval + + Rotates a file after a specified interval. + +.. bro:attr:: &rotate_size + + Rotates a file after it has reached a given size in bytes. + +.. bro:attr:: &add_func + + Can be applied to an identifier with &redef to specify a function to + be called any time a "redef += ..." declaration is parsed. The + function takes two arguments of the same type as the identifier, the first + being the old value of the variable and the second being the new + value given after the "+=" operator in the "redef" declaration. The + return value of the function will be the actual new value of the + variable after the "redef" declaration is parsed. + +.. bro:attr:: &delete_func + + Same as &add_func, except for "redef" declarations that use the "-=" + operator. + +.. bro:attr:: &expire_func + + Called right before a container element expires. The function's + first parameter is of the same type of the container and the second + parameter the same type of the container's index. The return + value is an :bro:type:`interval` indicating the amount of additional + time to wait before expiring the container element at the given + index (which will trigger another execution of this function). + +.. bro:attr:: &read_expire + + Specifies a read expiration timeout for container elements. That is, + the element expires after the given amount of time since the last + time it has been read. Note that a write also counts as a read. + +.. bro:attr:: &write_expire + + Specifies a write expiration timeout for container elements. 
That + is, the element expires after the given amount of time since the + last time it has been written. + +.. bro:attr:: &create_expire + + Specifies a creation expiration timeout for container elements. That + is, the element expires after the given amount of time since it has + been inserted into the container, regardless of any reads or writes. + +.. bro:attr:: &persistent + + Makes a variable persistent, i.e., its value is written to disk (per + default at shutdown time). + +.. bro:attr:: &synchronized + + Synchronizes variable accesses across nodes. The value of a + ``&synchronized`` variable is automatically propagated to all peers + when it changes. + +.. bro:attr:: &encrypt + + Encrypts files right before writing them to disk. + +.. TODO: needs to be documented in more detail. + +.. bro:attr:: &raw_output + + Opens a file in raw mode, i.e., non-ASCII characters are not + escaped. + +.. bro:attr:: &mergeable + + Prefers set union to assignment for synchronized state. This + attribute is used in conjunction with :bro:attr:`&synchronized` + container types: when the same container is updated at two peers + with different value, the propagation of the state causes a race + condition, where the last update succeeds. This can cause + inconsistencies and can be avoided by unifying the two sets, rather + than merely overwriting the old value. + +.. bro:attr:: &priority + + Specifies the execution priority (as a signed integer) of a hook or + event handler. Higher values are executed before lower ones. The + default value is 0. + +.. bro:attr:: &group + + Groups event handlers such that those in the same group can be + jointly activated or deactivated. + +.. bro:attr:: &log + + Writes a record field to the associated log stream. + +.. bro:attr:: &error_handler + + Internally set on the events that are associated with the reporter + framework: :bro:id:`reporter_info`, :bro:id:`reporter_warning`, and + :bro:id:`reporter_error`. 
It prevents any handlers of those events + from being able to generate reporter messages that go through any of + those events (i.e., it prevents an infinite event recursion). Instead, + such nested reporter messages are output to stderr. + +.. bro:attr:: &type_column + + Used by the input framework. It can be used on columns of type + :bro:type:`port` and specifies the name of an additional column in + the input file which specifies the protocol of the port (tcp/udp/icmp). diff --git a/doc/script-reference/index.rst b/doc/script-reference/index.rst index bd600e4a97..a2c6f0a24f 100644 --- a/doc/script-reference/index.rst +++ b/doc/script-reference/index.rst @@ -5,10 +5,11 @@ Script Reference .. toctree:: :maxdepth: 1 + types + attributes notices proto-analyzers file-analyzers - builtins packages scripts Broxygen Example Script diff --git a/doc/script-reference/builtins.rst b/doc/script-reference/types.rst similarity index 84% rename from doc/script-reference/builtins.rst rename to doc/script-reference/types.rst index 85e9cd14c8..049b43c04a 100644 --- a/doc/script-reference/builtins.rst +++ b/doc/script-reference/types.rst @@ -1,8 +1,5 @@ -Types and Attributes -==================== - Types ------ +===== Every value in a Bro script has a type (see below for a list of all built-in types). Although Bro variables have static types (meaning that their type @@ -859,142 +856,3 @@ The Bro scripting language supports the following built-in types. executed due to one handler body exiting as a result of a ``break`` statement. -Attributes ----------- - -Attributes occur at the end of type/event declarations and change their -behavior. The syntax is ``&key`` or ``&key=val``, e.g., ``type T: -set[count] &read_expire=5min`` or ``event foo() &priority=-3``. The Bro -scripting language supports the following built-in attributes. - -.. bro:attr:: &optional - - Allows a record field to be missing. 
For example the type ``record { - a: addr; b: port &optional; }`` could be instantiated both as - singleton ``[$a=127.0.0.1]`` or pair ``[$a=127.0.0.1, $b=80/tcp]``. - -.. bro:attr:: &default - - Uses a default value for a record field, a function/hook/event - parameter, or container elements. For example, ``table[int] of - string &default="foo"`` would create a table that returns the - :bro:type:`string` ``"foo"`` for any non-existing index. - -.. bro:attr:: &redef - - Allows for redefinition of initial object values. This is typically - used with constants, for example, ``const clever = T &redef;`` would - allow the constant to be redefined at some later point during script - execution. - -.. bro:attr:: &rotate_interval - - Rotates a file after a specified interval. - -.. bro:attr:: &rotate_size - - Rotates a file after it has reached a given size in bytes. - -.. bro:attr:: &add_func - - Can be applied to an identifier with &redef to specify a function to - be called any time a "redef += ..." declaration is parsed. The - function takes two arguments of the same type as the identifier, the first - being the old value of the variable and the second being the new - value given after the "+=" operator in the "redef" declaration. The - return value of the function will be the actual new value of the - variable after the "redef" declaration is parsed. - -.. bro:attr:: &delete_func - - Same as &add_func, except for "redef" declarations that use the "-=" - operator. - -.. bro:attr:: &expire_func - - Called right before a container element expires. The function's - first parameter is of the same type of the container and the second - parameter the same type of the container's index. The return - value is an :bro:type:`interval` indicating the amount of additional - time to wait before expiring the container element at the given - index (which will trigger another execution of this function). - -.. 
bro:attr:: &read_expire - - Specifies a read expiration timeout for container elements. That is, - the element expires after the given amount of time since the last - time it has been read. Note that a write also counts as a read. - -.. bro:attr:: &write_expire - - Specifies a write expiration timeout for container elements. That - is, the element expires after the given amount of time since the - last time it has been written. - -.. bro:attr:: &create_expire - - Specifies a creation expiration timeout for container elements. That - is, the element expires after the given amount of time since it has - been inserted into the container, regardless of any reads or writes. - -.. bro:attr:: &persistent - - Makes a variable persistent, i.e., its value is written to disk (per - default at shutdown time). - -.. bro:attr:: &synchronized - - Synchronizes variable accesses across nodes. The value of a - ``&synchronized`` variable is automatically propagated to all peers - when it changes. - -.. bro:attr:: &encrypt - - Encrypts files right before writing them to disk. - -.. TODO: needs to be documented in more detail. - -.. bro:attr:: &raw_output - - Opens a file in raw mode, i.e., non-ASCII characters are not - escaped. - -.. bro:attr:: &mergeable - - Prefers set union to assignment for synchronized state. This - attribute is used in conjunction with :bro:attr:`&synchronized` - container types: when the same container is updated at two peers - with different value, the propagation of the state causes a race - condition, where the last update succeeds. This can cause - inconsistencies and can be avoided by unifying the two sets, rather - than merely overwriting the old value. - -.. bro:attr:: &priority - - Specifies the execution priority (as a signed integer) of a hook or - event handler. Higher values are executed before lower ones. The - default value is 0. - -.. 
bro:attr:: &group - - Groups event handlers such that those in the same group can be - jointly activated or deactivated. - -.. bro:attr:: &log - - Writes a record field to the associated log stream. - -.. bro:attr:: &error_handler - - Internally set on the events that are associated with the reporter - framework: :bro:id:`reporter_info`, :bro:id:`reporter_warning`, and - :bro:id:`reporter_error`. It prevents any handlers of those events - from being able to generate reporter messages that go through any of - those events (i.e., it prevents an infinite event recursion). Instead, - such nested reporter messages are output to stderr. - -.. bro:attr:: &type_column - - Used by the input framework. It can be used on columns of type - :bro:type:`port` and specifies the name of an additional column in - the input file which specifies the protocol of the port (tcp/udp/icmp). From aa731eeaecbea1c44f52bc58a08deaff691ff450 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 31 Jul 2014 10:49:50 -0700 Subject: [PATCH 007/106] Final fixes, preparing for merge. 
--- src/CMakeLists.txt | 9 +++++---- src/Val.cc | 8 -------- src/input/CMakeLists.txt | 1 + src/logging/CMakeLists.txt | 1 + src/logging/writers/dataseries/CMakeLists.txt | 7 +++++++ src/logging/writers/dataseries/DataSeries.h | 1 + 6 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d2a272b467..04867b7189 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -153,16 +153,17 @@ list(APPEND BINPAC_OUTPUTS "${BINPAC_OUTPUT_CC}") set(bro_SUBDIR_LIBS CACHE INTERNAL "subdir libraries" FORCE) set(bro_PLUGIN_LIBS CACHE INTERNAL "plugin libraries" FORCE) +add_subdirectory(analyzer) +add_subdirectory(broxygen) +add_subdirectory(file_analysis) add_subdirectory(input) add_subdirectory(logging) -add_subdirectory(analyzer) -add_subdirectory(file_analysis) add_subdirectory(probabilistic) -add_subdirectory(broxygen) set(bro_SUBDIRS - ${bro_SUBDIR_LIBS} + # Order is important here. ${bro_PLUGIN_LIBS} + ${bro_SUBDIR_LIBS} ) if ( NOT bro_HAVE_OBJECT_LIBRARIES ) diff --git a/src/Val.cc b/src/Val.cc index ad7a920010..5f605a178e 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -2920,15 +2920,7 @@ void EnumVal::ValDescribe(ODesc* d) const const char* ename = type->AsEnumType()->Lookup(val.int_val); if ( ! 
ename ) - { - EnumType::enum_name_list l = type->AsEnumType()->Names(); - - for ( EnumType::enum_name_list::const_iterator iter = l.begin(); - iter != l.end(); ++iter ) - fprintf(stderr, "%s -> %lld\n", iter->first.c_str(), iter->second); - ename = ""; - } d->Add(ename); } diff --git a/src/input/CMakeLists.txt b/src/input/CMakeLists.txt index 6ef56cc65e..b1c79d2bd0 100644 --- a/src/input/CMakeLists.txt +++ b/src/input/CMakeLists.txt @@ -19,4 +19,5 @@ set(input_SRCS bif_target(input.bif) bro_add_subdir_library(input ${input_SRCS} ${BIF_OUTPUT_CC}) +add_dependencies(bro_input generate_outputs) diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt index 24dbc9860b..f7ed586014 100644 --- a/src/logging/CMakeLists.txt +++ b/src/logging/CMakeLists.txt @@ -19,4 +19,5 @@ set(logging_SRCS bif_target(logging.bif) bro_add_subdir_library(logging ${logging_SRCS} ${BIF_OUTPUT_CC}) +add_dependencies(bro_logging generate_outputs) diff --git a/src/logging/writers/dataseries/CMakeLists.txt b/src/logging/writers/dataseries/CMakeLists.txt index 0917a092b0..f7f54e4c0f 100644 --- a/src/logging/writers/dataseries/CMakeLists.txt +++ b/src/logging/writers/dataseries/CMakeLists.txt @@ -10,9 +10,16 @@ if (NOT DISABLE_DATASERIES AND include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + include_directories(BEFORE ${Lintel_INCLUDE_DIR}) + include_directories(BEFORE ${DataSeries_INCLUDE_DIR}) + include_directories(BEFORE ${LibXML2_INCLUDE_DIR}) + bro_plugin_begin(Bro DataSeriesWriter) bro_plugin_cc(DataSeries.cc Plugin.cc) bro_plugin_bif(dataseries.bif) + bro_plugin_link_library(${Lintel_LIBRARIES}) + bro_plugin_link_library(${DataSeries_LIBRARIES}) + bro_plugin_link_library(${LibXML2_LIBRARIES}) bro_plugin_end() endif() diff --git a/src/logging/writers/dataseries/DataSeries.h b/src/logging/writers/dataseries/DataSeries.h index 43ad60d291..cdc4d9a66c 100644 --- a/src/logging/writers/dataseries/DataSeries.h +++ 
b/src/logging/writers/dataseries/DataSeries.h @@ -12,6 +12,7 @@ #include #include "logging/WriterBackend.h" +#include "threading/formatters/Ascii.h" namespace logging { namespace writer { From f45526f3735132c7b85b4a0ffba46f703fa62ba3 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 31 Jul 2014 11:40:25 -0700 Subject: [PATCH 008/106] Test case for a dynamic log writer. --- cmake | 2 +- testing/btest/Baseline/plugins.writer/output | 22 +++++++++++ .../btest/plugins/writer-plugin/.btest-ignore | 0 .../plugins/writer-plugin/CMakeLists.txt | 17 ++++++++ .../btest/plugins/writer-plugin/src/Foo.cc | 31 +++++++++++++++ testing/btest/plugins/writer-plugin/src/Foo.h | 39 +++++++++++++++++++ .../btest/plugins/writer-plugin/src/Plugin.cc | 19 +++++++++ .../btest/plugins/writer-plugin/src/Plugin.h | 22 +++++++++++ testing/btest/plugins/writer.bro | 8 ++++ 9 files changed, 159 insertions(+), 1 deletion(-) create mode 100644 testing/btest/Baseline/plugins.writer/output create mode 100644 testing/btest/plugins/writer-plugin/.btest-ignore create mode 100644 testing/btest/plugins/writer-plugin/CMakeLists.txt create mode 100644 testing/btest/plugins/writer-plugin/src/Foo.cc create mode 100644 testing/btest/plugins/writer-plugin/src/Foo.h create mode 100644 testing/btest/plugins/writer-plugin/src/Plugin.cc create mode 100644 testing/btest/plugins/writer-plugin/src/Plugin.h create mode 100644 testing/btest/plugins/writer.bro diff --git a/cmake b/cmake index fb67896d2c..f2e8ba6b90 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit fb67896d2c3fe61c6479e78c3e9b12de8637be87 +Subproject commit f2e8ba6b90b3a2da9f1f77c55d0e718c25376bbb diff --git a/testing/btest/Baseline/plugins.writer/output b/testing/btest/Baseline/plugins.writer/output new file mode 100644 index 0000000000..f737e892a0 --- /dev/null +++ b/testing/btest/Baseline/plugins.writer/output @@ -0,0 +1,22 @@ +Demo::Foo - A Foo test logging writer (dynamic, version 1.0) + [Writer] Foo (Log::WRITER_FOO) + +=== 
+[packet_filter] 1406831942.605829|bro|ip or not ip|T|T +[conn] 1340213005.165293|CXWv6p3arKYeMETxOg|10.0.0.55|53994|60.190.189.214|8124|tcp|-|4.314406|0|0|S0|-|0|S|5|320|0|0| +[tunnel] 1340213015.276495|-|10.0.0.55|0|60.190.189.214|8124|Tunnel::SOCKS|Tunnel::DISCOVER +[socks] 1340213015.276495|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|5|-|succeeded|-|www.osnews.com|80|192.168.0.31|-|2688 +[http] 1340213019.013158|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|1|GET|www.osnews.com|/images/printer2.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213019.013426|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|2|GET|www.osnews.com|/img2/shorturl.jpg|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213019.580162|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|3|GET|www.osnews.com|/images/icons/9.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213020.155861|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|4|GET|www.osnews.com|/images/icons/26.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|1368|200|OK|-|-|-||-|-|-|-|-|FBtZ7y1ppK8iIeY622|image/gif +[files] 1340213020.732581|FBtZ7y1ppK8iIeY622|60.190.189.214|10.0.0.55|CjhGID4nQcgTWjvg4c|HTTP|0||image/gif|-|0.000000|-|F|1368|1368|0|0|F|-|-|-|-|- +[http] 1340213020.732963|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|5|GET|www.osnews.com|/images/icons/17.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 
1340213021.300269|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|6|GET|www.osnews.com|/images/left.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[conn] 1340213010.582723|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|tcp|http,socks|13.839419|3860|2934|SF|-|0|ShADadfF|23|5080|20|3986| +[http] 1340213021.861584|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|7|GET|www.osnews.com|/images/icons/32.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[conn] 1340213048.780152|CCvvfg3TEfuqmmG4bh|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| +[conn] 1340213097.272764|CsRx2w45OKnoww6xl4|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| +[conn] 1340213162.160367|CRJuHdVW0XPVINV8a|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| +[conn] 1340213226.561757|CPbrpk1qSsw6ESzHV4|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| +[conn] 1340213290.981995|C6pKV8GSxOnSLghOa|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| diff --git a/testing/btest/plugins/writer-plugin/.btest-ignore b/testing/btest/plugins/writer-plugin/.btest-ignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/btest/plugins/writer-plugin/CMakeLists.txt b/testing/btest/plugins/writer-plugin/CMakeLists.txt new file mode 100644 index 0000000000..2234907ad2 --- /dev/null +++ b/testing/btest/plugins/writer-plugin/CMakeLists.txt @@ -0,0 +1,17 @@ + +project(Bro-Plugin-Demo-Foo) + +cmake_minimum_required(VERSION 2.6.3) + +if ( NOT BRO_DIST ) + message(FATAL_ERROR "BRO_DIST not set") +endif () + +set(CMAKE_MODULE_PATH ${BRO_DIST}/cmake) + +include(BroPlugin) + +bro_plugin_begin(Demo Foo) +bro_plugin_cc(src/Plugin.cc) +bro_plugin_cc(src/Foo.cc) +bro_plugin_end() diff --git 
a/testing/btest/plugins/writer-plugin/src/Foo.cc b/testing/btest/plugins/writer-plugin/src/Foo.cc new file mode 100644 index 0000000000..891a17422b --- /dev/null +++ b/testing/btest/plugins/writer-plugin/src/Foo.cc @@ -0,0 +1,31 @@ + +#include "Foo.h" + +using namespace logging; +using namespace writer; + +bool Foo::DoInit(const WriterInfo& info, int num_fields, + const threading::Field* const * fields) + { + desc.EnableEscaping(); + desc.AddEscapeSequence("|"); + threading::formatter::Ascii::SeparatorInfo sep_info("|", ",", "-", ""); + formatter = new threading::formatter::Ascii(this, sep_info); + path = info.path; + + return true; + } + +bool Foo::DoWrite(int num_fields, const threading::Field* const* fields, + threading::Value** vals) + { + desc.Clear(); + + if ( ! formatter->Describe(&desc, num_fields, fields, vals) ) + return false; + + printf("[%s] %s\n", path.c_str(), desc.Description()); + + return true; + } + diff --git a/testing/btest/plugins/writer-plugin/src/Foo.h b/testing/btest/plugins/writer-plugin/src/Foo.h new file mode 100644 index 0000000000..8710863990 --- /dev/null +++ b/testing/btest/plugins/writer-plugin/src/Foo.h @@ -0,0 +1,39 @@ + +#ifndef BRO_PLUGIN_DEMO_FOO_H +#define BRO_PLUGIN_DEMO_FOO_H + +#include "logging/WriterBackend.h" +#include "threading/formatters/Ascii.h" + +namespace logging { namespace writer { + +class Foo : public WriterBackend { +public: + Foo(WriterFrontend* frontend) : WriterBackend(frontend) {} + ~Foo() {}; + + static WriterBackend* Instantiate(WriterFrontend* frontend) + { return new Foo(frontend); } + +protected: + virtual bool DoInit(const WriterInfo& info, int num_fields, + const threading::Field* const * fields); + + virtual bool DoWrite(int num_fields, const threading::Field* const* fields, + threading::Value** vals); + virtual bool DoSetBuf(bool enabled) { return true; } + virtual bool DoRotate(const char* rotated_path, double open, + double close, bool terminating) { return true; } + virtual bool DoFlush(double 
network_time) { return true; } + virtual bool DoFinish(double network_time) { return true; } + virtual bool DoHeartbeat(double network_time, double current_time) { return true; } + +private: + string path; + ODesc desc; + threading::formatter::Formatter* formatter; +}; + +} } + +#endif diff --git a/testing/btest/plugins/writer-plugin/src/Plugin.cc b/testing/btest/plugins/writer-plugin/src/Plugin.cc new file mode 100644 index 0000000000..e07e071204 --- /dev/null +++ b/testing/btest/plugins/writer-plugin/src/Plugin.cc @@ -0,0 +1,19 @@ +#include "Plugin.h" + +#include "Foo.h" + +namespace plugin { namespace Demo_Foo { Plugin plugin; } } + +using namespace plugin::Demo_Foo; + +plugin::Configuration Plugin::Configure() + { + AddComponent(new ::logging::Component("Foo", ::logging::writer::Foo::Instantiate)); + + plugin::Configuration config; + config.name = "Demo::Foo"; + config.description = "A Foo test logging writer"; + config.version.major = 1; + config.version.minor = 0; + return config; + } diff --git a/testing/btest/plugins/writer-plugin/src/Plugin.h b/testing/btest/plugins/writer-plugin/src/Plugin.h new file mode 100644 index 0000000000..c65eac01a1 --- /dev/null +++ b/testing/btest/plugins/writer-plugin/src/Plugin.h @@ -0,0 +1,22 @@ + +#ifndef BRO_PLUGIN_DEMO_FOO +#define BRO_PLUGIN_DEMO_FOO + +#include + +namespace plugin { +namespace Demo_Foo { + +class Plugin : public ::plugin::Plugin +{ +protected: + // Overridden from plugin::Plugin. + virtual plugin::Configuration Configure(); +}; + +extern Plugin plugin; + +} +} + +#endif diff --git a/testing/btest/plugins/writer.bro b/testing/btest/plugins/writer.bro new file mode 100644 index 0000000000..49fbbb9395 --- /dev/null +++ b/testing/btest/plugins/writer.bro @@ -0,0 +1,8 @@ +# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo +# @TEST-EXEC: cp -r %DIR/writer-plugin/* . 
+# @TEST-EXEC: make BRO=${DIST} +# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/socks.trace Log::default_writer=Log::WRITER_FOO %INPUT >>output +# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output + From 3d1442e86b60d7bea4698b97a1dc91a75d3e70f4 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 31 Jul 2014 12:04:27 -0700 Subject: [PATCH 009/106] Test case for a dynamic input reader. --- .../canonified_loaded_scripts.log | 82 ++++---- .../canonified_loaded_scripts.log | 82 ++++---- testing/btest/Baseline/plugins.reader/out | 10 + testing/btest/Baseline/plugins.reader/output | 4 + .../btest/plugins/reader-plugin/.btest-ignore | 0 .../plugins/reader-plugin/CMakeLists.txt | 17 ++ .../btest/plugins/reader-plugin/src/Foo.cc | 185 ++++++++++++++++++ testing/btest/plugins/reader-plugin/src/Foo.h | 34 ++++ .../btest/plugins/reader-plugin/src/Plugin.cc | 19 ++ .../btest/plugins/reader-plugin/src/Plugin.h | 22 +++ testing/btest/plugins/reader.bro | 40 ++++ 11 files changed, 423 insertions(+), 72 deletions(-) create mode 100644 testing/btest/Baseline/plugins.reader/out create mode 100644 testing/btest/Baseline/plugins.reader/output create mode 100644 testing/btest/plugins/reader-plugin/.btest-ignore create mode 100644 testing/btest/plugins/reader-plugin/CMakeLists.txt create mode 100644 testing/btest/plugins/reader-plugin/src/Foo.cc create mode 100644 testing/btest/plugins/reader-plugin/src/Foo.h create mode 100644 testing/btest/plugins/reader-plugin/src/Plugin.cc create mode 100644 testing/btest/plugins/reader-plugin/src/Plugin.h create mode 100644 testing/btest/plugins/reader.bro diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index 8128554281..c4a29ca44d 100644 --- 
a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2014-05-15-14-10-48 +#open 2014-07-31-19-06-48 #fields name #types string scripts/base/init-bare.bro @@ -14,6 +14,40 @@ scripts/base/init-bare.bro build/scripts/base/bif/reporter.bif.bro build/scripts/base/bif/plugins/Bro_SNMP.types.bif.bro build/scripts/base/bif/event.bif.bro + scripts/base/frameworks/logging/__load__.bro + scripts/base/frameworks/logging/main.bro + build/scripts/base/bif/logging.bif.bro + scripts/base/frameworks/logging/postprocessors/__load__.bro + scripts/base/frameworks/logging/postprocessors/scp.bro + scripts/base/frameworks/logging/postprocessors/sftp.bro + scripts/base/frameworks/logging/writers/ascii.bro + scripts/base/frameworks/logging/writers/dataseries.bro + scripts/base/frameworks/logging/writers/sqlite.bro + scripts/base/frameworks/logging/writers/elasticsearch.bro + scripts/base/frameworks/logging/writers/none.bro + scripts/base/frameworks/input/__load__.bro + scripts/base/frameworks/input/main.bro + build/scripts/base/bif/input.bif.bro + scripts/base/frameworks/input/readers/ascii.bro + scripts/base/frameworks/input/readers/raw.bro + scripts/base/frameworks/input/readers/benchmark.bro + scripts/base/frameworks/input/readers/binary.bro + scripts/base/frameworks/input/readers/sqlite.bro + scripts/base/frameworks/analyzer/__load__.bro + scripts/base/frameworks/analyzer/main.bro + scripts/base/frameworks/packet-filter/utils.bro + build/scripts/base/bif/analyzer.bif.bro + scripts/base/frameworks/files/__load__.bro + scripts/base/frameworks/files/main.bro + build/scripts/base/bif/file_analysis.bif.bro + scripts/base/utils/site.bro + scripts/base/utils/patterns.bro + scripts/base/frameworks/files/magic/__load__.bro + build/scripts/base/bif/__load__.bro + build/scripts/base/bif/broxygen.bif.bro 
+ build/scripts/base/bif/bloom-filter.bif.bro + build/scripts/base/bif/cardinality-counter.bif.bro + build/scripts/base/bif/top-k.bif.bro build/scripts/base/bif/plugins/__load__.bro build/scripts/base/bif/plugins/Bro_ARP.events.bif.bro build/scripts/base/bif/plugins/Bro_AYIYA.events.bif.bro @@ -71,40 +105,16 @@ scripts/base/init-bare.bro build/scripts/base/bif/plugins/Bro_X509.events.bif.bro build/scripts/base/bif/plugins/Bro_X509.types.bif.bro build/scripts/base/bif/plugins/Bro_X509.functions.bif.bro - scripts/base/frameworks/logging/__load__.bro - scripts/base/frameworks/logging/main.bro - build/scripts/base/bif/logging.bif.bro - scripts/base/frameworks/logging/postprocessors/__load__.bro - scripts/base/frameworks/logging/postprocessors/scp.bro - scripts/base/frameworks/logging/postprocessors/sftp.bro - scripts/base/frameworks/logging/writers/ascii.bro - scripts/base/frameworks/logging/writers/dataseries.bro - scripts/base/frameworks/logging/writers/sqlite.bro - scripts/base/frameworks/logging/writers/elasticsearch.bro - scripts/base/frameworks/logging/writers/none.bro - scripts/base/frameworks/input/__load__.bro - scripts/base/frameworks/input/main.bro - build/scripts/base/bif/input.bif.bro - scripts/base/frameworks/input/readers/ascii.bro - scripts/base/frameworks/input/readers/raw.bro - scripts/base/frameworks/input/readers/benchmark.bro - scripts/base/frameworks/input/readers/binary.bro - scripts/base/frameworks/input/readers/sqlite.bro - scripts/base/frameworks/analyzer/__load__.bro - scripts/base/frameworks/analyzer/main.bro - scripts/base/frameworks/packet-filter/utils.bro - build/scripts/base/bif/analyzer.bif.bro - scripts/base/frameworks/files/__load__.bro - scripts/base/frameworks/files/main.bro - build/scripts/base/bif/file_analysis.bif.bro - scripts/base/utils/site.bro - scripts/base/utils/patterns.bro - scripts/base/frameworks/files/magic/__load__.bro - build/scripts/base/bif/__load__.bro - build/scripts/base/bif/bloom-filter.bif.bro - 
build/scripts/base/bif/cardinality-counter.bif.bro - build/scripts/base/bif/top-k.bif.bro - build/scripts/base/bif/broxygen.bif.bro + build/scripts/base/bif/plugins/Bro_AsciiReader.ascii.bif.bro + build/scripts/base/bif/plugins/Bro_BenchmarkReader.benchmark.bif.bro + build/scripts/base/bif/plugins/Bro_BinaryReader.binary.bif.bro + build/scripts/base/bif/plugins/Bro_RawReader.raw.bif.bro + build/scripts/base/bif/plugins/Bro_SQLiteReader.sqlite.bif.bro + build/scripts/base/bif/plugins/Bro_AsciiWriter.ascii.bif.bro + build/scripts/base/bif/plugins/Bro_DataSeriesWriter.dataseries.bif.bro + build/scripts/base/bif/plugins/Bro_ElasticSearchWriter.elasticsearch.bif.bro + build/scripts/base/bif/plugins/Bro_NoneWriter.none.bif.bro + build/scripts/base/bif/plugins/Bro_SQLiteWriter.sqlite.bif.bro scripts/policy/misc/loaded-scripts.bro scripts/base/utils/paths.bro -#close 2014-05-15-14-10-48 +#close 2014-07-31-19-06-48 diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index 03c299141c..661d58501a 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2014-05-15-14-12-26 +#open 2014-07-31-19-07-23 #fields name #types string scripts/base/init-bare.bro @@ -14,6 +14,40 @@ scripts/base/init-bare.bro build/scripts/base/bif/reporter.bif.bro build/scripts/base/bif/plugins/Bro_SNMP.types.bif.bro build/scripts/base/bif/event.bif.bro + scripts/base/frameworks/logging/__load__.bro + scripts/base/frameworks/logging/main.bro + build/scripts/base/bif/logging.bif.bro + scripts/base/frameworks/logging/postprocessors/__load__.bro + scripts/base/frameworks/logging/postprocessors/scp.bro + scripts/base/frameworks/logging/postprocessors/sftp.bro + 
scripts/base/frameworks/logging/writers/ascii.bro + scripts/base/frameworks/logging/writers/dataseries.bro + scripts/base/frameworks/logging/writers/sqlite.bro + scripts/base/frameworks/logging/writers/elasticsearch.bro + scripts/base/frameworks/logging/writers/none.bro + scripts/base/frameworks/input/__load__.bro + scripts/base/frameworks/input/main.bro + build/scripts/base/bif/input.bif.bro + scripts/base/frameworks/input/readers/ascii.bro + scripts/base/frameworks/input/readers/raw.bro + scripts/base/frameworks/input/readers/benchmark.bro + scripts/base/frameworks/input/readers/binary.bro + scripts/base/frameworks/input/readers/sqlite.bro + scripts/base/frameworks/analyzer/__load__.bro + scripts/base/frameworks/analyzer/main.bro + scripts/base/frameworks/packet-filter/utils.bro + build/scripts/base/bif/analyzer.bif.bro + scripts/base/frameworks/files/__load__.bro + scripts/base/frameworks/files/main.bro + build/scripts/base/bif/file_analysis.bif.bro + scripts/base/utils/site.bro + scripts/base/utils/patterns.bro + scripts/base/frameworks/files/magic/__load__.bro + build/scripts/base/bif/__load__.bro + build/scripts/base/bif/broxygen.bif.bro + build/scripts/base/bif/bloom-filter.bif.bro + build/scripts/base/bif/cardinality-counter.bif.bro + build/scripts/base/bif/top-k.bif.bro build/scripts/base/bif/plugins/__load__.bro build/scripts/base/bif/plugins/Bro_ARP.events.bif.bro build/scripts/base/bif/plugins/Bro_AYIYA.events.bif.bro @@ -71,40 +105,16 @@ scripts/base/init-bare.bro build/scripts/base/bif/plugins/Bro_X509.events.bif.bro build/scripts/base/bif/plugins/Bro_X509.types.bif.bro build/scripts/base/bif/plugins/Bro_X509.functions.bif.bro - scripts/base/frameworks/logging/__load__.bro - scripts/base/frameworks/logging/main.bro - build/scripts/base/bif/logging.bif.bro - scripts/base/frameworks/logging/postprocessors/__load__.bro - scripts/base/frameworks/logging/postprocessors/scp.bro - scripts/base/frameworks/logging/postprocessors/sftp.bro - 
scripts/base/frameworks/logging/writers/ascii.bro - scripts/base/frameworks/logging/writers/dataseries.bro - scripts/base/frameworks/logging/writers/sqlite.bro - scripts/base/frameworks/logging/writers/elasticsearch.bro - scripts/base/frameworks/logging/writers/none.bro - scripts/base/frameworks/input/__load__.bro - scripts/base/frameworks/input/main.bro - build/scripts/base/bif/input.bif.bro - scripts/base/frameworks/input/readers/ascii.bro - scripts/base/frameworks/input/readers/raw.bro - scripts/base/frameworks/input/readers/benchmark.bro - scripts/base/frameworks/input/readers/binary.bro - scripts/base/frameworks/input/readers/sqlite.bro - scripts/base/frameworks/analyzer/__load__.bro - scripts/base/frameworks/analyzer/main.bro - scripts/base/frameworks/packet-filter/utils.bro - build/scripts/base/bif/analyzer.bif.bro - scripts/base/frameworks/files/__load__.bro - scripts/base/frameworks/files/main.bro - build/scripts/base/bif/file_analysis.bif.bro - scripts/base/utils/site.bro - scripts/base/utils/patterns.bro - scripts/base/frameworks/files/magic/__load__.bro - build/scripts/base/bif/__load__.bro - build/scripts/base/bif/bloom-filter.bif.bro - build/scripts/base/bif/cardinality-counter.bif.bro - build/scripts/base/bif/top-k.bif.bro - build/scripts/base/bif/broxygen.bif.bro + build/scripts/base/bif/plugins/Bro_AsciiReader.ascii.bif.bro + build/scripts/base/bif/plugins/Bro_BenchmarkReader.benchmark.bif.bro + build/scripts/base/bif/plugins/Bro_BinaryReader.binary.bif.bro + build/scripts/base/bif/plugins/Bro_RawReader.raw.bif.bro + build/scripts/base/bif/plugins/Bro_SQLiteReader.sqlite.bif.bro + build/scripts/base/bif/plugins/Bro_AsciiWriter.ascii.bif.bro + build/scripts/base/bif/plugins/Bro_DataSeriesWriter.dataseries.bif.bro + build/scripts/base/bif/plugins/Bro_ElasticSearchWriter.elasticsearch.bif.bro + build/scripts/base/bif/plugins/Bro_NoneWriter.none.bif.bro + build/scripts/base/bif/plugins/Bro_SQLiteWriter.sqlite.bif.bro scripts/base/init-default.bro 
scripts/base/utils/active-http.bro scripts/base/utils/exec.bro @@ -236,4 +246,4 @@ scripts/base/init-default.bro scripts/base/misc/find-checksum-offloading.bro scripts/base/misc/find-filtered-trace.bro scripts/policy/misc/loaded-scripts.bro -#close 2014-05-15-14-12-26 +#close 2014-07-31-19-07-23 diff --git a/testing/btest/Baseline/plugins.reader/out b/testing/btest/Baseline/plugins.reader/out new file mode 100644 index 0000000000..9dd3101a8d --- /dev/null +++ b/testing/btest/Baseline/plugins.reader/out @@ -0,0 +1,10 @@ +Input::EVENT_NEW +^)kHV32-J_ +Input::EVENT_NEW +(s[Q8J4Pu4 +Input::EVENT_NEW ++3iDbOB}kq +Input::EVENT_NEW +tz9dFehHz) +Input::EVENT_NEW +d&@3g)NljG diff --git a/testing/btest/Baseline/plugins.reader/output b/testing/btest/Baseline/plugins.reader/output new file mode 100644 index 0000000000..fa218d04a5 --- /dev/null +++ b/testing/btest/Baseline/plugins.reader/output @@ -0,0 +1,4 @@ +Demo::Foo - A Foo test input reader (dynamic, version 1.0) + [Writer] Foo (Input::READER_FOO) + +=== diff --git a/testing/btest/plugins/reader-plugin/.btest-ignore b/testing/btest/plugins/reader-plugin/.btest-ignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/btest/plugins/reader-plugin/CMakeLists.txt b/testing/btest/plugins/reader-plugin/CMakeLists.txt new file mode 100644 index 0000000000..2234907ad2 --- /dev/null +++ b/testing/btest/plugins/reader-plugin/CMakeLists.txt @@ -0,0 +1,17 @@ + +project(Bro-Plugin-Demo-Foo) + +cmake_minimum_required(VERSION 2.6.3) + +if ( NOT BRO_DIST ) + message(FATAL_ERROR "BRO_DIST not set") +endif () + +set(CMAKE_MODULE_PATH ${BRO_DIST}/cmake) + +include(BroPlugin) + +bro_plugin_begin(Demo Foo) +bro_plugin_cc(src/Plugin.cc) +bro_plugin_cc(src/Foo.cc) +bro_plugin_end() diff --git a/testing/btest/plugins/reader-plugin/src/Foo.cc b/testing/btest/plugins/reader-plugin/src/Foo.cc new file mode 100644 index 0000000000..9d79361068 --- /dev/null +++ b/testing/btest/plugins/reader-plugin/src/Foo.cc @@ -0,0 +1,185 @@ +// 
See the file "COPYING" in the main distribution directory for copyright. + +#include +#include +#include +#include + +#include "Foo.h" + +#include "threading/SerialTypes.h" +#include "threading/Manager.h" + +using namespace input::reader; +using threading::Value; +using threading::Field; + +Foo::Foo(ReaderFrontend *frontend) : ReaderBackend(frontend) + { + ascii = new threading::formatter::Ascii(this, threading::formatter::Ascii::SeparatorInfo()); + } + +Foo::~Foo() + { + DoClose(); + delete ascii; + } + +void Foo::DoClose() + { + } + +bool Foo::DoInit(const ReaderInfo& info, int num_fields, const Field* const* fields) + { + DoUpdate(); + return true; + } + +string Foo::RandomString(const int len) + { + string s(len, ' '); + + static const char values[] = + "0123456789!@#$%^&*()-_=+{}[]\\|" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz"; + + for (int i = 0; i < len; ++i) + s[i] = values[random() / (RAND_MAX / sizeof(values))]; + + return s; + } + +// read the entire file and send appropriate thingies back to InputMgr +bool Foo::DoUpdate() + { + int linestosend = 5; + for ( int i = 0; i < linestosend; i++ ) + { + Value** field = new Value*[NumFields()]; + for (int j = 0; j < NumFields(); j++ ) + field[j] = EntryToVal(Fields()[j]->type, Fields()[j]->subtype); + + SendEntry(field); + } + + EndCurrentSend(); + + return true; +} + +threading::Value* Foo::EntryToVal(TypeTag type, TypeTag subtype) + { + Value* val = new Value(type, true); + + // basically construct something random from the fields that we want. + + switch ( type ) { + case TYPE_ENUM: + assert(false); // no enums, please. + + case TYPE_STRING: + { + string rnd = RandomString(10); + val->val.string_val.data = copy_string(rnd.c_str()); + val->val.string_val.length = rnd.size(); + break; + } + + case TYPE_BOOL: + val->val.int_val = 1; // we never lie. 
+ break; + + case TYPE_INT: + val->val.int_val = random(); + break; + + case TYPE_TIME: + val->val.double_val = 0; + break; + + case TYPE_DOUBLE: + case TYPE_INTERVAL: + val->val.double_val = random(); + break; + + case TYPE_COUNT: + case TYPE_COUNTER: + val->val.uint_val = random(); + break; + + case TYPE_PORT: + val->val.port_val.port = random() / (RAND_MAX / 60000); + val->val.port_val.proto = TRANSPORT_UNKNOWN; + break; + + case TYPE_SUBNET: + { + val->val.subnet_val.prefix = ascii->ParseAddr("192.168.17.1"); + val->val.subnet_val.length = 16; + } + break; + + case TYPE_ADDR: + val->val.addr_val = ascii->ParseAddr("192.168.17.1"); + break; + + case TYPE_TABLE: + case TYPE_VECTOR: + // First - common initialization + // Then - initialization for table. + // Then - initialization for vector. + // Then - common stuff + { + // how many entries do we have... + unsigned int length = random() / (RAND_MAX / 15); + + Value** lvals = new Value* [length]; + + if ( type == TYPE_TABLE ) + { + val->val.set_val.vals = lvals; + val->val.set_val.size = length; + } + else if ( type == TYPE_VECTOR ) + { + val->val.vector_val.vals = lvals; + val->val.vector_val.size = length; + } + else + assert(false); + + if ( length == 0 ) + break; //empty + + for ( unsigned int pos = 0; pos < length; pos++ ) + { + Value* newval = EntryToVal(subtype, TYPE_ENUM); + if ( newval == 0 ) + { + Error("Error while reading set"); + delete val; + return 0; + } + lvals[pos] = newval; + } + + break; + } + + + default: + Error(Fmt("unsupported field format %d", type)); + delete val; + return 0; + } + + return val; + + } + + +bool Foo::DoHeartbeat(double network_time, double current_time) +{ + return true; +} diff --git a/testing/btest/plugins/reader-plugin/src/Foo.h b/testing/btest/plugins/reader-plugin/src/Foo.h new file mode 100644 index 0000000000..490b209191 --- /dev/null +++ b/testing/btest/plugins/reader-plugin/src/Foo.h @@ -0,0 +1,34 @@ + +#ifndef BRO_PLUGIN_DEMO_FOO_H +#define BRO_PLUGIN_DEMO_FOO_H 
+ +#include "input/ReaderBackend.h" +#include "threading/formatters/Ascii.h" + +namespace input { namespace reader { + +/** + * A Foo reader to measure performance of the input framework. + */ +class Foo : public ReaderBackend { +public: + Foo(ReaderFrontend* frontend); + ~Foo(); + + static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Foo(frontend); } + +protected: + virtual bool DoInit(const ReaderInfo& info, int arg_num_fields, const threading::Field* const* fields); + virtual void DoClose(); + virtual bool DoUpdate(); + virtual bool DoHeartbeat(double network_time, double current_time); + +private: + string RandomString(const int len); + threading::Value* EntryToVal(TypeTag Type, TypeTag subtype); + threading::formatter::Ascii* ascii; +}; + +} } + +#endif diff --git a/testing/btest/plugins/reader-plugin/src/Plugin.cc b/testing/btest/plugins/reader-plugin/src/Plugin.cc new file mode 100644 index 0000000000..acc715511a --- /dev/null +++ b/testing/btest/plugins/reader-plugin/src/Plugin.cc @@ -0,0 +1,19 @@ +#include "Plugin.h" + +#include "Foo.h" + +namespace plugin { namespace Demo_Foo { Plugin plugin; } } + +using namespace plugin::Demo_Foo; + +plugin::Configuration Plugin::Configure() + { + AddComponent(new ::input::Component("Foo", ::input::reader::Foo::Instantiate)); + + plugin::Configuration config; + config.name = "Demo::Foo"; + config.description = "A Foo test input reader"; + config.version.major = 1; + config.version.minor = 0; + return config; + } diff --git a/testing/btest/plugins/reader-plugin/src/Plugin.h b/testing/btest/plugins/reader-plugin/src/Plugin.h new file mode 100644 index 0000000000..c65eac01a1 --- /dev/null +++ b/testing/btest/plugins/reader-plugin/src/Plugin.h @@ -0,0 +1,22 @@ + +#ifndef BRO_PLUGIN_DEMO_FOO +#define BRO_PLUGIN_DEMO_FOO + +#include + +namespace plugin { +namespace Demo_Foo { + +class Plugin : public ::plugin::Plugin +{ +protected: + // Overridden from plugin::Plugin. 
+ virtual plugin::Configuration Configure(); +}; + +extern Plugin plugin; + +} +} + +#endif diff --git a/testing/btest/plugins/reader.bro b/testing/btest/plugins/reader.bro new file mode 100644 index 0000000000..cecb5306da --- /dev/null +++ b/testing/btest/plugins/reader.bro @@ -0,0 +1,40 @@ +# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo +# @TEST-EXEC: cp -r %DIR/reader-plugin/* . +# @TEST-EXEC: make BRO=${DIST} +# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` btest-bg-run bro bro %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output +# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff out + +redef exit_only_after_terminate = T; + +global outfile: file; +global try: count; + +module A; + +type Val: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, tpe; + print outfile, s; + try = try + 1; + if ( try == 5 ) + { + Input::remove("input"); + close(outfile); + terminate(); + } + } + +event bro_init() + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $reader=Input::READER_FOO, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); + } From ffd3d9d185f1b92bf792178ac12a866414bce9fc Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 31 Jul 2014 15:01:50 -0700 Subject: [PATCH 010/106] More polishing. 
--- scripts/base/frameworks/logging/main.bro | 8 ++++---- scripts/base/frameworks/logging/writers/dataseries.bro | 2 +- scripts/base/frameworks/logging/writers/none.bro | 2 +- scripts/base/init-bare.bro | 1 - src/Type.cc | 6 +++--- src/logging/Component.cc | 3 --- src/plugin/ComponentManager.h | 3 --- 7 files changed, 9 insertions(+), 16 deletions(-) diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index d2013ba8dc..bf1affcb01 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -6,9 +6,9 @@ module Log; export { - ## Type that defines a ID unique for each log stream. Scripts creating new log + ## Type that defines an ID unique to each log stream. Scripts creating new log ## streams need to redef this enum to add their own specific log ID. The log ID - ## implicitly determines the default name of the generated log file. + ## implicitly determines the default name of the generated log file. type Log::ID: enum { ## Dummy place-holder. UNKNOWN @@ -33,13 +33,13 @@ export { const set_separator = "," &redef; ## String to use for empty fields. This should be different from - ## *unset_field* to make the output unambiguous. + ## *unset_field* to make the output unambiguous. ## Can be overwritten by individual writers. const empty_field = "(empty)" &redef; ## String to use for an unset &optional field. ## Can be overwritten by individual writers. - const unset_field = "-" &redef; + const unset_field = "-" &redef; ## Type defining the content of a logging stream. 
type Stream: record { diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro index 6fd65debdb..b24601d6b9 100644 --- a/scripts/base/frameworks/logging/writers/dataseries.bro +++ b/scripts/base/frameworks/logging/writers/dataseries.bro @@ -57,4 +57,4 @@ function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool return Log::run_rotation_postprocessor_cmd(info, dst); } -# redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; +redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; diff --git a/scripts/base/frameworks/logging/writers/none.bro b/scripts/base/frameworks/logging/writers/none.bro index 3a2e3c0e81..5763b796a9 100644 --- a/scripts/base/frameworks/logging/writers/none.bro +++ b/scripts/base/frameworks/logging/writers/none.bro @@ -13,5 +13,5 @@ function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool return T; } -# redef Log::default_rotation_postprocessors += { [Log::WRITER_NONE] = default_rotation_postprocessor_func }; +redef Log::default_rotation_postprocessors += { [Log::WRITER_NONE] = default_rotation_postprocessor_func }; diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 9b82f2a2d2..1199bdd7bc 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -3364,7 +3364,6 @@ const global_hash_seed: string = "" &redef; ## The maximum is currently 128 bits. const bits_per_uid: count = 96 &redef; - # Load these frameworks here because they use fairly deep integration with # BiFs and script-land defined types. 
@load base/frameworks/logging diff --git a/src/Type.cc b/src/Type.cc index 17a6efa203..f941041414 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -1476,9 +1476,9 @@ void EnumType::CheckAndAddName(const string& module_name, const char* name, } else { - // We allow double-definitions if matching exactly. This is - // so that we can define an enum both in a *.bif and *.bro to - // avoid cyclic dependencies. + // We allow double-definitions if matching exactly. This is so that + // we can define an enum both in a *.bif and *.bro for avoiding + // cyclic dependencies. if ( id->Name() != make_full_var_name(module_name.c_str(), name) || (id->HasVal() && val != id->ID_Val()->AsEnum()) ) { diff --git a/src/logging/Component.cc b/src/logging/Component.cc index 90bc9be819..3af29fd96f 100644 --- a/src/logging/Component.cc +++ b/src/logging/Component.cc @@ -2,7 +2,6 @@ #include "Component.h" #include "Manager.h" - #include "../Desc.h" #include "../util.h" @@ -25,5 +24,3 @@ void Component::DoDescribe(ODesc* d) const d->Add("Log::WRITER_"); d->Add(CanonicalName()); } - - diff --git a/src/plugin/ComponentManager.h b/src/plugin/ComponentManager.h index 9f15bcf59d..7337cf069a 100644 --- a/src/plugin/ComponentManager.h +++ b/src/plugin/ComponentManager.h @@ -135,8 +135,6 @@ ComponentManager::ComponentManager(const string& arg_module, const string& ::ID* id = install_ID(local_id.c_str(), module.c_str(), true, true); add_type(id, tag_enum_type, 0); broxygen_mgr->Identifier(id); - - // fprintf(stderr, "Enum: %s\n", id->Name()); } template @@ -246,7 +244,6 @@ void ComponentManager::RegisterComponent(C* component, string id = fmt("%s%s", prefix.c_str(), cname.c_str()); tag_enum_type->AddName(module, id.c_str(), component->Tag().AsEnumVal()->InternalInt(), true); - // fprintf(stderr, "Enum item: %s/%s\n", module.c_str(), id.c_str()); } } // namespace plugin From 8031da4ee724df8cd013571f2b1cbe062350dfc6 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 31 Jul 2014 15:01:50 -0700 Subject: 
[PATCH 011/106] More polishing of some of the branche's changes. --- scripts/base/frameworks/logging/main.bro | 8 ++++---- scripts/base/frameworks/logging/writers/dataseries.bro | 2 +- scripts/base/frameworks/logging/writers/none.bro | 2 +- scripts/base/init-bare.bro | 1 - src/Type.cc | 6 +++--- src/logging/Component.cc | 3 --- src/plugin/ComponentManager.h | 3 --- 7 files changed, 9 insertions(+), 16 deletions(-) diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index d2013ba8dc..bf1affcb01 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -6,9 +6,9 @@ module Log; export { - ## Type that defines a ID unique for each log stream. Scripts creating new log + ## Type that defines an ID unique to each log stream. Scripts creating new log ## streams need to redef this enum to add their own specific log ID. The log ID - ## implicitly determines the default name of the generated log file. + ## implicitly determines the default name of the generated log file. type Log::ID: enum { ## Dummy place-holder. UNKNOWN @@ -33,13 +33,13 @@ export { const set_separator = "," &redef; ## String to use for empty fields. This should be different from - ## *unset_field* to make the output unambiguous. + ## *unset_field* to make the output unambiguous. ## Can be overwritten by individual writers. const empty_field = "(empty)" &redef; ## String to use for an unset &optional field. ## Can be overwritten by individual writers. - const unset_field = "-" &redef; + const unset_field = "-" &redef; ## Type defining the content of a logging stream. 
type Stream: record { diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro index 6fd65debdb..b24601d6b9 100644 --- a/scripts/base/frameworks/logging/writers/dataseries.bro +++ b/scripts/base/frameworks/logging/writers/dataseries.bro @@ -57,4 +57,4 @@ function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool return Log::run_rotation_postprocessor_cmd(info, dst); } -# redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; +redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; diff --git a/scripts/base/frameworks/logging/writers/none.bro b/scripts/base/frameworks/logging/writers/none.bro index 3a2e3c0e81..5763b796a9 100644 --- a/scripts/base/frameworks/logging/writers/none.bro +++ b/scripts/base/frameworks/logging/writers/none.bro @@ -13,5 +13,5 @@ function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool return T; } -# redef Log::default_rotation_postprocessors += { [Log::WRITER_NONE] = default_rotation_postprocessor_func }; +redef Log::default_rotation_postprocessors += { [Log::WRITER_NONE] = default_rotation_postprocessor_func }; diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 9b82f2a2d2..1199bdd7bc 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -3364,7 +3364,6 @@ const global_hash_seed: string = "" &redef; ## The maximum is currently 128 bits. const bits_per_uid: count = 96 &redef; - # Load these frameworks here because they use fairly deep integration with # BiFs and script-land defined types. 
@load base/frameworks/logging diff --git a/src/Type.cc b/src/Type.cc index 17a6efa203..f941041414 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -1476,9 +1476,9 @@ void EnumType::CheckAndAddName(const string& module_name, const char* name, } else { - // We allow double-definitions if matching exactly. This is - // so that we can define an enum both in a *.bif and *.bro to - // avoid cyclic dependencies. + // We allow double-definitions if matching exactly. This is so that + // we can define an enum both in a *.bif and *.bro for avoiding + // cyclic dependencies. if ( id->Name() != make_full_var_name(module_name.c_str(), name) || (id->HasVal() && val != id->ID_Val()->AsEnum()) ) { diff --git a/src/logging/Component.cc b/src/logging/Component.cc index 90bc9be819..3af29fd96f 100644 --- a/src/logging/Component.cc +++ b/src/logging/Component.cc @@ -2,7 +2,6 @@ #include "Component.h" #include "Manager.h" - #include "../Desc.h" #include "../util.h" @@ -25,5 +24,3 @@ void Component::DoDescribe(ODesc* d) const d->Add("Log::WRITER_"); d->Add(CanonicalName()); } - - diff --git a/src/plugin/ComponentManager.h b/src/plugin/ComponentManager.h index 9f15bcf59d..7337cf069a 100644 --- a/src/plugin/ComponentManager.h +++ b/src/plugin/ComponentManager.h @@ -135,8 +135,6 @@ ComponentManager::ComponentManager(const string& arg_module, const string& ::ID* id = install_ID(local_id.c_str(), module.c_str(), true, true); add_type(id, tag_enum_type, 0); broxygen_mgr->Identifier(id); - - // fprintf(stderr, "Enum: %s\n", id->Name()); } template @@ -246,7 +244,6 @@ void ComponentManager::RegisterComponent(C* component, string id = fmt("%s%s", prefix.c_str(), cname.c_str()); tag_enum_type->AddName(module, id.c_str(), component->Tag().AsEnumVal()->InternalInt(), true); - // fprintf(stderr, "Enum item: %s/%s\n", module.c_str(), id.c_str()); } } // namespace plugin From 8737eae906fd3c8c59d0bdeec08f21e1342fd4da Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 6 Aug 2014 18:43:11 -0700 Subject: 
[PATCH 012/106] Move DataSeries and ElasticSearch into plugins. --- .gitmodules | 3 + aux/bro-aux | 2 +- aux/plugins | 1 + cmake | 2 +- doc/frameworks/logging-dataseries.rst | 186 ------- doc/frameworks/logging-elasticsearch.rst | 89 ---- doc/frameworks/logging.rst | 6 +- scripts/base/frameworks/logging/__load__.bro | 2 - .../frameworks/logging/writers/dataseries.bro | 60 --- .../logging/writers/elasticsearch.bro | 48 -- .../policy/tuning/logs-to-elasticsearch.bro | 36 -- src/logging/writers/CMakeLists.txt | 2 - src/logging/writers/dataseries/CMakeLists.txt | 26 - src/logging/writers/dataseries/DataSeries.cc | 459 ------------------ src/logging/writers/dataseries/DataSeries.h | 128 ----- src/logging/writers/dataseries/Plugin.cc | 25 - src/logging/writers/dataseries/dataseries.bif | 10 - .../writers/elasticsearch/CMakeLists.txt | 15 - .../writers/elasticsearch/ElasticSearch.cc | 290 ----------- .../writers/elasticsearch/ElasticSearch.h | 86 ---- src/logging/writers/elasticsearch/Plugin.cc | 37 -- .../writers/elasticsearch/elasticsearch.bif | 14 - .../ssh.ds.xml | 16 - .../out | 290 ----------- .../ssh.ds.txt | 34 -- .../conn.ds.txt | 89 ---- .../conn.ds.txt | 89 ---- .../http.ds.txt | 83 ---- .../btest/core/leaks/dataseries-rotate.bro | 36 -- testing/btest/core/leaks/dataseries.bro | 11 - .../frameworks/logging/dataseries/options.bro | 44 -- .../frameworks/logging/dataseries/rotate.bro | 34 -- .../logging/dataseries/test-logging.bro | 35 -- .../logging/dataseries/time-as-int.bro | 9 - .../logging/dataseries/wikipedia.bro | 9 - 35 files changed, 9 insertions(+), 2297 deletions(-) create mode 160000 aux/plugins delete mode 100644 doc/frameworks/logging-dataseries.rst delete mode 100644 doc/frameworks/logging-elasticsearch.rst delete mode 100644 scripts/base/frameworks/logging/writers/dataseries.bro delete mode 100644 scripts/base/frameworks/logging/writers/elasticsearch.bro delete mode 100644 scripts/policy/tuning/logs-to-elasticsearch.bro delete mode 100644 
src/logging/writers/dataseries/CMakeLists.txt delete mode 100644 src/logging/writers/dataseries/DataSeries.cc delete mode 100644 src/logging/writers/dataseries/DataSeries.h delete mode 100644 src/logging/writers/dataseries/Plugin.cc delete mode 100644 src/logging/writers/dataseries/dataseries.bif delete mode 100644 src/logging/writers/elasticsearch/CMakeLists.txt delete mode 100644 src/logging/writers/elasticsearch/ElasticSearch.cc delete mode 100644 src/logging/writers/elasticsearch/ElasticSearch.h delete mode 100644 src/logging/writers/elasticsearch/Plugin.cc delete mode 100644 src/logging/writers/elasticsearch/elasticsearch.bif delete mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml delete mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out delete mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt delete mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt delete mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt delete mode 100644 testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt delete mode 100644 testing/btest/core/leaks/dataseries-rotate.bro delete mode 100644 testing/btest/core/leaks/dataseries.bro delete mode 100644 testing/btest/scripts/base/frameworks/logging/dataseries/options.bro delete mode 100644 testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro delete mode 100644 testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro delete mode 100644 testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro delete mode 100644 testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro diff --git a/.gitmodules b/.gitmodules index 4998cc6b80..24375ce23d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -19,3 +19,6 @@ [submodule 
"src/3rdparty"] path = src/3rdparty url = git://git.bro.org/bro-3rdparty +[submodule "aux/plugins"] + path = aux/plugins + url = git://git.bro.org/bro-plugins diff --git a/aux/bro-aux b/aux/bro-aux index 8bd5f849c8..9ffdd276f9 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 8bd5f849c8f0e1c1e8397c3ad678eb8d3532fb24 +Subproject commit 9ffdd276f9c60db6fecff36751a15cdaec75ca4f diff --git a/aux/plugins b/aux/plugins new file mode 160000 index 0000000000..43b7ac7b4a --- /dev/null +++ b/aux/plugins @@ -0,0 +1 @@ +Subproject commit 43b7ac7b4aa192b8e2595c55192222cef057e65a diff --git a/cmake b/cmake index f2e8ba6b90..99486bfe54 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit f2e8ba6b90b3a2da9f1f77c55d0e718c25376bbb +Subproject commit 99486bfe5430d04169297b4e4debd5078f0a435f diff --git a/doc/frameworks/logging-dataseries.rst b/doc/frameworks/logging-dataseries.rst deleted file mode 100644 index cc479eae76..0000000000 --- a/doc/frameworks/logging-dataseries.rst +++ /dev/null @@ -1,186 +0,0 @@ - -============================= -Binary Output with DataSeries -============================= - -.. rst-class:: opening - - Bro's default ASCII log format is not exactly the most efficient - way for storing and searching large volumes of data. An an - alternative, Bro comes with experimental support for `DataSeries - `_ - output, an efficient binary format for recording structured bulk - data. DataSeries is developed and maintained at HP Labs. - -.. contents:: - -Installing DataSeries ---------------------- - -To use DataSeries, its libraries must be available at compile-time, -along with the supporting *Lintel* package. Generally, both are -distributed on `HP Labs' web site -`_. 
Currently, however, you need -to use recent development versions for both packages, which you can -download from github like this:: - - git clone http://github.com/dataseries/Lintel - git clone http://github.com/dataseries/DataSeries - -To build and install the two into ````, do:: - - ( cd Lintel && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX= .. && make && make install ) - ( cd DataSeries && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX= .. && make && make install ) - -Please refer to the packages' documentation for more information about -the installation process. In particular, there's more information on -required and optional `dependencies for Lintel -`_ -and `dependencies for DataSeries -`_. -For users on RedHat-style systems, you'll need the following:: - - yum install libxml2-devel boost-devel - -Compiling Bro with DataSeries Support -------------------------------------- - -Once you have installed DataSeries, Bro's ``configure`` should pick it -up automatically as long as it finds it in a standard system location. -Alternatively, you can specify the DataSeries installation prefix -manually with ``--with-dataseries=``. Keep an eye on -``configure``'s summary output, if it looks like the following, Bro -found DataSeries and will compile in the support:: - - # ./configure --with-dataseries=/usr/local - [...] - ====================| Bro Build Summary |===================== - [...] - DataSeries: true - [...] - ================================================================ - -Activating DataSeries ---------------------- - -The direct way to use DataSeries is to switch *all* log files over to -the binary format. To do that, just add ``redef -Log::default_writer=Log::WRITER_DATASERIES;`` to your ``local.bro``. -For testing, you can also just pass that on the command line:: - - bro -r trace.pcap Log::default_writer=Log::WRITER_DATASERIES - -With that, Bro will now write all its output into DataSeries files -``*.ds``. 
You can inspect these using DataSeries's set of command line -tools, which its installation process installs into ``/bin``. -For example, to convert a file back into an ASCII representation:: - - $ ds2txt conn.log - [... We skip a bunch of metadata here ...] - ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes - 1300475167.096535 CRCC5OdDlXe 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 - 1300475167.097012 o7XBsfvo3U1 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 - 1300475167.099816 pXPi1kPMgxb 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0 - 1300475168.853899 R7sOc16woCj 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF F 0 Dd 1 66 1 117 - 1300475168.854378 Z6dfHVmt0X7 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF F 0 Dd 1 80 1 127 - 1300475168.854837 k6T92WxgNAh 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211 - [...] - -(``--skip-all`` suppresses the metadata.) - -Note that the ASCII conversion is *not* equivalent to Bro's default -output format. - -You can also switch only individual files over to DataSeries by adding -code like this to your ``local.bro``: - -.. code:: bro - - event bro_init() - { - local f = Log::get_filter(Conn::LOG, "default"); # Get default filter for connection log. - f$writer = Log::WRITER_DATASERIES; # Change writer type. - Log::add_filter(Conn::LOG, f); # Replace filter with adapted version. - } - -Bro's DataSeries writer comes with a few tuning options, see -:doc:`/scripts/base/frameworks/logging/writers/dataseries.bro`. - -Working with DataSeries -======================= - -Here are a few examples of using DataSeries command line tools to work -with the output files. 
- -* Printing CSV:: - - $ ds2txt --csv conn.log - ts,uid,id.orig_h,id.orig_p,id.resp_h,id.resp_p,proto,service,duration,orig_bytes,resp_bytes,conn_state,local_orig,missed_bytes,history,orig_pkts,orig_ip_bytes,resp_pkts,resp_ip_bytes - 1258790493.773208,ZTtgbHvf4s3,192.168.1.104,137,192.168.1.255,137,udp,dns,3.748891,350,0,S0,F,0,D,7,546,0,0 - 1258790451.402091,pOY6Rw7lhUd,192.168.1.106,138,192.168.1.255,138,udp,,0.000000,0,0,S0,F,0,D,1,229,0,0 - 1258790493.787448,pn5IiEslca9,192.168.1.104,138,192.168.1.255,138,udp,,2.243339,348,0,S0,F,0,D,2,404,0,0 - 1258790615.268111,D9slyIu3hFj,192.168.1.106,137,192.168.1.255,137,udp,dns,3.764626,350,0,S0,F,0,D,7,546,0,0 - [...] - - Add ``--separator=X`` to set a different separator. - -* Extracting a subset of columns:: - - $ ds2txt --select '*' ts,id.resp_h,id.resp_p --skip-all conn.log - 1258790493.773208 192.168.1.255 137 - 1258790451.402091 192.168.1.255 138 - 1258790493.787448 192.168.1.255 138 - 1258790615.268111 192.168.1.255 137 - 1258790615.289842 192.168.1.255 138 - [...] - -* Filtering rows:: - - $ ds2txt --where '*' 'duration > 5 && id.resp_p > 1024' --skip-all conn.ds - 1258790631.532888 V8mV5WLITu5 192.168.1.105 55890 239.255.255.250 1900 udp 15.004568 798 0 S0 F 0 D 6 966 0 0 - 1258792413.439596 tMcWVWQptvd 192.168.1.105 55890 239.255.255.250 1900 udp 15.004581 798 0 S0 F 0 D 6 966 0 0 - 1258794195.346127 cQwQMRdBrKa 192.168.1.105 55890 239.255.255.250 1900 udp 15.005071 798 0 S0 F 0 D 6 966 0 0 - 1258795977.253200 i8TEjhWd2W8 192.168.1.105 55890 239.255.255.250 1900 udp 15.004824 798 0 S0 F 0 D 6 966 0 0 - 1258797759.160217 MsLsBA8Ia49 192.168.1.105 55890 239.255.255.250 1900 udp 15.005078 798 0 S0 F 0 D 6 966 0 0 - 1258799541.068452 TsOxRWJRGwf 192.168.1.105 55890 239.255.255.250 1900 udp 15.004082 798 0 S0 F 0 D 6 966 0 0 - [...] 
- -* Calculate some statistics: - - Mean/stddev/min/max over a column:: - - $ dsstatgroupby '*' basic duration from conn.ds - # Begin DSStatGroupByModule - # processed 2159 rows, where clause eliminated 0 rows - # count(*), mean(duration), stddev, min, max - 2159, 42.7938, 1858.34, 0, 86370 - [...] - - Quantiles of total connection volume:: - - $ dsstatgroupby '*' quantile 'orig_bytes + resp_bytes' from conn.ds - [...] - 2159 data points, mean 24616 +- 343295 [0,1.26615e+07] - quantiles about every 216 data points: - 10%: 0, 124, 317, 348, 350, 350, 601, 798, 1469 - tails: 90%: 1469, 95%: 7302, 99%: 242629, 99.5%: 1226262 - [...] - -The ``man`` pages for these tools show further options, and their -``-h`` option gives some more information (either can be a bit cryptic -unfortunately though). - -Deficiencies ------------- - -Due to limitations of the DataSeries format, one cannot inspect its -files before they have been fully written. In other words, when using -DataSeries, it's currently not possible to inspect the live log -files inside the spool directory before they are rotated to their -final location. It seems that this could be fixed with some effort, -and we will work with DataSeries development team on that if the -format gains traction among Bro users. - -Likewise, we're considering writing custom command line tools for -interacting with DataSeries files, making that a bit more convenient -than what the standard utilities provide. diff --git a/doc/frameworks/logging-elasticsearch.rst b/doc/frameworks/logging-elasticsearch.rst deleted file mode 100644 index 3f1eac859d..0000000000 --- a/doc/frameworks/logging-elasticsearch.rst +++ /dev/null @@ -1,89 +0,0 @@ - -========================================= -Indexed Logging Output with ElasticSearch -========================================= - -.. rst-class:: opening - - Bro's default ASCII log format is not exactly the most efficient - way for searching large volumes of data. 
ElasticSearch - is a new data storage technology for dealing with tons of data. - It's also a search engine built on top of Apache's Lucene - project. It scales very well, both for distributed indexing and - distributed searching. - -.. contents:: - -Warning -------- - -This writer plugin is still in testing and is not yet recommended for -production use! The approach to how logs are handled in the plugin is "fire -and forget" at this time, there is no error handling if the server fails to -respond successfully to the insertion request. - -Installing ElasticSearch ------------------------- - -Download the latest version from: http://www.elasticsearch.org/download/. -Once extracted, start ElasticSearch with:: - -# ./bin/elasticsearch - -For more detailed information, refer to the ElasticSearch installation -documentation: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html - -Compiling Bro with ElasticSearch Support ----------------------------------------- - -First, ensure that you have libcurl installed then run configure:: - - # ./configure - [...] - ====================| Bro Build Summary |===================== - [...] - cURL: true - [...] - ElasticSearch: true - [...] - ================================================================ - -Activating ElasticSearch ------------------------- - -The easiest way to enable ElasticSearch output is to load the -tuning/logs-to-elasticsearch.bro script. If you are using BroControl, -the following line in local.bro will enable it: - -.. console:: - - @load tuning/logs-to-elasticsearch - -With that, Bro will now write most of its logs into ElasticSearch in addition -to maintaining the Ascii logs like it would do by default. That script has -some tunable options for choosing which logs to send to ElasticSearch, refer -to the autogenerated script documentation for those options. 
- -There is an interface being written specifically to integrate with the data -that Bro outputs into ElasticSearch named Brownian. It can be found here:: - - https://github.com/grigorescu/Brownian - -Tuning ------- - -A common problem encountered with ElasticSearch is too many files being held -open. The ElasticSearch website has some suggestions on how to increase the -open file limit. - - - http://www.elasticsearch.org/tutorials/too-many-open-files/ - -TODO ----- - -Lots. - -- Perform multicast discovery for server. -- Better error detection. -- Better defaults (don't index loaded-plugins, for instance). -- diff --git a/doc/frameworks/logging.rst b/doc/frameworks/logging.rst index 47d3338e8a..e5990fea72 100644 --- a/doc/frameworks/logging.rst +++ b/doc/frameworks/logging.rst @@ -380,11 +380,11 @@ uncommon to need to delete that data before the end of the connection. Other Writers ------------- -Bro supports the following output formats other than ASCII: +Bro supports the following built-in output formats other than ASCII: .. toctree:: :maxdepth: 1 - logging-dataseries - logging-elasticsearch logging-input-sqlite + +Further formats are available as external plugins. diff --git a/scripts/base/frameworks/logging/__load__.bro b/scripts/base/frameworks/logging/__load__.bro index 44293de5cb..74c7362846 100644 --- a/scripts/base/frameworks/logging/__load__.bro +++ b/scripts/base/frameworks/logging/__load__.bro @@ -1,7 +1,5 @@ @load ./main @load ./postprocessors @load ./writers/ascii -@load ./writers/dataseries @load ./writers/sqlite -@load ./writers/elasticsearch @load ./writers/none diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro deleted file mode 100644 index b24601d6b9..0000000000 --- a/scripts/base/frameworks/logging/writers/dataseries.bro +++ /dev/null @@ -1,60 +0,0 @@ -##! Interface for the DataSeries log writer. 
- -module LogDataSeries; - -export { - ## Compression to use with the DS output file. Options are: - ## - ## 'none' -- No compression. - ## 'lzf' -- LZF compression (very quick, but leads to larger output files). - ## 'lzo' -- LZO compression (very fast decompression times). - ## 'zlib' -- GZIP compression (slower than LZF, but also produces smaller output). - ## 'bz2' -- BZIP2 compression (slower than GZIP, but also produces smaller output). - const compression = "zlib" &redef; - - ## The extent buffer size. - ## Larger values here lead to better compression and more efficient writes, - ## but also increase the lag between the time events are received and - ## the time they are actually written to disk. - const extent_size = 65536 &redef; - - ## Should we dump the XML schema we use for this DS file to disk? - ## If yes, the XML schema shares the name of the logfile, but has - ## an XML ending. - const dump_schema = F &redef; - - ## How many threads should DataSeries spawn to perform compression? - ## Note that this dictates the number of threads per log stream. If - ## you're using a lot of streams, you may want to keep this number - ## relatively small. - ## - ## Default value is 1, which will spawn one thread / stream. - ## - ## Maximum is 128, minimum is 1. - const num_threads = 1 &redef; - - ## Should time be stored as an integer or a double? - ## Storing time as a double leads to possible precision issues and - ## can (significantly) increase the size of the resulting DS log. - ## That said, timestamps stored in double form are consistent - ## with the rest of Bro, including the standard ASCII log. Hence, we - ## use them by default. - const use_integer_for_time = F &redef; -} - -# Default function to postprocess a rotated DataSeries log file. It moves the -# rotated file to a new name that includes a timestamp with the opening time, -# and then runs the writer's default postprocessor command on it. 
-function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool - { - # Move file to name including both opening and closing time. - local dst = fmt("%s.%s.ds", info$path, - strftime(Log::default_rotation_date_format, info$open)); - - system(fmt("/bin/mv %s %s", info$fname, dst)); - - # Run default postprocessor. - return Log::run_rotation_postprocessor_cmd(info, dst); - } - -redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro deleted file mode 100644 index 6292876bd0..0000000000 --- a/scripts/base/frameworks/logging/writers/elasticsearch.bro +++ /dev/null @@ -1,48 +0,0 @@ -##! Log writer for sending logs to an ElasticSearch server. -##! -##! Note: This module is in testing and is not yet considered stable! -##! -##! There is one known memory issue. If your elasticsearch server is -##! running slowly and taking too long to return from bulk insert -##! requests, the message queue to the writer thread will continue -##! growing larger and larger giving the appearance of a memory leak. - -module LogElasticSearch; - -export { - ## Name of the ES cluster. - const cluster_name = "elasticsearch" &redef; - - ## ES server. - const server_host = "127.0.0.1" &redef; - - ## ES port. - const server_port = 9200 &redef; - - ## Name of the ES index. - const index_prefix = "bro" &redef; - - ## The ES type prefix comes before the name of the related log. - ## e.g. prefix = "bro\_" would create types of bro_dns, bro_software, etc. - const type_prefix = "" &redef; - - ## The time before an ElasticSearch transfer will timeout. Note that - ## the fractional part of the timeout will be ignored. In particular, - ## time specifications less than a second result in a timeout value of - ## 0, which means "no timeout." 
- const transfer_timeout = 2secs; - - ## The batch size is the number of messages that will be queued up before - ## they are sent to be bulk indexed. - const max_batch_size = 1000 &redef; - - ## The maximum amount of wall-clock time that is allowed to pass without - ## finishing a bulk log send. This represents the maximum delay you - ## would like to have with your logs before they are sent to ElasticSearch. - const max_batch_interval = 1min &redef; - - ## The maximum byte size for a buffered JSON string to send to the bulk - ## insert API. - const max_byte_size = 1024 * 1024 &redef; -} - diff --git a/scripts/policy/tuning/logs-to-elasticsearch.bro b/scripts/policy/tuning/logs-to-elasticsearch.bro deleted file mode 100644 index b770b8f84b..0000000000 --- a/scripts/policy/tuning/logs-to-elasticsearch.bro +++ /dev/null @@ -1,36 +0,0 @@ -##! Load this script to enable global log output to an ElasticSearch database. - -module LogElasticSearch; - -export { - ## An elasticsearch specific rotation interval. - const rotation_interval = 3hr &redef; - - ## Optionally ignore any :bro:type:`Log::ID` from being sent to - ## ElasticSearch with this script. - const excluded_log_ids: set[Log::ID] &redef; - - ## If you want to explicitly only send certain :bro:type:`Log::ID` - ## streams, add them to this set. If the set remains empty, all will - ## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option - ## will remain in effect as well. 
- const send_logs: set[Log::ID] &redef; -} - -event bro_init() &priority=-5 - { - if ( server_host == "" ) - return; - - for ( stream_id in Log::active_streams ) - { - if ( stream_id in excluded_log_ids || - (|send_logs| > 0 && stream_id !in send_logs) ) - next; - - local filter: Log::Filter = [$name = "default-es", - $writer = Log::WRITER_ELASTICSEARCH, - $interv = LogElasticSearch::rotation_interval]; - Log::add_filter(stream_id, filter); - } - } diff --git a/src/logging/writers/CMakeLists.txt b/src/logging/writers/CMakeLists.txt index 9718a412d0..867ad58c47 100644 --- a/src/logging/writers/CMakeLists.txt +++ b/src/logging/writers/CMakeLists.txt @@ -1,6 +1,4 @@ add_subdirectory(ascii) -add_subdirectory(dataseries) -add_subdirectory(elasticsearch) add_subdirectory(none) add_subdirectory(sqlite) diff --git a/src/logging/writers/dataseries/CMakeLists.txt b/src/logging/writers/dataseries/CMakeLists.txt deleted file mode 100644 index f7f54e4c0f..0000000000 --- a/src/logging/writers/dataseries/CMakeLists.txt +++ /dev/null @@ -1,26 +0,0 @@ - -include(BroPlugin) - -find_package(Lintel) -find_package(DataSeries) -find_package(LibXML2) - -if (NOT DISABLE_DATASERIES AND - LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND) - - include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) - - include_directories(BEFORE ${Lintel_INCLUDE_DIR}) - include_directories(BEFORE ${DataSeries_INCLUDE_DIR}) - include_directories(BEFORE ${LibXML2_INCLUDE_DIR}) - - bro_plugin_begin(Bro DataSeriesWriter) - bro_plugin_cc(DataSeries.cc Plugin.cc) - bro_plugin_bif(dataseries.bif) - bro_plugin_link_library(${Lintel_LIBRARIES}) - bro_plugin_link_library(${DataSeries_LIBRARIES}) - bro_plugin_link_library(${LibXML2_LIBRARIES}) - bro_plugin_end() - -endif() - diff --git a/src/logging/writers/dataseries/DataSeries.cc b/src/logging/writers/dataseries/DataSeries.cc deleted file mode 100644 index 5f039ac5d5..0000000000 --- a/src/logging/writers/dataseries/DataSeries.cc +++ 
/dev/null @@ -1,459 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#include "config.h" - -#include -#include -#include - -#include - -#include "NetVar.h" -#include "threading/SerialTypes.h" - -#include "DataSeries.h" -#include "dataseries.bif.h" - -using namespace logging; -using namespace writer; - -std::string DataSeries::LogValueToString(threading::Value *val) - { - // In some cases, no value is attached. If this is the case, return - // an empty string. - if( ! val->present ) - return ""; - - switch(val->type) { - case TYPE_BOOL: - return (val->val.int_val ? "true" : "false"); - - case TYPE_INT: - { - std::ostringstream ostr; - ostr << val->val.int_val; - return ostr.str(); - } - - case TYPE_COUNT: - case TYPE_COUNTER: - case TYPE_PORT: - { - std::ostringstream ostr; - ostr << val->val.uint_val; - return ostr.str(); - } - - case TYPE_SUBNET: - return ascii->Render(val->val.subnet_val); - - case TYPE_ADDR: - return ascii->Render(val->val.addr_val); - - // Note: These two cases are relatively special. We need to convert - // these values into their integer equivalents to maximize precision. - // At the moment, there won't be a noticeable effect (Bro uses the - // double format everywhere internally, so we've already lost the - // precision we'd gain here), but timestamps may eventually switch to - // this representation within Bro. - // - // In the near-term, this *should* lead to better pack_relative (and - // thus smaller output files). - case TYPE_TIME: - case TYPE_INTERVAL: - if ( ds_use_integer_for_time ) - { - std::ostringstream ostr; - ostr << (uint64_t)(DataSeries::TIME_SCALE * val->val.double_val); - return ostr.str(); - } - else - return ascii->Render(val->val.double_val); - - case TYPE_DOUBLE: - return ascii->Render(val->val.double_val); - - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_FUNC: - if ( ! 
val->val.string_val.length ) - return ""; - - return string(val->val.string_val.data, val->val.string_val.length); - - case TYPE_TABLE: - { - if ( ! val->val.set_val.size ) - return ""; - - string tmpString = ""; - - for ( int j = 0; j < val->val.set_val.size; j++ ) - { - if ( j > 0 ) - tmpString += ds_set_separator; - - tmpString += LogValueToString(val->val.set_val.vals[j]); - } - - return tmpString; - } - - case TYPE_VECTOR: - { - if ( ! val->val.vector_val.size ) - return ""; - - string tmpString = ""; - - for ( int j = 0; j < val->val.vector_val.size; j++ ) - { - if ( j > 0 ) - tmpString += ds_set_separator; - - tmpString += LogValueToString(val->val.vector_val.vals[j]); - } - - return tmpString; - } - - default: - InternalError(Fmt("unknown type %s in DataSeries::LogValueToString", type_name(val->type))); - return "cannot be reached"; - } -} - -string DataSeries::GetDSFieldType(const threading::Field *field) -{ - switch(field->type) { - case TYPE_BOOL: - return "bool"; - - case TYPE_COUNT: - case TYPE_COUNTER: - case TYPE_PORT: - case TYPE_INT: - return "int64"; - - case TYPE_DOUBLE: - return "double"; - - case TYPE_TIME: - case TYPE_INTERVAL: - return ds_use_integer_for_time ? "int64" : "double"; - - case TYPE_SUBNET: - case TYPE_ADDR: - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_TABLE: - case TYPE_VECTOR: - case TYPE_FUNC: - return "variable32"; - - default: - InternalError(Fmt("unknown type %s in DataSeries::GetDSFieldType", type_name(field->type))); - return "cannot be reached"; - } -} - -string DataSeries::BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle) - { - if( ! 
sTitle.size() ) - sTitle = "GenericBroStream"; - - string xmlschema = "\n"; - - for( size_t i = 0; i < vals.size(); ++i ) - { - xmlschema += "\t\n"; - } - - xmlschema += "\n"; - - for( size_t i = 0; i < vals.size(); ++i ) - { - xmlschema += "\n"; - } - - return xmlschema; -} - -std::string DataSeries::GetDSOptionsForType(const threading::Field *field) -{ - switch( field->type ) { - case TYPE_TIME: - case TYPE_INTERVAL: - { - std::string s; - s += "pack_relative=\"" + std::string(field->name) + "\""; - - if ( ! ds_use_integer_for_time ) - s += " pack_scale=\"1e-6\" print_format=\"%.6f\" pack_scale_warn=\"no\""; - else - s += string(" units=\"") + TIME_UNIT() + "\" epoch=\"unix\""; - - return s; - } - - case TYPE_SUBNET: - case TYPE_ADDR: - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_TABLE: - case TYPE_VECTOR: - return "pack_unique=\"yes\""; - - default: - return ""; - } -} - -DataSeries::DataSeries(WriterFrontend* frontend) : WriterBackend(frontend) -{ - ds_compression = string((const char *)BifConst::LogDataSeries::compression->Bytes(), - BifConst::LogDataSeries::compression->Len()); - ds_dump_schema = BifConst::LogDataSeries::dump_schema; - ds_extent_size = BifConst::LogDataSeries::extent_size; - ds_num_threads = BifConst::LogDataSeries::num_threads; - ds_use_integer_for_time = BifConst::LogDataSeries::use_integer_for_time; - ds_set_separator = ","; - - threading::formatter::Ascii::SeparatorInfo sep_info; - ascii = new threading::formatter::Ascii(this, sep_info); - - compress_type = Extent::compress_mode_none; - log_file = 0; - log_output = 0; -} - -DataSeries::~DataSeries() - { - delete ascii; - } - -bool DataSeries::OpenLog(string path) - { - log_file = new DataSeriesSink(path + ".ds", compress_type); - log_file->writeExtentLibrary(log_types); - - for( size_t i = 0; i < schema_list.size(); ++i ) - { - string fn = schema_list[i].field_name; - GeneralField* gf = 0; -#ifdef USE_PERFTOOLS_DEBUG - { - // GeneralField isn't cleaning up some 
results of xml parsing, reported - // here: https://github.com/dataseries/DataSeries/issues/1 - // Ignore for now to make leak tests pass. There's confidence that - // we do clean up the GeneralField* since the ExtentSeries dtor for - // member log_series would trigger an assert if dynamically allocated - // fields aren't deleted beforehand. - HeapLeakChecker::Disabler disabler; -#endif - gf = GeneralField::create(log_series, fn); -#ifdef USE_PERFTOOLS_DEBUG - } -#endif - extents.insert(std::make_pair(fn, gf)); - } - - if ( ds_extent_size < ROW_MIN ) - { - Warning(Fmt("%d is not a valid value for 'rows'. Using min of %d instead", (int)ds_extent_size, (int)ROW_MIN)); - ds_extent_size = ROW_MIN; - } - - else if( ds_extent_size > ROW_MAX ) - { - Warning(Fmt("%d is not a valid value for 'rows'. Using max of %d instead", (int)ds_extent_size, (int)ROW_MAX)); - ds_extent_size = ROW_MAX; - } - - log_output = new OutputModule(*log_file, log_series, log_type, ds_extent_size); - - return true; - } - -bool DataSeries::DoInit(const WriterInfo& info, int num_fields, const threading::Field* const * fields) - { - // We first construct an XML schema thing (and, if ds_dump_schema is - // set, dump it to path + ".ds.xml"). Assuming that goes well, we - // use that schema to build our output logfile and prepare it to be - // written to. - - // Note: compressor count must be set *BEFORE* DataSeriesSink is - // instantiated. - if( ds_num_threads < THREAD_MIN && ds_num_threads != 0 ) - { - Warning(Fmt("%d is too few threads! Using %d instead", (int)ds_num_threads, (int)THREAD_MIN)); - ds_num_threads = THREAD_MIN; - } - - if( ds_num_threads > THREAD_MAX ) - { - Warning(Fmt("%d is too many threads! 
Dropping back to %d", (int)ds_num_threads, (int)THREAD_MAX)); - ds_num_threads = THREAD_MAX; - } - - if( ds_num_threads > 0 ) - DataSeriesSink::setCompressorCount(ds_num_threads); - - for ( int i = 0; i < num_fields; i++ ) - { - const threading::Field* field = fields[i]; - SchemaValue val; - val.ds_type = GetDSFieldType(field); - val.field_name = string(field->name); - val.field_options = GetDSOptionsForType(field); - val.bro_type = field->TypeName(); - schema_list.push_back(val); - } - - string schema = BuildDSSchemaFromFieldTypes(schema_list, info.path); - - if( ds_dump_schema ) - { - string name = string(info.path) + ".ds.xml"; - FILE* pFile = fopen(name.c_str(), "wb" ); - - if( pFile ) - { - fwrite(schema.c_str(), 1, schema.length(), pFile); - fclose(pFile); - } - - else - Error(Fmt("cannot dump schema: %s", Strerror(errno))); - } - - compress_type = Extent::compress_all; - - if( ds_compression == "lzf" ) - compress_type = Extent::compress_mode_lzf; - - else if( ds_compression == "lzo" ) - compress_type = Extent::compress_mode_lzo; - - else if( ds_compression == "zlib" ) - compress_type = Extent::compress_mode_zlib; - - else if( ds_compression == "bz2" ) - compress_type = Extent::compress_mode_bz2; - - else if( ds_compression == "none" ) - compress_type = Extent::compress_mode_none; - - else if( ds_compression == "any" ) - compress_type = Extent::compress_all; - - else - Warning(Fmt("%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'zlib', 'bz2', 'none', 'any'. Defaulting to 'any'", ds_compression.c_str())); - - log_type = log_types.registerTypePtr(schema); - log_series.setType(log_type); - - return OpenLog(info.path); - } - -bool DataSeries::DoFlush(double network_time) -{ - // Flushing is handled by DataSeries automatically, so this function - // doesn't do anything. 
- return true; -} - -void DataSeries::CloseLog() - { - for( ExtentIterator iter = extents.begin(); iter != extents.end(); ++iter ) - delete iter->second; - - extents.clear(); - - // Don't delete the file before you delete the output, or bad things - // will happen. - delete log_output; - delete log_file; - - log_output = 0; - log_file = 0; - } - -bool DataSeries::DoFinish(double network_time) -{ - CloseLog(); - return true; -} - -bool DataSeries::DoWrite(int num_fields, const threading::Field* const * fields, - threading::Value** vals) -{ - log_output->newRecord(); - - for( size_t i = 0; i < (size_t)num_fields; ++i ) - { - ExtentIterator iter = extents.find(fields[i]->name); - assert(iter != extents.end()); - - if( iter != extents.end() ) - { - GeneralField *cField = iter->second; - - if( vals[i]->present ) - cField->set(LogValueToString(vals[i])); - } - } - - return true; -} - -bool DataSeries::DoRotate(const char* rotated_path, double open, double close, bool terminating) -{ - // Note that if DS files are rotated too often, the aggregate log - // size will be (much) larger. - CloseLog(); - - string dsname = string(Info().path) + ".ds"; - string nname = string(rotated_path) + ".ds"; - - if ( rename(dsname.c_str(), nname.c_str()) != 0 ) - { - char buf[256]; - strerror_r(errno, buf, sizeof(buf)); - Error(Fmt("failed to rename %s to %s: %s", dsname.c_str(), - nname.c_str(), buf)); - FinishedRotation(); - return false; - } - - if ( ! FinishedRotation(nname.c_str(), dsname.c_str(), open, close, terminating) ) - { - Error(Fmt("error rotating %s to %s", dsname.c_str(), nname.c_str())); - return false; - } - - return OpenLog(Info().path); -} - -bool DataSeries::DoSetBuf(bool enabled) -{ - // DataSeries is *always* buffered to some degree. This option is ignored. 
- return true; -} - -bool DataSeries::DoHeartbeat(double network_time, double current_time) -{ - return true; -} diff --git a/src/logging/writers/dataseries/DataSeries.h b/src/logging/writers/dataseries/DataSeries.h deleted file mode 100644 index cdc4d9a66c..0000000000 --- a/src/logging/writers/dataseries/DataSeries.h +++ /dev/null @@ -1,128 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// A binary log writer producing DataSeries output. See doc/data-series.rst -// for more information. - -#ifndef LOGGING_WRITER_DATA_SERIES_H -#define LOGGING_WRITER_DATA_SERIES_H - -#include -#include -#include -#include - -#include "logging/WriterBackend.h" -#include "threading/formatters/Ascii.h" - -namespace logging { namespace writer { - -class DataSeries : public WriterBackend { -public: - DataSeries(WriterFrontend* frontend); - ~DataSeries(); - - static WriterBackend* Instantiate(WriterFrontend* frontend) - { return new DataSeries(frontend); } - -protected: - // Overidden from WriterBackend. - - virtual bool DoInit(const WriterInfo& info, int num_fields, - const threading::Field* const * fields); - - virtual bool DoWrite(int num_fields, const threading::Field* const* fields, - threading::Value** vals); - virtual bool DoSetBuf(bool enabled); - virtual bool DoRotate(const char* rotated_path, double open, - double close, bool terminating); - virtual bool DoFlush(double network_time); - virtual bool DoFinish(double network_time); - virtual bool DoHeartbeat(double network_time, double current_time); - -private: - static const size_t ROW_MIN = 2048; // Minimum extent size. - static const size_t ROW_MAX = (1024 * 1024 * 100); // Maximum extent size. - static const size_t THREAD_MIN = 1; // Minimum number of compression threads that DataSeries may spawn. - static const size_t THREAD_MAX = 128; // Maximum number of compression threads that DataSeries may spawn. 
- static const size_t TIME_SCALE = 1000000; // Fixed-point multiplier for time values when converted to integers. - const char* TIME_UNIT() { return "microseconds"; } // DS name for time resolution when converted to integers. Must match TIME_SCALE. - - struct SchemaValue - { - string ds_type; - string bro_type; - string field_name; - string field_options; - }; - - /** - * Turns a log value into a std::string. Uses an ostringstream to do the - * heavy lifting, but still need to switch on the type to know which value - * in the union to give to the string string for processing. - * - * @param val The value we wish to convert to a string - * @return the string value of val - */ - std::string LogValueToString(threading::Value *val); - - /** - * Takes a field type and converts it to a relevant DataSeries type. - * - * @param field We extract the type from this and convert it into a relevant DS type. - * @return String representation of type that DataSeries can understand. - */ - string GetDSFieldType(const threading::Field *field); - - /** - * Are there any options we should put into the XML schema? - * - * @param field We extract the type from this and return any options that make sense for that type. - * @return Options that can be added directly to the XML (e.g. "pack_relative=\"yes\"") - */ - std::string GetDSOptionsForType(const threading::Field *field); - - /** - * Takes a list of types, a list of names, and a title, and uses it to construct a valid DataSeries XML schema - * thing, which is then returned as a std::string - * - * @param opts std::vector of strings containing a list of options to be appended to each field (e.g. "pack_relative=yes") - * @param sTitle Name of this schema. Ideally, these schemas would be aggregated and re-used. - */ - string BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle); - - /** Closes the currently open file. */ - void CloseLog(); - - /** Opens a new file. 
*/ - bool OpenLog(string path); - - typedef std::map ExtentMap; - typedef ExtentMap::iterator ExtentIterator; - - // Internal DataSeries structures we need to keep track of. - vector schema_list; - ExtentTypeLibrary log_types; - ExtentType::Ptr log_type; - ExtentSeries log_series; - ExtentMap extents; - int compress_type; - - DataSeriesSink* log_file; - OutputModule* log_output; - - // Options set from the script-level. - uint64 ds_extent_size; - uint64 ds_num_threads; - string ds_compression; - bool ds_dump_schema; - bool ds_use_integer_for_time; - string ds_set_separator; - - threading::formatter::Ascii* ascii; -}; - -} -} - -#endif - diff --git a/src/logging/writers/dataseries/Plugin.cc b/src/logging/writers/dataseries/Plugin.cc deleted file mode 100644 index 271f523ffa..0000000000 --- a/src/logging/writers/dataseries/Plugin.cc +++ /dev/null @@ -1,25 +0,0 @@ -// See the file in the main distribution directory for copyright. - - -#include "plugin/Plugin.h" - -#include "DataSeries.h" - -namespace plugin { -namespace Bro_DataSeriesWriter { - -class Plugin : public plugin::Plugin { -public: - plugin::Configuration Configure() - { - AddComponent(new ::logging::Component("DataSeries", ::logging::writer::DataSeries::Instantiate)); - - plugin::Configuration config; - config.name = "Bro::DataSeriesWriter"; - config.description = "DataSeries log writer"; - return config; - } -} plugin; - -} -} diff --git a/src/logging/writers/dataseries/dataseries.bif b/src/logging/writers/dataseries/dataseries.bif deleted file mode 100644 index 2c83a369f2..0000000000 --- a/src/logging/writers/dataseries/dataseries.bif +++ /dev/null @@ -1,10 +0,0 @@ - -# Options for the DataSeries writer. 
- -module LogDataSeries; - -const compression: string; -const extent_size: count; -const dump_schema: bool; -const use_integer_for_time: bool; -const num_threads: count; diff --git a/src/logging/writers/elasticsearch/CMakeLists.txt b/src/logging/writers/elasticsearch/CMakeLists.txt deleted file mode 100644 index 6240b3cf63..0000000000 --- a/src/logging/writers/elasticsearch/CMakeLists.txt +++ /dev/null @@ -1,15 +0,0 @@ - -include(BroPlugin) - -find_package(LibCURL) - -if (NOT DISABLE_ELASTICSEARCH AND LIBCURL_FOUND) - include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) - bro_plugin_begin(Bro ElasticSearchWriter) - bro_plugin_cc(ElasticSearch.cc Plugin.cc) - bro_plugin_bif(elasticsearch.bif) - bro_plugin_link_library(${LibCURL_LIBRARIES}) - bro_plugin_end() -endif() - - diff --git a/src/logging/writers/elasticsearch/ElasticSearch.cc b/src/logging/writers/elasticsearch/ElasticSearch.cc deleted file mode 100644 index 8cf052f4ce..0000000000 --- a/src/logging/writers/elasticsearch/ElasticSearch.cc +++ /dev/null @@ -1,290 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// This is experimental code that is not yet ready for production usage. 
-// - - -#include "config.h" - -#include "util.h" // Needs to come first for stdint.h - -#include -#include -#include -#include - -#include "BroString.h" -#include "threading/SerialTypes.h" - -#include "ElasticSearch.h" -#include "elasticsearch.bif.h" - -using namespace logging; -using namespace writer; -using threading::Value; -using threading::Field; - -ElasticSearch::ElasticSearch(WriterFrontend* frontend) : WriterBackend(frontend) - { - cluster_name_len = BifConst::LogElasticSearch::cluster_name->Len(); - cluster_name = new char[cluster_name_len + 1]; - memcpy(cluster_name, BifConst::LogElasticSearch::cluster_name->Bytes(), cluster_name_len); - cluster_name[cluster_name_len] = 0; - - index_prefix = string((const char*) BifConst::LogElasticSearch::index_prefix->Bytes(), BifConst::LogElasticSearch::index_prefix->Len()); - - es_server = string(Fmt("http://%s:%d", BifConst::LogElasticSearch::server_host->Bytes(), - (int) BifConst::LogElasticSearch::server_port)); - bulk_url = string(Fmt("%s/_bulk", es_server.c_str())); - - http_headers = curl_slist_append(NULL, "Content-Type: text/json; charset=utf-8"); - buffer.Clear(); - counter = 0; - current_index = string(); - prev_index = string(); - last_send = current_time(); - failing = false; - - transfer_timeout = static_cast(BifConst::LogElasticSearch::transfer_timeout); - - curl_handle = HTTPSetup(); - - json = new threading::formatter::JSON(this, threading::formatter::JSON::TS_MILLIS); -} - -ElasticSearch::~ElasticSearch() - { - delete [] cluster_name; - delete json; - } - -bool ElasticSearch::DoInit(const WriterInfo& info, int num_fields, const threading::Field* const* fields) - { - return true; - } - -bool ElasticSearch::DoFlush(double network_time) - { - BatchIndex(); - return true; - } - -bool ElasticSearch::DoFinish(double network_time) - { - BatchIndex(); - curl_slist_free_all(http_headers); - curl_easy_cleanup(curl_handle); - return true; - } - -bool ElasticSearch::BatchIndex() - { - 
curl_easy_reset(curl_handle); - curl_easy_setopt(curl_handle, CURLOPT_URL, bulk_url.c_str()); - curl_easy_setopt(curl_handle, CURLOPT_POST, 1); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)buffer.Len()); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, buffer.Bytes()); - failing = ! HTTPSend(curl_handle); - - // We are currently throwing the data out regardless of if the send failed. Fire and forget! - buffer.Clear(); - counter = 0; - last_send = current_time(); - - return true; - } - -bool ElasticSearch::DoWrite(int num_fields, const Field* const * fields, - Value** vals) - { - if ( current_index.empty() ) - UpdateIndex(network_time, Info().rotation_interval, Info().rotation_base); - - // Our action line looks like: - buffer.AddRaw("{\"index\":{\"_index\":\"", 20); - buffer.Add(current_index); - buffer.AddRaw("\",\"_type\":\"", 11); - buffer.Add(Info().path); - buffer.AddRaw("\"}}\n", 4); - - json->Describe(&buffer, num_fields, fields, vals); - - buffer.AddRaw("\n", 1); - - counter++; - if ( counter >= BifConst::LogElasticSearch::max_batch_size || - uint(buffer.Len()) >= BifConst::LogElasticSearch::max_byte_size ) - BatchIndex(); - - return true; - } - -bool ElasticSearch::UpdateIndex(double now, double rinterval, double rbase) - { - if ( rinterval == 0 ) - { - // if logs aren't being rotated, don't use a rotation oriented index name. - current_index = index_prefix; - } - else - { - double nr = calc_next_rotate(now, rinterval, rbase); - double interval_beginning = now - (rinterval - nr); - - struct tm tm; - char buf[128]; - time_t teatime = (time_t)interval_beginning; - localtime_r(&teatime, &tm); - strftime(buf, sizeof(buf), "%Y%m%d%H%M", &tm); - - prev_index = current_index; - current_index = index_prefix + "-" + buf; - - // Send some metadata about this index. 
- buffer.AddRaw("{\"index\":{\"_index\":\"@", 21); - buffer.Add(index_prefix); - buffer.AddRaw("-meta\",\"_type\":\"index\",\"_id\":\"", 30); - buffer.Add(current_index); - buffer.AddRaw("-", 1); - buffer.Add(Info().rotation_base); - buffer.AddRaw("-", 1); - buffer.Add(Info().rotation_interval); - buffer.AddRaw("\"}}\n{\"name\":\"", 13); - buffer.Add(current_index); - buffer.AddRaw("\",\"start\":", 10); - buffer.Add(interval_beginning); - buffer.AddRaw(",\"end\":", 7); - buffer.Add(interval_beginning+rinterval); - buffer.AddRaw("}\n", 2); - } - - //printf("%s - prev:%s current:%s\n", Info().path.c_str(), prev_index.c_str(), current_index.c_str()); - return true; - } - - -bool ElasticSearch::DoRotate(const char* rotated_path, double open, double close, bool terminating) - { - // Update the currently used index to the new rotation interval. - UpdateIndex(close, Info().rotation_interval, Info().rotation_base); - - // Only do this stuff if there was a previous index. - if ( ! prev_index.empty() ) - { - // FIXME: I think this section is taking too long and causing the thread to die. - - // Compress the previous index - //curl_easy_reset(curl_handle); - //curl_easy_setopt(curl_handle, CURLOPT_URL, Fmt("%s/%s/_settings", es_server.c_str(), prev_index.c_str())); - //curl_easy_setopt(curl_handle, CURLOPT_CUSTOMREQUEST, "PUT"); - //curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, "{\"index\":{\"store.compress.stored\":\"true\"}}"); - //curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t) 42); - //HTTPSend(curl_handle); - - // Optimize the previous index. - // TODO: make this into variables. - //curl_easy_reset(curl_handle); - //curl_easy_setopt(curl_handle, CURLOPT_URL, Fmt("%s/%s/_optimize?max_num_segments=1&wait_for_merge=false", es_server.c_str(), prev_index.c_str())); - //HTTPSend(curl_handle); - } - - if ( ! 
FinishedRotation(current_index.c_str(), prev_index.c_str(), open, close, terminating) ) - Error(Fmt("error rotating %s to %s", prev_index.c_str(), current_index.c_str())); - - return true; - } - -bool ElasticSearch::DoSetBuf(bool enabled) - { - // Nothing to do. - return true; - } - -bool ElasticSearch::DoHeartbeat(double network_time, double current_time) - { - if ( last_send > 0 && buffer.Len() > 0 && - current_time-last_send > BifConst::LogElasticSearch::max_batch_interval ) - { - BatchIndex(); - } - - return true; - } - - -CURL* ElasticSearch::HTTPSetup() - { - CURL* handle = curl_easy_init(); - if ( ! handle ) - { - Error("cURL did not initialize correctly."); - return 0; - } - - return handle; - } - -size_t ElasticSearch::HTTPReceive(void* ptr, int size, int nmemb, void* userdata) - { - //TODO: Do some verification on the result? - return size; - } - -bool ElasticSearch::HTTPSend(CURL *handle) - { - curl_easy_setopt(handle, CURLOPT_HTTPHEADER, http_headers); - curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, &logging::writer::ElasticSearch::HTTPReceive); // This gets called with the result. - // HTTP 1.1 likes to use chunked encoded transfers, which aren't good for speed. - // The best (only?) way to disable that is to just use HTTP 1.0 - curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); - - // Some timeout options. These will need more attention later. - curl_easy_setopt(handle, CURLOPT_NOSIGNAL, 1); - curl_easy_setopt(handle, CURLOPT_CONNECTTIMEOUT, transfer_timeout); - curl_easy_setopt(handle, CURLOPT_TIMEOUT, transfer_timeout); - curl_easy_setopt(handle, CURLOPT_DNS_CACHE_TIMEOUT, 60*60); - - CURLcode return_code = curl_easy_perform(handle); - - switch ( return_code ) - { - case CURLE_COULDNT_CONNECT: - case CURLE_COULDNT_RESOLVE_HOST: - case CURLE_WRITE_ERROR: - case CURLE_RECV_ERROR: - { - if ( ! failing ) - Error(Fmt("ElasticSearch server may not be accessible.")); - - break; - } - - case CURLE_OPERATION_TIMEDOUT: - { - if ( ! 
failing ) - Warning(Fmt("HTTP operation with elasticsearch server timed out at %" PRIu64 " msecs.", transfer_timeout)); - - break; - } - - case CURLE_OK: - { - long http_code = 0; - curl_easy_getinfo(curl_handle, CURLINFO_RESPONSE_CODE, &http_code); - if ( http_code == 200 ) - // Hopefully everything goes through here. - return true; - else if ( ! failing ) - Error(Fmt("Received a non-successful status code back from ElasticSearch server, check the elasticsearch server log.")); - - break; - } - - default: - { - break; - } - } - // The "successful" return happens above - return false; - } diff --git a/src/logging/writers/elasticsearch/ElasticSearch.h b/src/logging/writers/elasticsearch/ElasticSearch.h deleted file mode 100644 index 5f3d229b5b..0000000000 --- a/src/logging/writers/elasticsearch/ElasticSearch.h +++ /dev/null @@ -1,86 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// Log writer for writing to an ElasticSearch database -// -// This is experimental code that is not yet ready for production usage. -// - -#ifndef LOGGING_WRITER_ELASTICSEARCH_H -#define LOGGING_WRITER_ELASTICSEARCH_H - -#include - -#include "logging/WriterBackend.h" -#include "threading/formatters/JSON.h" - -namespace logging { namespace writer { - -class ElasticSearch : public WriterBackend { -public: - ElasticSearch(WriterFrontend* frontend); - ~ElasticSearch(); - - static string LogExt(); - - static WriterBackend* Instantiate(WriterFrontend* frontend) - { return new ElasticSearch(frontend); } - -protected: - // Overidden from WriterBackend. 
- - virtual bool DoInit(const WriterInfo& info, int num_fields, - const threading::Field* const* fields); - - virtual bool DoWrite(int num_fields, const threading::Field* const* fields, - threading::Value** vals); - virtual bool DoSetBuf(bool enabled); - virtual bool DoRotate(const char* rotated_path, double open, - double close, bool terminating); - virtual bool DoFlush(double network_time); - virtual bool DoFinish(double network_time); - virtual bool DoHeartbeat(double network_time, double current_time); - -private: - bool AddFieldToBuffer(ODesc *b, threading::Value* val, const threading::Field* field); - bool AddValueToBuffer(ODesc *b, threading::Value* val); - bool BatchIndex(); - bool SendMappings(); - bool UpdateIndex(double now, double rinterval, double rbase); - - CURL* HTTPSetup(); - size_t HTTPReceive(void* ptr, int size, int nmemb, void* userdata); - bool HTTPSend(CURL *handle); - - // Buffers, etc. - ODesc buffer; - uint64 counter; - double last_send; - string current_index; - string prev_index; - - CURL* curl_handle; - - // From scripts - char* cluster_name; - int cluster_name_len; - - string es_server; - string bulk_url; - - struct curl_slist *http_headers; - - string path; - string index_prefix; - long transfer_timeout; - bool failing; - - uint64 batch_size; - - threading::formatter::JSON* json; -}; - -} -} - - -#endif diff --git a/src/logging/writers/elasticsearch/Plugin.cc b/src/logging/writers/elasticsearch/Plugin.cc deleted file mode 100644 index 2abb7080e4..0000000000 --- a/src/logging/writers/elasticsearch/Plugin.cc +++ /dev/null @@ -1,37 +0,0 @@ -// See the file in the main distribution directory for copyright. 
- -#include - -#include "plugin/Plugin.h" - -#include "ElasticSearch.h" - -namespace plugin { -namespace Bro_ElasticSearchWriter { - -class Plugin : public plugin::Plugin { -public: - plugin::Configuration Configure() - { - AddComponent(new ::logging::Component("ElasticSearch", ::logging::writer::ElasticSearch::Instantiate)); - - plugin::Configuration config; - config.name = "Bro::ElasticSearchWriter"; - config.description = "ElasticSearch log writer"; - return config; - } - - virtual void InitPreScript() - { - curl_global_init(CURL_GLOBAL_ALL); - } - - virtual void Done() - { - curl_global_cleanup(); - } - -} plugin; - -} -} diff --git a/src/logging/writers/elasticsearch/elasticsearch.bif b/src/logging/writers/elasticsearch/elasticsearch.bif deleted file mode 100644 index 3d56dd7dd4..0000000000 --- a/src/logging/writers/elasticsearch/elasticsearch.bif +++ /dev/null @@ -1,14 +0,0 @@ - -# Options for the ElasticSearch writer. - -module LogElasticSearch; - -const cluster_name: string; -const server_host: string; -const server_port: count; -const index_prefix: string; -const type_prefix: string; -const transfer_timeout: interval; -const max_batch_size: count; -const max_batch_interval: interval; -const max_byte_size: count; diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml deleted file mode 100644 index a1e65c254e..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out deleted file mode 100644 index 94f25c37f4..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out +++ /dev/null @@ -1,290 +0,0 @@ 
-test.2011-03-07-03-00-05.ds test 11-03-07_03.00.05 11-03-07_04.00.05 0 dataseries -test.2011-03-07-04-00-05.ds test 11-03-07_04.00.05 11-03-07_05.00.05 0 dataseries -test.2011-03-07-05-00-05.ds test 11-03-07_05.00.05 11-03-07_06.00.05 0 dataseries -test.2011-03-07-06-00-05.ds test 11-03-07_06.00.05 11-03-07_07.00.05 0 dataseries -test.2011-03-07-07-00-05.ds test 11-03-07_07.00.05 11-03-07_08.00.05 0 dataseries -test.2011-03-07-08-00-05.ds test 11-03-07_08.00.05 11-03-07_09.00.05 0 dataseries -test.2011-03-07-09-00-05.ds test 11-03-07_09.00.05 11-03-07_10.00.05 0 dataseries -test.2011-03-07-10-00-05.ds test 11-03-07_10.00.05 11-03-07_11.00.05 0 dataseries -test.2011-03-07-11-00-05.ds test 11-03-07_11.00.05 11-03-07_12.00.05 0 dataseries -test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 dataseries -> test.2011-03-07-03-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299466805.000000 10.0.0.1 20 10.0.0.2 1024 -1299470395.000000 10.0.0.2 20 10.0.0.3 0 -> test.2011-03-07-04-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299470405.000000 10.0.0.1 20 10.0.0.2 1025 -1299473995.000000 10.0.0.2 20 10.0.0.3 1 -> test.2011-03-07-05-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299474005.000000 10.0.0.1 20 10.0.0.2 1026 -1299477595.000000 10.0.0.2 20 10.0.0.3 2 -> test.2011-03-07-06-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299477605.000000 10.0.0.1 20 10.0.0.2 1027 -1299481195.000000 10.0.0.2 20 10.0.0.3 3 -> test.2011-03-07-07-00-05.ds -# Extent Types ... 
- - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299481205.000000 10.0.0.1 20 10.0.0.2 1028 -1299484795.000000 10.0.0.2 20 10.0.0.3 4 -> test.2011-03-07-08-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299484805.000000 10.0.0.1 20 10.0.0.2 1029 -1299488395.000000 10.0.0.2 20 10.0.0.3 5 -> test.2011-03-07-09-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299488405.000000 10.0.0.1 20 10.0.0.2 1030 -1299491995.000000 10.0.0.2 20 10.0.0.3 6 -> test.2011-03-07-10-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299492005.000000 10.0.0.1 20 10.0.0.2 1031 -1299495595.000000 10.0.0.2 20 10.0.0.3 7 -> test.2011-03-07-11-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299495605.000000 10.0.0.1 20 10.0.0.2 1032 -1299499195.000000 10.0.0.2 20 10.0.0.3 8 -> test.2011-03-07-12-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299499205.000000 10.0.0.1 20 10.0.0.2 1033 -1299502795.000000 10.0.0.2 20 10.0.0.3 9 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt deleted file mode 100644 index 225217faea..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt +++ /dev/null @@ -1,34 +0,0 @@ -# Extent Types ... 
- - - - - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='ssh' -t id.orig_h id.orig_p id.resp_h id.resp_p status country -1342748962.493341 1.2.3.4 1234 2.3.4.5 80 success unknown -1342748962.493341 1.2.3.4 1234 2.3.4.5 80 failure US -1342748962.493341 1.2.3.4 1234 2.3.4.5 80 failure UK -1342748962.493341 1.2.3.4 1234 2.3.4.5 80 success BR -1342748962.493341 1.2.3.4 1234 2.3.4.5 80 failure MX diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt deleted file mode 100644 index a832005c83..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt +++ /dev/null @@ -1,89 +0,0 @@ -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='conn' -ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents -1300475167096535 CXWv6p3arKYeMETxOg 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 -1300475167097012 CjhGID4nQcgTWjvg4c fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp dns 0 0 0 S0 F 0 D 1 199 0 0 -1300475167099816 CCvvfg3TEfuqmmG4bh 141.142.220.50 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 179 0 0 -1300475168853899 CPbrpk1qSsw6ESzHV4 141.142.220.118 43927 141.142.2.2 53 udp dns 435 38 89 SF F 0 Dd 1 66 1 117 -1300475168854378 C6pKV8GSxOnSLghOa 141.142.220.118 37676 141.142.2.2 53 udp dns 420 52 99 SF F 0 Dd 1 80 1 127 -1300475168854837 CIPOse170MGiRM1Qf4 141.142.220.118 40526 141.142.2.2 53 udp dns 391 38 183 SF F 0 Dd 1 66 1 211 -1300475168857956 CMXxB5GvmoxJFXdTa 141.142.220.118 32902 141.142.2.2 53 udp dns 317 38 89 SF F 0 Dd 1 66 1 117 -1300475168858306 Caby8b1slFea8xwSmb 141.142.220.118 59816 141.142.2.2 53 udp dns 343 52 99 
SF F 0 Dd 1 80 1 127 -1300475168858713 Che1bq3i2rO3KD1Syg 141.142.220.118 59714 141.142.2.2 53 udp dns 375 38 183 SF F 0 Dd 1 66 1 211 -1300475168891644 CEle3f3zno26fFZkrh 141.142.220.118 58206 141.142.2.2 53 udp dns 339 38 89 SF F 0 Dd 1 66 1 117 -1300475168892037 CwSkQu4eWZCH7OONC1 141.142.220.118 38911 141.142.2.2 53 udp dns 334 52 99 SF F 0 Dd 1 80 1 127 -1300475168892414 CfTOmO0HKorjr8Zp7 141.142.220.118 59746 141.142.2.2 53 udp dns 420 38 183 SF F 0 Dd 1 66 1 211 -1300475168893988 Cab0vO1xNYSS2hJkle 141.142.220.118 45000 141.142.2.2 53 udp dns 384 38 89 SF F 0 Dd 1 66 1 117 -1300475168894422 Cx2FqO23omNawSNrxj 141.142.220.118 48479 141.142.2.2 53 udp dns 316 52 99 SF F 0 Dd 1 80 1 127 -1300475168894787 Cx3C534wEyF3OvvcQe 141.142.220.118 48128 141.142.2.2 53 udp dns 422 38 183 SF F 0 Dd 1 66 1 211 -1300475168901749 CUKS0W3HFYOnBqSE5e 141.142.220.118 56056 141.142.2.2 53 udp dns 402 36 131 SF F 0 Dd 1 64 1 159 -1300475168902195 CRrfvP2lalMAYOCLhj 141.142.220.118 55092 141.142.2.2 53 udp dns 374 36 198 SF F 0 Dd 1 64 1 226 -1300475169899438 CojBOU3CXcLHl1r6x1 141.142.220.44 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 85 0 0 -1300475170862384 CJzVQRGJrX6V15ik7 141.142.220.226 137 141.142.220.255 137 udp dns 2613016 350 0 S0 F 0 D 7 546 0 0 -1300475171675372 ClAbxY1nmdjCuo0Le2 fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 100096 66 0 S0 F 0 D 2 162 0 0 -1300475171677081 CwG0BF1VXE0gWgs78 141.142.220.226 55131 224.0.0.252 5355 udp dns 100020 66 0 S0 F 0 D 2 122 0 0 -1300475173116749 CisNaL1Cm73CiNOmcg fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 99801 66 0 S0 F 0 D 2 162 0 0 -1300475173117362 CBQnJn22qN8TOeeZil 141.142.220.226 55671 224.0.0.252 5355 udp dns 99848 66 0 S0 F 0 D 2 122 0 0 -1300475173153679 CbEsuD3dgDDngdlbKf 141.142.220.238 56641 141.142.220.255 137 udp dns 0 0 0 S0 F 0 D 1 78 0 0 -1300475168859163 C3SfNE4BWaU4aSuwkc 141.142.220.118 49998 208.80.152.3 80 tcp http 215893 1130 734 S1 F 0 ShADad 6 1450 4 950 -1300475168652003 
CsRx2w45OKnoww6xl4 141.142.220.118 35634 208.80.152.2 80 tcp 61328 463 350 OTH F 0 DdA 2 567 1 402 -1300475168895267 CkDsfG2YIeWJmXWNWj 141.142.220.118 50001 208.80.152.3 80 tcp http 227283 1178 734 S1 F 0 ShADad 6 1498 4 950 -1300475168902635 Cn78a440HlxuyZKs6f 141.142.220.118 35642 208.80.152.2 80 tcp http 120040 534 412 S1 F 0 ShADad 4 750 3 576 -1300475168892936 CyAhVIzHqb7t7kv28 141.142.220.118 50000 208.80.152.3 80 tcp http 229603 1148 734 S1 F 0 ShADad 6 1468 4 950 -1300475168855305 C7XEbhP654jzLoe3a 141.142.220.118 49996 208.80.152.3 80 tcp http 218501 1171 733 S1 F 0 ShADad 6 1491 4 949 -1300475168892913 CzA03V1VcgagLjnO92 141.142.220.118 49999 208.80.152.3 80 tcp http 220960 1137 733 S1 F 0 ShADad 6 1457 4 949 -1300475169780331 CUof3F2yAIid8QS3dk 141.142.220.235 6705 173.192.163.128 80 tcp 0 0 0 OTH F 0 h 0 0 1 48 -1300475168724007 CRJuHdVW0XPVINV8a 141.142.220.118 48649 208.80.152.118 80 tcp http 119904 525 232 S1 F 0 ShADad 4 741 3 396 -1300475168855330 CJ3xTn1c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp http 219720 1125 734 S1 F 0 ShADad 6 1445 4 950 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt deleted file mode 100644 index afb44e36eb..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt +++ /dev/null @@ -1,89 +0,0 @@ -# Extent Types ... 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='conn' -ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents -1300475167.096535 CXWv6p3arKYeMETxOg 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 -1300475167.097012 CjhGID4nQcgTWjvg4c fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp dns 0.000000 0 0 S0 F 0 D 1 199 0 0 -1300475167.099816 CCvvfg3TEfuqmmG4bh 141.142.220.50 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 179 0 0 -1300475168.853899 CPbrpk1qSsw6ESzHV4 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF F 0 Dd 1 66 1 117 -1300475168.854378 C6pKV8GSxOnSLghOa 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF F 0 Dd 1 80 1 127 -1300475168.854837 CIPOse170MGiRM1Qf4 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211 -1300475168.857956 CMXxB5GvmoxJFXdTa 141.142.220.118 32902 141.142.2.2 53 udp dns 0.000317 38 89 SF F 0 Dd 1 66 1 117 -1300475168.858306 Caby8b1slFea8xwSmb 141.142.220.118 59816 141.142.2.2 53 udp dns 0.000343 52 99 SF F 0 Dd 1 80 1 127 -1300475168.858713 Che1bq3i2rO3KD1Syg 141.142.220.118 59714 141.142.2.2 53 udp dns 0.000375 38 183 SF F 0 Dd 1 66 1 211 -1300475168.891644 CEle3f3zno26fFZkrh 141.142.220.118 58206 141.142.2.2 53 udp dns 0.000339 38 89 SF F 0 Dd 1 66 1 117 -1300475168.892037 CwSkQu4eWZCH7OONC1 141.142.220.118 38911 141.142.2.2 53 udp dns 0.000335 52 99 SF F 0 Dd 1 80 1 127 -1300475168.892414 CfTOmO0HKorjr8Zp7 141.142.220.118 59746 141.142.2.2 53 udp dns 0.000421 38 183 SF F 0 Dd 1 66 1 211 -1300475168.893988 Cab0vO1xNYSS2hJkle 141.142.220.118 45000 141.142.2.2 53 udp dns 0.000384 38 89 SF F 0 Dd 1 66 1 117 -1300475168.894422 Cx2FqO23omNawSNrxj 141.142.220.118 48479 141.142.2.2 53 udp dns 0.000317 52 99 SF F 0 Dd 1 80 1 127 
-1300475168.894787 Cx3C534wEyF3OvvcQe 141.142.220.118 48128 141.142.2.2 53 udp dns 0.000423 38 183 SF F 0 Dd 1 66 1 211 -1300475168.901749 CUKS0W3HFYOnBqSE5e 141.142.220.118 56056 141.142.2.2 53 udp dns 0.000402 36 131 SF F 0 Dd 1 64 1 159 -1300475168.902195 CRrfvP2lalMAYOCLhj 141.142.220.118 55092 141.142.2.2 53 udp dns 0.000374 36 198 SF F 0 Dd 1 64 1 226 -1300475169.899438 CojBOU3CXcLHl1r6x1 141.142.220.44 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 85 0 0 -1300475170.862384 CJzVQRGJrX6V15ik7 141.142.220.226 137 141.142.220.255 137 udp dns 2.613017 350 0 S0 F 0 D 7 546 0 0 -1300475171.675372 ClAbxY1nmdjCuo0Le2 fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0.100096 66 0 S0 F 0 D 2 162 0 0 -1300475171.677081 CwG0BF1VXE0gWgs78 141.142.220.226 55131 224.0.0.252 5355 udp dns 0.100021 66 0 S0 F 0 D 2 122 0 0 -1300475173.116749 CisNaL1Cm73CiNOmcg fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0.099801 66 0 S0 F 0 D 2 162 0 0 -1300475173.117362 CBQnJn22qN8TOeeZil 141.142.220.226 55671 224.0.0.252 5355 udp dns 0.099849 66 0 S0 F 0 D 2 122 0 0 -1300475173.153679 CbEsuD3dgDDngdlbKf 141.142.220.238 56641 141.142.220.255 137 udp dns 0.000000 0 0 S0 F 0 D 1 78 0 0 -1300475168.859163 C3SfNE4BWaU4aSuwkc 141.142.220.118 49998 208.80.152.3 80 tcp http 0.215893 1130 734 S1 F 0 ShADad 6 1450 4 950 -1300475168.652003 CsRx2w45OKnoww6xl4 141.142.220.118 35634 208.80.152.2 80 tcp 0.061329 463 350 OTH F 0 DdA 2 567 1 402 -1300475168.895267 CkDsfG2YIeWJmXWNWj 141.142.220.118 50001 208.80.152.3 80 tcp http 0.227284 1178 734 S1 F 0 ShADad 6 1498 4 950 -1300475168.902635 Cn78a440HlxuyZKs6f 141.142.220.118 35642 208.80.152.2 80 tcp http 0.120041 534 412 S1 F 0 ShADad 4 750 3 576 -1300475168.892936 CyAhVIzHqb7t7kv28 141.142.220.118 50000 208.80.152.3 80 tcp http 0.229603 1148 734 S1 F 0 ShADad 6 1468 4 950 -1300475168.855305 C7XEbhP654jzLoe3a 141.142.220.118 49996 208.80.152.3 80 tcp http 0.218501 1171 733 S1 F 0 ShADad 6 1491 4 949 -1300475168.892913 
CzA03V1VcgagLjnO92 141.142.220.118 49999 208.80.152.3 80 tcp http 0.220961 1137 733 S1 F 0 ShADad 6 1457 4 949 -1300475169.780331 CUof3F2yAIid8QS3dk 141.142.220.235 6705 173.192.163.128 80 tcp 0.000000 0 0 OTH F 0 h 0 0 1 48 -1300475168.724007 CRJuHdVW0XPVINV8a 141.142.220.118 48649 208.80.152.118 80 tcp http 0.119905 525 232 S1 F 0 ShADad 4 741 3 396 -1300475168.855330 CJ3xTn1c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp http 0.219720 1125 734 S1 F 0 ShADad 6 1445 4 950 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt deleted file mode 100644 index eec7031ba7..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt +++ /dev/null @@ -1,83 +0,0 @@ -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='http' -ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied orig_fuids orig_mime_types resp_fuids resp_mime_types -1300475168.784020 CRJuHdVW0XPVINV8a 141.142.220.118 48649 208.80.152.118 80 1 GET bits.wikimedia.org /skins-1.5/monobook/main.css http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.916018 CJ3xTn1c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/6/63/Wikipedia-logo.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.916183 C7XEbhP654jzLoe3a 141.142.220.118 49996 208.80.152.3 80 1 GET upload.wikimedia.org 
/wikipedia/commons/thumb/b/bb/Wikipedia_wordmark.svg/174px-Wikipedia_wordmark.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.918358 C3SfNE4BWaU4aSuwkc 141.142.220.118 49998 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/b/bd/Bookshelf-40x201_6.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.952307 CyAhVIzHqb7t7kv28 141.142.220.118 50000 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/8/8a/Wikinews-logo.png/35px-Wikinews-logo.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.952296 CzA03V1VcgagLjnO92 141.142.220.118 49999 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/4/4a/Wiktionary-logo-en-35px.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.954820 CkDsfG2YIeWJmXWNWj 141.142.220.118 50001 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/f/fa/Wikiquote-logo.svg/35px-Wikiquote-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.962687 Cn78a440HlxuyZKs6f 141.142.220.118 35642 208.80.152.2 80 1 GET meta.wikimedia.org /images/wikimedia-button.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.975934 CJ3xTn1c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/f/fa/Wikibooks-logo.svg/35px-Wikibooks-logo.svg.png 
http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.976436 C7XEbhP654jzLoe3a 141.142.220.118 49996 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/d/df/Wikispecies-logo.svg/35px-Wikispecies-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.979264 C3SfNE4BWaU4aSuwkc 141.142.220.118 49998 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/4/4c/Wikisource-logo.svg/35px-Wikisource-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475169.014619 CyAhVIzHqb7t7kv28 141.142.220.118 50000 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/4/4a/Commons-logo.svg/35px-Commons-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475169.014593 CzA03V1VcgagLjnO92 141.142.220.118 49999 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/9/91/Wikiversity-logo.svg/35px-Wikiversity-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475169.014927 CkDsfG2YIeWJmXWNWj 141.142.220.118 50001 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/7/75/Wikimedia_Community_Logo.svg/35px-Wikimedia_Community_Logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 diff --git a/testing/btest/core/leaks/dataseries-rotate.bro b/testing/btest/core/leaks/dataseries-rotate.bro deleted file mode 100644 index 
f43823cd5f..0000000000 --- a/testing/btest/core/leaks/dataseries-rotate.bro +++ /dev/null @@ -1,36 +0,0 @@ -# -# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-GROUP: leaks -# @TEST-GROUP: dataseries -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/rotation.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: btest-bg-wait 15 - -module Test; - -export { - # Create a new ID for our log stream - redef enum Log::ID += { LOG }; - - # Define a record with all the columns the log file can have. - # (I'm using a subset of fields from ssh-ext for demonstration.) - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - } &log; -} - -redef Log::default_rotation_interval = 1hr; -redef Log::default_rotation_postprocessor_cmd = "echo"; - -event bro_init() -{ - Log::create_stream(Test::LOG, [$columns=Log]); -} - -event new_connection(c: connection) - { - Log::write(Test::LOG, [$t=network_time(), $id=c$id]); - } diff --git a/testing/btest/core/leaks/dataseries.bro b/testing/btest/core/leaks/dataseries.bro deleted file mode 100644 index c5556accf2..0000000000 --- a/testing/btest/core/leaks/dataseries.bro +++ /dev/null @@ -1,11 +0,0 @@ -# Needs perftools support. -# -# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-GROUP: leaks -# @TEST-GROUP: dataseries -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: btest-bg-wait 25 diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro deleted file mode 100644 index 1b05ca9b8f..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro +++ /dev/null @@ -1,44 +0,0 @@ -# -# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt -# @TEST-GROUP: dataseries -# -# @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: test -e ssh.ds.xml -# @TEST-EXEC: btest-diff ssh.ds.xml - -module SSH; - -redef LogDataSeries::dump_schema = T; - -# Haven't yet found a way to check for the effect of these. -redef LogDataSeries::compression = "bz2"; -redef LogDataSeries::extent_size = 1000; -redef LogDataSeries::num_threads = 5; - -# LogDataSeries::use_integer_for_time is tested separately. - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. 
- status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro deleted file mode 100644 index ebc7a15002..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro +++ /dev/null @@ -1,34 +0,0 @@ -# -# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt -# @TEST-GROUP: dataseries -# -# @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT 2>&1 Log::default_writer=Log::WRITER_DATASERIES | grep "test" >out -# @TEST-EXEC: for i in test.*.ds; do printf '> %s\n' $i; ds2txt --skip-index $i; done >>out -# @TEST-EXEC: btest-diff out - -module Test; - -export { - # Create a new ID for our log stream - redef enum Log::ID += { LOG }; - - # Define a record with all the columns the log file can have. - # (I'm using a subset of fields from ssh-ext for demonstration.) - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. 
- } &log; -} - -redef Log::default_rotation_interval = 1hr; -redef Log::default_rotation_postprocessor_cmd = "echo"; - -event bro_init() -{ - Log::create_stream(Test::LOG, [$columns=Log]); -} - -event new_connection(c: connection) - { - Log::write(Test::LOG, [$t=network_time(), $id=c$id]); - } diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro deleted file mode 100644 index c030c58861..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro +++ /dev/null @@ -1,35 +0,0 @@ -# -# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt -# @TEST-GROUP: dataseries -# -# @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: ds2txt --skip-index ssh.ds >ssh.ds.txt -# @TEST-EXEC: btest-diff ssh.ds.txt - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. 
- status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro deleted file mode 100644 index 87d33a46d1..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt -# @TEST-GROUP: dataseries -# -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: ds2txt --skip-index conn.ds >conn.ds.txt -# @TEST-EXEC: btest-diff conn.ds.txt - -redef LogDataSeries::use_integer_for_time = T; diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro deleted file mode 100644 index 15e5ba16f1..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-REQUIRES: has-writer Bro::DataSeriesWriter && which ds2txt -# @TEST-GROUP: dataseries -# -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: ds2txt --skip-index conn.ds >conn.ds.txt -# @TEST-EXEC: ds2txt --skip-index http.ds >http.ds.txt -# @TEST-EXEC: btest-diff conn.ds.txt -# 
@TEST-EXEC: btest-diff http.ds.txt From 865ac94f71b84d24af11774becc2bf6ee7c7d69b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 8 Aug 2014 18:28:43 -0700 Subject: [PATCH 013/106] Updating submodule. --- aux/bro-aux | 2 +- aux/plugins | 2 +- cmake | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aux/bro-aux b/aux/bro-aux index 9ffdd276f9..92351e44ee 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 9ffdd276f9c60db6fecff36751a15cdaec75ca4f +Subproject commit 92351e44ee59e424546148ecb6a292ca6d625e75 diff --git a/aux/plugins b/aux/plugins index 43b7ac7b4a..9617273c1e 160000 --- a/aux/plugins +++ b/aux/plugins @@ -1 +1 @@ -Subproject commit 43b7ac7b4aa192b8e2595c55192222cef057e65a +Subproject commit 9617273c1e81257c71b3b92a893437e5ea0e8547 diff --git a/cmake b/cmake index 99486bfe54..9915ce1965 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 99486bfe5430d04169297b4e4debd5078f0a435f +Subproject commit 9915ce19658958d8ce573cd711e1055d3c39b69f From f884fc6c11e0012c15f155c59c1fb4e82212da4a Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 13 Aug 2014 21:16:01 -0700 Subject: [PATCH 014/106] Removing DataSeries and ElasticSearch from configure script. 
--- aux/bro-aux | 2 +- aux/plugins | 2 +- configure | 21 --------------------- doc/frameworks/logging.rst | 20 ++++++++++---------- 4 files changed, 12 insertions(+), 33 deletions(-) diff --git a/aux/bro-aux b/aux/bro-aux index 92351e44ee..2e19a879bd 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 92351e44ee59e424546148ecb6a292ca6d625e75 +Subproject commit 2e19a879bd022e419016bb16803ee237afe00f12 diff --git a/aux/plugins b/aux/plugins index 9617273c1e..cf6617b1a7 160000 --- a/aux/plugins +++ b/aux/plugins @@ -1 +1 @@ -Subproject commit 9617273c1e81257c71b3b92a893437e5ea0e8547 +Subproject commit cf6617b1a7831ea1398fd87ca4a400ff1f583b50 diff --git a/configure b/configure index 35095c333a..5747586db8 100755 --- a/configure +++ b/configure @@ -39,8 +39,6 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --disable-auxtools don't build or install auxiliary tools --disable-perftools don't try to build with Google Perftools --disable-python don't try to build python bindings for broccoli - --disable-dataseries don't use the optional DataSeries log writer - --disable-elasticsearch don't use the optional ElasticSearch log writer Required Packages in Non-Standard Locations: --with-openssl=PATH path to OpenSSL install root @@ -62,9 +60,6 @@ Usage: $0 [OPTION]... [VAR=VALUE]... 
--with-ruby-lib=PATH path to ruby library --with-ruby-inc=PATH path to ruby headers --with-swig=PATH path to SWIG executable - --with-dataseries=PATH path to DataSeries and Lintel libraries - --with-xml2=PATH path to libxml2 installation (for DataSeries) - --with-curl=PATH path to libcurl install root (for ElasticSearch) Packaging Options (for developers): --binary-package toggle special logic for binary packaging @@ -183,12 +178,6 @@ while [ $# -ne 0 ]; do --enable-ruby) append_cache_entry DISABLE_RUBY_BINDINGS BOOL false ;; - --disable-dataseries) - append_cache_entry DISABLE_DATASERIES BOOL true - ;; - --disable-elasticsearch) - append_cache_entry DISABLE_ELASTICSEARCH BOOL true - ;; --with-openssl=*) append_cache_entry OpenSSL_ROOT_DIR PATH $optarg ;; @@ -243,16 +232,6 @@ while [ $# -ne 0 ]; do --with-swig=*) append_cache_entry SWIG_EXECUTABLE PATH $optarg ;; - --with-dataseries=*) - append_cache_entry DataSeries_ROOT_DIR PATH $optarg - append_cache_entry Lintel_ROOT_DIR PATH $optarg - ;; - --with-xml2=*) - append_cache_entry LibXML2_ROOT_DIR PATH $optarg - ;; - --with-curl=*) - append_cache_entry LibCURL_ROOT_DIR PATH $optarg - ;; --binary-package) append_cache_entry BINARY_PACKAGING_MODE BOOL true ;; diff --git a/doc/frameworks/logging.rst b/doc/frameworks/logging.rst index e5990fea72..c64ab02489 100644 --- a/doc/frameworks/logging.rst +++ b/doc/frameworks/logging.rst @@ -38,7 +38,7 @@ Bro's logging interface is built around three main abstractions: Writers A writer defines the actual output format for the information being logged. At the moment, Bro comes with only one type of - writer, which produces tab separated ASCII files. In the + writer, which produces tab separated ASCII files. In the future we will add further writers, like for binary output and direct logging into a database. @@ -98,7 +98,7 @@ Note the fields that are set for the filter: ``include`` A set limiting the fields to the ones given. 
The names correspond to those in the :bro:type:`Conn::Info` record, with - sub-records unrolled by concatenating fields (separated with + sub-records unrolled by concatenating fields (separated with dots). Using the code above, you will now get a new log file ``origs.log`` @@ -155,7 +155,7 @@ that returns the desired path: { local filter: Log::Filter = [$name="conn-split", $path_func=split_log, $include=set("ts", "id.orig_h")]; Log::add_filter(Conn::LOG, filter); - } + } Running this will now produce two files, ``local.log`` and ``remote.log``, with the corresponding entries. One could extend this @@ -263,7 +263,7 @@ specific destination exceeds a certain duration: .. code:: bro redef enum Notice::Type += { - ## Indicates that a connection remained established longer + ## Indicates that a connection remained established longer ## than 5 minutes. Long_Conn_Found }; @@ -271,8 +271,8 @@ specific destination exceeds a certain duration: event Conn::log_conn(rec: Conn::Info) { if ( rec$duration > 5mins ) - NOTICE([$note=Long_Conn_Found, - $msg=fmt("unusually long conn to %s", rec$id$resp_h), + NOTICE([$note=Long_Conn_Found, + $msg=fmt("unusually long conn to %s", rec$id$resp_h), $id=rec$id]); } @@ -335,11 +335,11 @@ example for the ``Foo`` module: # Define a hook event. By convention, this is called # "log_". global log_foo: event(rec: Info); - + } # This event should be handled at a higher priority so that when - # users modify your stream later and they do it at priority 0, + # users modify your stream later and they do it at priority 0, # their code runs after this. event bro_init() &priority=5 { @@ -356,7 +356,7 @@ it easily accessible across event handlers: foo: Info &optional; } -Now you can use the :bro:id:`Log::write` method to output log records and +Now you can use the :bro:id:`Log::write` method to output log records and save the logged ``Foo::Info`` record into the connection record: .. 
code:: bro @@ -387,4 +387,4 @@ Bro supports the following built-in output formats other than ASCII: logging-input-sqlite -Further formats are available as external plugins. +Further formats are available as external plugins. From 996d118d683dcf6fa692d0bb4a0d689f7c289785 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 13 Aug 2014 21:33:03 -0700 Subject: [PATCH 015/106] Fixing tests. --- .../frameworks/logging/writers/dataseries.bro | 60 ------------------- scripts/test-all-policy.bro | 3 - .../canonified_loaded_scripts.log | 8 +-- .../canonified_loaded_scripts.log | 8 +-- testing/external/scripts/testing-setup.bro | 6 -- 5 files changed, 4 insertions(+), 81 deletions(-) delete mode 100644 scripts/base/frameworks/logging/writers/dataseries.bro diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro deleted file mode 100644 index b24601d6b9..0000000000 --- a/scripts/base/frameworks/logging/writers/dataseries.bro +++ /dev/null @@ -1,60 +0,0 @@ -##! Interface for the DataSeries log writer. - -module LogDataSeries; - -export { - ## Compression to use with the DS output file. Options are: - ## - ## 'none' -- No compression. - ## 'lzf' -- LZF compression (very quick, but leads to larger output files). - ## 'lzo' -- LZO compression (very fast decompression times). - ## 'zlib' -- GZIP compression (slower than LZF, but also produces smaller output). - ## 'bz2' -- BZIP2 compression (slower than GZIP, but also produces smaller output). - const compression = "zlib" &redef; - - ## The extent buffer size. - ## Larger values here lead to better compression and more efficient writes, - ## but also increase the lag between the time events are received and - ## the time they are actually written to disk. - const extent_size = 65536 &redef; - - ## Should we dump the XML schema we use for this DS file to disk? - ## If yes, the XML schema shares the name of the logfile, but has - ## an XML ending. 
- const dump_schema = F &redef; - - ## How many threads should DataSeries spawn to perform compression? - ## Note that this dictates the number of threads per log stream. If - ## you're using a lot of streams, you may want to keep this number - ## relatively small. - ## - ## Default value is 1, which will spawn one thread / stream. - ## - ## Maximum is 128, minimum is 1. - const num_threads = 1 &redef; - - ## Should time be stored as an integer or a double? - ## Storing time as a double leads to possible precision issues and - ## can (significantly) increase the size of the resulting DS log. - ## That said, timestamps stored in double form are consistent - ## with the rest of Bro, including the standard ASCII log. Hence, we - ## use them by default. - const use_integer_for_time = F &redef; -} - -# Default function to postprocess a rotated DataSeries log file. It moves the -# rotated file to a new name that includes a timestamp with the opening time, -# and then runs the writer's default postprocessor command on it. -function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool - { - # Move file to name including both opening and closing time. - local dst = fmt("%s.%s.ds", info$path, - strftime(Log::default_rotation_date_format, info$open)); - - system(fmt("/bin/mv %s %s", info$fname, dst)); - - # Run default postprocessor. 
- return Log::run_rotation_postprocessor_cmd(info, dst); - } - -redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; diff --git a/scripts/test-all-policy.bro b/scripts/test-all-policy.bro index 5ab596dbfb..1146f274bb 100644 --- a/scripts/test-all-policy.bro +++ b/scripts/test-all-policy.bro @@ -98,7 +98,4 @@ @load tuning/defaults/packet-fragments.bro @load tuning/defaults/warnings.bro @load tuning/json-logs.bro -@load tuning/logs-to-elasticsearch.bro @load tuning/track-all-assets.bro - -redef LogElasticSearch::server_host = ""; diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index c4a29ca44d..bcd32fa94c 100644 --- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2014-07-31-19-06-48 +#open 2014-08-14-04-31-10 #fields name #types string scripts/base/init-bare.bro @@ -21,9 +21,7 @@ scripts/base/init-bare.bro scripts/base/frameworks/logging/postprocessors/scp.bro scripts/base/frameworks/logging/postprocessors/sftp.bro scripts/base/frameworks/logging/writers/ascii.bro - scripts/base/frameworks/logging/writers/dataseries.bro scripts/base/frameworks/logging/writers/sqlite.bro - scripts/base/frameworks/logging/writers/elasticsearch.bro scripts/base/frameworks/logging/writers/none.bro scripts/base/frameworks/input/__load__.bro scripts/base/frameworks/input/main.bro @@ -111,10 +109,8 @@ scripts/base/init-bare.bro build/scripts/base/bif/plugins/Bro_RawReader.raw.bif.bro build/scripts/base/bif/plugins/Bro_SQLiteReader.sqlite.bif.bro build/scripts/base/bif/plugins/Bro_AsciiWriter.ascii.bif.bro - build/scripts/base/bif/plugins/Bro_DataSeriesWriter.dataseries.bif.bro - 
build/scripts/base/bif/plugins/Bro_ElasticSearchWriter.elasticsearch.bif.bro build/scripts/base/bif/plugins/Bro_NoneWriter.none.bif.bro build/scripts/base/bif/plugins/Bro_SQLiteWriter.sqlite.bif.bro scripts/policy/misc/loaded-scripts.bro scripts/base/utils/paths.bro -#close 2014-07-31-19-06-48 +#close 2014-08-14-04-31-10 diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index 661d58501a..b100d86ecb 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2014-07-31-19-07-23 +#open 2014-08-14-04-31-11 #fields name #types string scripts/base/init-bare.bro @@ -21,9 +21,7 @@ scripts/base/init-bare.bro scripts/base/frameworks/logging/postprocessors/scp.bro scripts/base/frameworks/logging/postprocessors/sftp.bro scripts/base/frameworks/logging/writers/ascii.bro - scripts/base/frameworks/logging/writers/dataseries.bro scripts/base/frameworks/logging/writers/sqlite.bro - scripts/base/frameworks/logging/writers/elasticsearch.bro scripts/base/frameworks/logging/writers/none.bro scripts/base/frameworks/input/__load__.bro scripts/base/frameworks/input/main.bro @@ -111,8 +109,6 @@ scripts/base/init-bare.bro build/scripts/base/bif/plugins/Bro_RawReader.raw.bif.bro build/scripts/base/bif/plugins/Bro_SQLiteReader.sqlite.bif.bro build/scripts/base/bif/plugins/Bro_AsciiWriter.ascii.bif.bro - build/scripts/base/bif/plugins/Bro_DataSeriesWriter.dataseries.bif.bro - build/scripts/base/bif/plugins/Bro_ElasticSearchWriter.elasticsearch.bif.bro build/scripts/base/bif/plugins/Bro_NoneWriter.none.bif.bro build/scripts/base/bif/plugins/Bro_SQLiteWriter.sqlite.bif.bro scripts/base/init-default.bro @@ -246,4 +242,4 @@ scripts/base/init-default.bro 
scripts/base/misc/find-checksum-offloading.bro scripts/base/misc/find-filtered-trace.bro scripts/policy/misc/loaded-scripts.bro -#close 2014-07-31-19-07-23 +#close 2014-08-14-04-31-11 diff --git a/testing/external/scripts/testing-setup.bro b/testing/external/scripts/testing-setup.bro index 5ef35ff3b2..282cf41119 100644 --- a/testing/external/scripts/testing-setup.bro +++ b/testing/external/scripts/testing-setup.bro @@ -5,12 +5,6 @@ redef SMTP::never_calc_md5 = T; @endif -@ifdef ( LogElasticSearch::server_host ) - # Set to empty so that logs-to-elasticsearch.bro doesn't try to setup - #log forwarding to ES. - redef LogElasticSearch::server_host = ""; -@endif - @ifdef ( LogAscii::use_json ) # Don't start logging everything as JSON. # (json-logs.bro activates this). From 97ef77c2b76082f467386f7c01cab9c54c66eb2b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 13 Aug 2014 21:38:18 -0700 Subject: [PATCH 016/106] Adding plugin testing to Makefile's test-all. --- Makefile | 1 + aux/plugins | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2b8e66503b..49d9a6173c 100644 --- a/Makefile +++ b/Makefile @@ -56,6 +56,7 @@ test-all: test test -d aux/broctl && ( cd aux/broctl && make test ) test -d aux/btest && ( cd aux/btest && make test ) test -d aux/bro-aux && ( cd aux/bro-aux && make test ) + test -d aux/plugins && ( cd aux/plugins && make test-all ) configured: @test -d $(BUILD) || ( echo "Error: No build/ directory found. Did you run configure?" && exit 1 ) diff --git a/aux/plugins b/aux/plugins index cf6617b1a7..ee5a9b81f9 160000 --- a/aux/plugins +++ b/aux/plugins @@ -1 +1 @@ -Subproject commit cf6617b1a7831ea1398fd87ca4a400ff1f583b50 +Subproject commit ee5a9b81f99336842164442fd5ff64c55f2882f2 From 6f27d3dd409198a90a60b774235815f5e95c6f57 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 19 Aug 2014 11:00:42 -0500 Subject: [PATCH 017/106] Silence some doc-related warnings when using `bro -e`. 
BIT-1232 #close --- CHANGES | 7 +++++++ VERSION | 2 +- src/broxygen/Manager.cc | 19 +++++++++++++------ .../Baseline/doc.broxygen.command_line/output | 1 + testing/btest/doc/broxygen/command_line.bro | 7 +++++++ 5 files changed, 29 insertions(+), 7 deletions(-) create mode 100644 testing/btest/Baseline/doc.broxygen.command_line/output create mode 100644 testing/btest/doc/broxygen/command_line.bro diff --git a/CHANGES b/CHANGES index e4588559c8..6c669a12e7 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,11 @@ +2.3-98 | 2014-08-19 11:03:46 -0500 + + * Silence some doc-related warnings when using `bro -e`. + Closes BIT-1232. (Jon Siwek) + + * Fix possible null ptr derefs reported by Coverity. (Jon Siwek) + 2.3-96 | 2014-08-01 14:35:01 -0700 * Small change to DHCP documentation. In server->client messages the diff --git a/VERSION b/VERSION index e247bc816b..a296e6c63e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.3-96 +2.3-98 diff --git a/src/broxygen/Manager.cc b/src/broxygen/Manager.cc index 2b1159a3a0..3a07191f33 100644 --- a/src/broxygen/Manager.cc +++ b/src/broxygen/Manager.cc @@ -16,6 +16,16 @@ static void DbgAndWarn(const char* msg) DBG_LOG(DBG_BROXYGEN, "%s", msg); } +static void WarnMissingScript(const char* type, const ID* id, + string script) + { + if ( script == "" ) + return; + + DbgAndWarn(fmt("Can't document %s %s, lookup of %s failed", + type, id->Name(), script.c_str())); + } + static string RemoveLeadingSpace(const string& s) { if ( s.empty() || s[0] != ' ' ) @@ -220,8 +230,7 @@ void Manager::StartType(ID* id) if ( ! script_info ) { - DbgAndWarn(fmt("Can't document identifier %s, lookup of %s failed", - id->Name(), script.c_str())); + WarnMissingScript("identifier", id, script); return; } @@ -285,8 +294,7 @@ void Manager::Identifier(ID* id) if ( ! 
script_info ) { - DbgAndWarn(fmt("Can't document identifier %s, lookup of %s failed", - id->Name(), script.c_str())); + WarnMissingScript("identifier", id, script); return; } @@ -340,8 +348,7 @@ void Manager::Redef(const ID* id, const string& path) if ( ! script_info ) { - DbgAndWarn(fmt("Can't document redef of %s, lookup of %s failed", - id->Name(), from_script.c_str())); + WarnMissingScript("redef", id, from_script); return; } diff --git a/testing/btest/Baseline/doc.broxygen.command_line/output b/testing/btest/Baseline/doc.broxygen.command_line/output new file mode 100644 index 0000000000..f599e28b8a --- /dev/null +++ b/testing/btest/Baseline/doc.broxygen.command_line/output @@ -0,0 +1 @@ +10 diff --git a/testing/btest/doc/broxygen/command_line.bro b/testing/btest/doc/broxygen/command_line.bro new file mode 100644 index 0000000000..95558f7461 --- /dev/null +++ b/testing/btest/doc/broxygen/command_line.bro @@ -0,0 +1,7 @@ +# Shouldn't emit any warnings about not being able to document something +# that's supplied via command line script. + +# @TEST-EXEC: bro %INPUT -e 'redef myvar=10; print myvar' >output 2>&1 +# @TEST-EXEC: btest-diff output + +const myvar = 5 &redef; From 4c15b386e60bb2bbf1baadae3f2a0f7e1f2de592 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 20 Aug 2014 15:29:02 -0500 Subject: [PATCH 018/106] Updating submodule(s). [nomail] --- aux/broccoli | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broccoli b/aux/broccoli index 07cfcc76fb..29995a9fc1 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 07cfcc76fb08365b545bd3f412c3f6e6c92824e9 +Subproject commit 29995a9fc1f719d1b408c114e06a4c7b773a1470 From 8b0678b8a81b0e0e6214c9f6c3ce9d1d96b1b60a Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 21 Aug 2014 16:10:58 -0500 Subject: [PATCH 019/106] Updating CHANGES and VERSION. 
--- CHANGES | 9 +++++++++ VERSION | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 6c669a12e7..fcf8dff913 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,13 @@ +2.3-116 | 2014-08-21 16:04:13 -0500 + + * Adding plugin testing to Makefile's test-all. (Robin Sommer) + + * Converting log writers and input readers to plugins. + DataSeries and ElasticSearch plugins have moved to the new + bro-plugins repository, which is now a git submodule in the + aux/plugins directory. (Robin Sommer) + 2.3-98 | 2014-08-19 11:03:46 -0500 * Silence some doc-related warnings when using `bro -e`. diff --git a/VERSION b/VERSION index a296e6c63e..0c55fc3b06 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.3-98 +2.3-116 From 1eb7d718d4f92a777e57a13d3a5c157471af5bc6 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 22 Aug 2014 12:26:59 -0500 Subject: [PATCH 020/106] Updating submodule(s). [nomail] --- aux/bro-aux | 2 +- aux/plugins | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aux/bro-aux b/aux/bro-aux index 9fa5b1fe8e..181f084432 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 9fa5b1fe8e1b8c7e5b5cfc7d59297fb2dba7d3a9 +Subproject commit 181f084432e277f899140647d9b788059b3cccb1 diff --git a/aux/plugins b/aux/plugins index 43fea76b16..68577f8bce 160000 --- a/aux/plugins +++ b/aux/plugins @@ -1 +1 @@ -Subproject commit 43fea76b161578821413ecf15fdaea16126a019d +Subproject commit 68577f8bceca584f95badba8d0a30f0bf70ae4ef From f8895843cf39bd3f8d2cd1398c0caa6d7e5160f7 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 22 Aug 2014 14:43:20 -0500 Subject: [PATCH 021/106] Updating submodule(s). 
[nomail] --- aux/plugins | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/plugins b/aux/plugins index 68577f8bce..6de518922e 160000 --- a/aux/plugins +++ b/aux/plugins @@ -1 +1 @@ -Subproject commit 68577f8bceca584f95badba8d0a30f0bf70ae4ef +Subproject commit 6de518922e5f89d52d831ea6fb6adb7fff94437e From 3521a92a004020170dd79c79a7ad99d58eb9bd8a Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 22 Aug 2014 16:49:10 -0500 Subject: [PATCH 022/106] Detect functions that try to bind variables from an outer scope. And raise an error saying that's not supported. Addresses BIT-1233. --- src/Stmt.cc | 9 ++++- src/Var.cc | 39 +++++++++++++++++++ .../Baseline/language.outer_param_binding/out | 3 ++ .../btest/language/outer_param_binding.bro | 27 +++++++++++++ 4 files changed, 76 insertions(+), 2 deletions(-) create mode 100644 testing/btest/Baseline/language.outer_param_binding/out create mode 100644 testing/btest/language/outer_param_binding.bro diff --git a/src/Stmt.cc b/src/Stmt.cc index 3571cad197..cb716b3f15 100644 --- a/src/Stmt.cc +++ b/src/Stmt.cc @@ -660,8 +660,13 @@ void Case::Describe(ODesc* d) const TraversalCode Case::Traverse(TraversalCallback* cb) const { - TraversalCode tc = cases->Traverse(cb); - HANDLE_TC_STMT_PRE(tc); + TraversalCode tc; + + if ( cases ) + { + tc = cases->Traverse(cb); + HANDLE_TC_STMT_PRE(tc); + } tc = s->Traverse(cb); HANDLE_TC_STMT_PRE(tc); diff --git a/src/Var.cc b/src/Var.cc index aa45faaf41..6cb113a11e 100644 --- a/src/Var.cc +++ b/src/Var.cc @@ -9,6 +9,7 @@ #include "Serializer.h" #include "RemoteSerializer.h" #include "EventRegistry.h" +#include "Traverse.h" static Val* init_val(Expr* init, const BroType* t, Val* aggr) { @@ -392,6 +393,34 @@ void begin_func(ID* id, const char* module_name, function_flavor flavor, } } +class OuterParamBindingFinder : public TraversalCallback { +public: + OuterParamBindingFinder(Scope* s) + : scope(s) { } + + virtual TraversalCode PreExpr(const Expr*); + + Scope* scope; + vector 
outer_param_references; +}; + +TraversalCode OuterParamBindingFinder::PreExpr(const Expr* expr) + { + if ( expr->Tag() != EXPR_NAME ) + return TC_CONTINUE; + + const NameExpr* e = static_cast(expr); + + if ( e->Id()->IsGlobal() ) + return TC_CONTINUE; + + if ( scope->GetIDs()->Lookup(e->Id()->Name()) ) + return TC_CONTINUE; + + outer_param_references.push_back(e); + return TC_CONTINUE; + } + void end_func(Stmt* body, attr_list* attrs) { int frame_size = current_scope()->Length(); @@ -429,6 +458,16 @@ void end_func(Stmt* body, attr_list* attrs) } } + if ( streq(id->Name(), "anonymous-function") ) + { + OuterParamBindingFinder cb(scope); + body->Traverse(&cb); + + for ( size_t i = 0; i < cb.outer_param_references.size(); ++i ) + cb.outer_param_references[i]->Error( + "referencing outer function parameters not supported"); + } + if ( id->HasVal() ) id->ID_Val()->AsFunc()->AddBody(body, inits, frame_size, priority); else diff --git a/testing/btest/Baseline/language.outer_param_binding/out b/testing/btest/Baseline/language.outer_param_binding/out new file mode 100644 index 0000000000..3dfc82da3e --- /dev/null +++ b/testing/btest/Baseline/language.outer_param_binding/out @@ -0,0 +1,3 @@ +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.bro, line 16: referencing outer function parameters not supported (c) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.bro, line 16: referencing outer function parameters not supported (d) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.bro, line 17: referencing outer function parameters not supported (b) diff --git a/testing/btest/language/outer_param_binding.bro b/testing/btest/language/outer_param_binding.bro new file mode 100644 index 0000000000..fb37fd4712 --- /dev/null +++ b/testing/btest/language/outer_param_binding.bro @@ -0,0 +1,27 @@ +# @TEST-EXEC-FAIL: 
bro -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +type Foo: record { + x: function(a: string) : string; +}; + +function bar(b: string, c: string) + { + local f: Foo; + local d = 8; + f = [$x=function(a: string) : string + { + local x = 0; + print x; + print c, d; + return cat(a, " ", b); + } + ]; + + print f$x("2"); + } + +event bro_init() + { + bar("1", "20"); + } From 93e6a4a9dbc13827e2269fb9faf0fa04a77d36de Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 22 Aug 2014 16:13:33 -0700 Subject: [PATCH 023/106] Removing netmap, remaining pieces of the 2ndary path, and left-over files of packet sorter. Netmap will move to a plugin. --- TODO.iosources | 1 + src/DebugLogger.cc | 2 +- src/Net.cc | 11 +- src/Net.h | 2 +- src/PacketSort.cc | 364 -------------------- src/PacketSort.h | 130 ------- src/PktSrc.cc | 95 ----- src/Sessions.cc | 49 --- src/Sessions.h | 8 +- src/iosource/pktsrc/CMakeLists.txt | 1 - src/iosource/pktsrc/PktSrc.h | 13 - src/iosource/pktsrc/netmap/CMakeLists.txt | 12 - src/iosource/pktsrc/netmap/FindNetmap.cmake | 33 -- src/iosource/pktsrc/netmap/Plugin.cc | 26 -- src/iosource/pktsrc/netmap/Source.cc | 127 ------- src/iosource/pktsrc/netmap/Source.h | 48 --- src/iosource/pktsrc/old-2ndary-code.h | 69 ---- src/iosource/pktsrc/pcap/Source.cc | 6 - src/main.cc | 14 +- src/plugin/Component.h | 4 +- 20 files changed, 16 insertions(+), 999 deletions(-) delete mode 100644 src/PacketSort.cc delete mode 100644 src/PacketSort.h delete mode 100644 src/iosource/pktsrc/netmap/CMakeLists.txt delete mode 100644 src/iosource/pktsrc/netmap/FindNetmap.cmake delete mode 100644 src/iosource/pktsrc/netmap/Plugin.cc delete mode 100644 src/iosource/pktsrc/netmap/Source.cc delete mode 100644 src/iosource/pktsrc/netmap/Source.h delete mode 100644 src/iosource/pktsrc/old-2ndary-code.h diff --git a/TODO.iosources b/TODO.iosources index 7380c89b92..7b84a60fb4 100644 --- a/TODO.iosources +++ b/TODO.iosources @@ -7,3 
+7,4 @@ - PktDumper: Move Dump() to public and remove Record() - Wrap BPF_Program into namespace and clean up - Tests, in particular the packet dumping needs testing. +- Add an interface for derived pkt source to run a BPF filter. diff --git a/src/DebugLogger.cc b/src/DebugLogger.cc index 08b6a35203..6f025e3c2b 100644 --- a/src/DebugLogger.cc +++ b/src/DebugLogger.cc @@ -16,7 +16,7 @@ DebugLogger::Stream DebugLogger::streams[NUM_DBGS] = { { "compressor", 0, false }, {"string", 0, false }, { "notifiers", 0, false }, { "main-loop", 0, false }, { "dpd", 0, false }, { "tm", 0, false }, - { "logging", 0, false }, {"input", 0, false }, + { "logging", 0, false }, {"input", 0, false }, { "threading", 0, false }, { "file_analysis", 0, false }, { "plugins", 0, false }, { "broxygen", 0, false }, { "pktio", 0, false} diff --git a/src/Net.cc b/src/Net.cc index 787bcb56a2..8168f6807d 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -155,10 +155,10 @@ void net_update_time(double new_network_time) void net_init(name_list& interfaces, name_list& readfiles, name_list& netflows, name_list& flowfiles, - const char* writefile, const char* filter, - const char* secondary_filter, int do_watchdog) + const char* writefile, const char* filter, + int do_watchdog) { - if ( readfiles.length() > 0 || flowfiles.length() > 0 ) + if ( readfiles.length() > 0 ) { reading_live = pseudo_realtime > 0.0; reading_traces = 1; @@ -202,6 +202,11 @@ void net_init(name_list& interfaces, name_list& readfiles, if ( writefile ) { pkt_dumper = iosource_mgr->OpenPktDumper(writefile, false); + assert(pkt_dumper); + + if ( pkt_dumper->ErrorMsg().size() ) + reporter->FatalError("problem opening dump file %s - %s\n", + writefile, pkt_dumper->ErrorMsg().c_str()); ID* id = global_scope()->Lookup("trace_output_file"); if ( ! 
id ) diff --git a/src/Net.h b/src/Net.h index bf42222538..06b7da9691 100644 --- a/src/Net.h +++ b/src/Net.h @@ -16,7 +16,7 @@ extern void net_init(name_list& interfaces, name_list& readfiles, name_list& netflows, name_list& flowfiles, const char* writefile, const char* filter, - const char* secondary_filter, int do_watchdog); + int do_watchdog); extern void net_run(); extern void net_get_final_stats(); extern void net_finish(int drain_events); diff --git a/src/PacketSort.cc b/src/PacketSort.cc deleted file mode 100644 index 606d21b689..0000000000 --- a/src/PacketSort.cc +++ /dev/null @@ -1,364 +0,0 @@ -#include "IP.h" -#include "PacketSort.h" - -const bool DEBUG_packetsort = false; - -PacketSortElement::PacketSortElement(iosource::PktSrc* arg_src, - double arg_timestamp, const struct pcap_pkthdr* arg_hdr, - const u_char* arg_pkt, int arg_hdr_size) - { - src = arg_src; - timestamp = arg_timestamp; - hdr = *arg_hdr; - hdr_size = arg_hdr_size; - - pkt = new u_char[hdr.caplen]; - memcpy(pkt, arg_pkt, hdr.caplen); - - is_tcp = 0; - ip_hdr = 0; - tcp_flags = 0; - endp = 0; - payload_length = 0; - key = 0; - - // Now check if it is a "parsable" TCP packet. - uint32 caplen = hdr.caplen; - uint32 tcp_offset; - - if ( caplen >= sizeof(struct ip) + hdr_size ) - { - const struct ip* ip = (const struct ip*) (pkt + hdr_size); - if ( ip->ip_v == 4 ) - ip_hdr = new IP_Hdr(ip, false); - else if ( ip->ip_v == 6 && (caplen >= sizeof(struct ip6_hdr) + hdr_size) ) - ip_hdr = new IP_Hdr((const struct ip6_hdr*) ip, false, caplen - hdr_size); - else - // Weird will be generated later in NetSessions::NextPacket. - return; - - if ( ip_hdr->NextProto() == IPPROTO_TCP && - // Note: can't sort fragmented packets - ( ! 
ip_hdr->IsFragment() ) ) - { - tcp_offset = hdr_size + ip_hdr->HdrLen(); - if ( caplen >= tcp_offset + sizeof(struct tcphdr) ) - { - const struct tcphdr* tp = (const struct tcphdr*) - (pkt + tcp_offset); - - id.src_addr = ip_hdr->SrcAddr(); - id.dst_addr = ip_hdr->DstAddr(); - id.src_port = tp->th_sport; - id.dst_port = tp->th_dport; - id.is_one_way = 0; - - endp = addr_port_canon_lt(id.src_addr, - id.src_port, - id.dst_addr, - id.dst_port) ? 0 : 1; - - seq[endp] = ntohl(tp->th_seq); - - if ( tp->th_flags & TH_ACK ) - seq[1-endp] = ntohl(tp->th_ack); - else - seq[1-endp] = 0; - - tcp_flags = tp->th_flags; - - // DEBUG_MSG("%.6f: %u, %u\n", timestamp, seq[0], seq[1]); - - payload_length = ip_hdr->PayloadLen() - tp->th_off * 4; - - key = BuildConnIDHashKey(id); - - is_tcp = 1; - } - } - } - - if ( DEBUG_packetsort && ! is_tcp ) - DEBUG_MSG("%.6f non-TCP packet\n", timestamp); - } - -PacketSortElement::~PacketSortElement() - { - delete [] pkt; - delete ip_hdr; - delete key; - } - -int PacketSortPQ::Timestamp_Cmp(PacketSortElement* a, PacketSortElement* b) - { - double d = a->timestamp - b->timestamp; - - if ( d > 0 ) return 1; - else if ( d < 0 ) return -1; - else return 0; - } - -int PacketSortPQ::UpdatePQ(PacketSortElement* prev_e, PacketSortElement* new_e) - { - int index = prev_e->pq_index[pq_level]; - - new_e->pq_index[pq_level] = index; - pq[index] = new_e; - - if ( Cmp(prev_e, new_e) > 0 ) - return FixUp(new_e, index); - else - { - FixDown(new_e, index); - return index == 0; - } - } - -int PacketSortPQ::AddToPQ(PacketSortElement* new_e) - { - int index = pq.size(); - - new_e->pq_index[pq_level] = index; - pq.push_back(new_e); - - return FixUp(new_e, index); - } - -int PacketSortPQ::RemoveFromPQ(PacketSortElement* prev_e) - { - if ( pq.size() > 1 ) - { - PacketSortElement* new_e = pq[pq.size() - 1]; - pq.pop_back(); - return UpdatePQ(prev_e, new_e); - } - else - { - pq.pop_back(); - return 1; - } - } - -void PacketSortPQ::Assign(int k, PacketSortElement* e) - { 
- pq[k] = e; - e->pq_index[pq_level] = k; - } - -PacketSortConnPQ::~PacketSortConnPQ() - { - // Delete elements only in ConnPQ (not in GlobalPQ) to avoid - // double delete. - for ( int i = 0; i < (int) pq.size(); ++i ) - { - delete pq[i]; - pq[i] = 0; - } - } - -int PacketSortConnPQ::Cmp(PacketSortElement* a, PacketSortElement* b) - { - // Note: here we do not distinguish between packets without - // an ACK and packets with seq/ack of 0. The later will sorted - // only by their timestamps. - - if ( a->seq[0] && b->seq[0] && a->seq[0] != b->seq[0] ) - return (a->seq[0] > b->seq[0]) ? 1 : -1; - - else if ( a->seq[1] && b->seq[1] && a->seq[1] != b->seq[1] ) - return (a->seq[1] > b->seq[1]) ? 1 : -1; - - else - return Timestamp_Cmp(a, b); - } - -int PacketSortPQ::FixUp(PacketSortElement* e, int k) - { - if ( k == 0 ) - { - Assign(0, e); - return 1; - } - - int parent = (k-1) / 2; - if ( Cmp(pq[parent], e) > 0 ) - { - Assign(k, pq[parent]); - return FixUp(e, parent); - } - else - { - Assign(k, e); - return 0; - } - } - -void PacketSortPQ::FixDown(PacketSortElement* e, int k) - { - uint32 kid = k * 2 + 1; - - if ( kid >= pq.size() ) - { - Assign(k, e); - return; - } - - if ( kid + 1 < pq.size() && Cmp(pq[kid], pq[kid+1]) > 0 ) - ++kid; - - if ( Cmp(e, pq[kid]) > 0 ) - { - Assign(k, pq[kid]); - FixDown(e, kid); - } - else - Assign(k, e); - } - - -int PacketSortConnPQ::Add(PacketSortElement* e) - { -#if 0 - int endp = e->endp; - uint32 end_seq = e->seq[endp] + e->payload_length; - - int p = 1 - endp; - if ( (e->tcp_flags & TH_RST) && ! (e->tcp_flags & TH_ACK) ) - { - DEBUG_MSG("%.6f %c: %u -> %u\n", - e->TimeStamp(), (p == endp) ? 
'S' : 'A', - e->seq[p], next_seq[p]); - e->seq[p] = next_seq[p]; - } - - if ( end_seq > next_seq[endp] ) - next_seq[endp] = end_seq; -#endif - - return AddToPQ(e); - } - -void PacketSortConnPQ::UpdateDeliveredSeq(int endp, int seq, int len, int ack) - { - if ( delivered_seq[endp] == 0 || delivered_seq[endp] == seq ) - delivered_seq[endp] = seq + len; - if ( ack > delivered_seq[1 - endp] ) - delivered_seq[endp] = ack; - } - -bool PacketSortConnPQ::IsContentGapSafe(PacketSortElement* e) - { - int ack = e->seq[1 - e->endp]; - return ack <= delivered_seq[1 - e->endp]; - } - -int PacketSortConnPQ::Remove(PacketSortElement* e) - { - int ret = RemoveFromPQ(e); - UpdateDeliveredSeq(e->endp, e->seq[e->endp], e->payload_length, - e->seq[1 - e->endp]); - return ret; - } - -static void DeleteConnPQ(void* p) - { - delete (PacketSortConnPQ*) p; - } - -PacketSortGlobalPQ::PacketSortGlobalPQ() - { - pq_level = GLOBAL_PQ; - conn_pq_table.SetDeleteFunc(DeleteConnPQ); - } - -PacketSortGlobalPQ::~PacketSortGlobalPQ() - { - // Destruction of PacketSortConnPQ will delete all conn_pq's. - } - -int PacketSortGlobalPQ::Add(PacketSortElement* e) - { - if ( e->is_tcp ) - { - // TCP packets are sorted by sequence numbers - PacketSortConnPQ* conn_pq = FindConnPQ(e); - PacketSortElement* prev_min = conn_pq->Min(); - - if ( conn_pq->Add(e) ) - { - ASSERT(conn_pq->Min() != prev_min); - - if ( prev_min ) - return UpdatePQ(prev_min, e); - else - return AddToPQ(e); - } - - else - { - ASSERT(conn_pq->Min() == prev_min); - return 0; - } - } - else - return AddToPQ(e); - } - -PacketSortElement* PacketSortGlobalPQ::RemoveMin(double timestamp) - { - PacketSortElement* e = Min(); - - if ( ! e ) - return 0; - - if ( e->is_tcp ) - { - PacketSortConnPQ* conn_pq = FindConnPQ(e); - -#if 0 - // Note: the content gap safety check does not work - // because we remove the state for a connection once - // it has no packet in the priority queue. 
- - // Do not deliver e if it arrives later than timestamp, - // and is not content-gap-safe. - if ( e->timestamp > timestamp && - ! conn_pq->IsContentGapSafe(e) ) - return 0; -#else - if ( e->timestamp > timestamp ) - return 0; -#endif - - conn_pq->Remove(e); - PacketSortElement* new_e = conn_pq->Min(); - - if ( new_e ) - UpdatePQ(e, new_e); - else - { - RemoveFromPQ(e); - conn_pq_table.Remove(e->key); - delete conn_pq; - } - } - else - RemoveFromPQ(e); - - return e; - } - -PacketSortConnPQ* PacketSortGlobalPQ::FindConnPQ(PacketSortElement* e) - { - if ( ! e->is_tcp ) - reporter->InternalError("cannot find a connection for an invalid id"); - - PacketSortConnPQ* pq = (PacketSortConnPQ*) conn_pq_table.Lookup(e->key); - if ( ! pq ) - { - pq = new PacketSortConnPQ(); - conn_pq_table.Insert(e->key, pq); - } - - return pq; - } diff --git a/src/PacketSort.h b/src/PacketSort.h deleted file mode 100644 index d61f66994e..0000000000 --- a/src/PacketSort.h +++ /dev/null @@ -1,130 +0,0 @@ -#ifndef packetsort_h -#define packetsort_h - -// Timestamps can be imprecise and even inconsistent among packets -// from different sources. This class tries to guess a "correct" -// order by looking at TCP sequence numbers. -// -// In particular, it tries to eliminate "false" content gaps. 
- -#include "Dict.h" -#include "Conn.h" - -enum { - CONN_PQ, - GLOBAL_PQ, - NUM_OF_PQ_LEVEL, -}; - -class PacketSortElement { -public: - PacketSortElement(iosource::PktSrc* src, double timestamp, - const struct pcap_pkthdr* hdr, - const u_char* pkt, int hdr_size); - ~PacketSortElement(); - - iosource::PktSrc* Src() const { return src; } - double TimeStamp() const { return timestamp; } - const struct pcap_pkthdr* Hdr() const { return &hdr; } - const u_char* Pkt() const { return pkt; } - int HdrSize() const { return hdr_size; } - const IP_Hdr* IPHdr() const { return ip_hdr; } - -protected: - iosource::PktSrc* src; - double timestamp; - struct pcap_pkthdr hdr; - u_char* pkt; - int hdr_size; - - IP_Hdr* ip_hdr; - int is_tcp; - ConnID id; - uint32 seq[2]; // indexed by endpoint - int tcp_flags; - int endp; // 0 or 1 - int payload_length; - - HashKey* key; - - int pq_index[NUM_OF_PQ_LEVEL]; - - friend class PacketSortPQ; - friend class PacketSortConnPQ; - friend class PacketSortGlobalPQ; -}; - -class PacketSortPQ { -public: - PacketSortPQ() - { pq_level = -1; } - virtual ~PacketSortPQ() {} - - PacketSortElement* Min() const { return (pq.size() > 0) ? 
pq[0] : 0; } - -protected: - virtual int Cmp(PacketSortElement* a, PacketSortElement* b) = 0; - int Timestamp_Cmp(PacketSortElement* a, PacketSortElement* b); - - int UpdatePQ(PacketSortElement* prev_e, PacketSortElement* new_e); - int AddToPQ(PacketSortElement* e); - int RemoveFromPQ(PacketSortElement* e); - - void Assign(int k, PacketSortElement* e); - int FixUp(PacketSortElement* e, int k); - void FixDown(PacketSortElement* e, int k); - - vector pq; - int pq_level; -}; - -// Sort by sequence numbers within a connection -class PacketSortConnPQ : public PacketSortPQ { -public: - PacketSortConnPQ() - { - pq_level = CONN_PQ; - delivered_seq[0] = delivered_seq[1] = 0; - } - ~PacketSortConnPQ(); - - int Add(PacketSortElement* e); - - int Remove(PacketSortElement* e); - - bool IsContentGapSafe(PacketSortElement* e); - -protected: - int Cmp(PacketSortElement* a, PacketSortElement* b); - void UpdateDeliveredSeq(int endp, int seq, int len, int ack); - - int delivered_seq[2]; -}; - -declare(PDict, PacketSortConnPQ); - -// Sort by timestamps. -class PacketSortGlobalPQ : public PacketSortPQ { -public: - PacketSortGlobalPQ(); - ~PacketSortGlobalPQ(); - - int Add(PacketSortElement* e); - - int Empty() const { return conn_pq_table.Length() == 0; } - - // Returns the next packet to dispatch if it arrives earlier than the - // given timestamp, otherwise returns 0. - // The packet, if to be returned, is also removed from the - // priority queue. 
- PacketSortElement* RemoveMin(double timestamp); - -protected: - int Cmp(PacketSortElement* a, PacketSortElement* b) - { return Timestamp_Cmp(a, b); } - PacketSortConnPQ* FindConnPQ(PacketSortElement* e); - - PDict(PacketSortConnPQ) conn_pq_table; -}; - -#endif diff --git a/src/PktSrc.cc b/src/PktSrc.cc index b5ac3a5d69..7a0ed4fa0b 100644 --- a/src/PktSrc.cc +++ b/src/PktSrc.cc @@ -364,10 +364,6 @@ int PktSrc::PrecompileFilter(int index, const char* filter) int PktSrc::SetFilter(int index) { - // We don't want load-level filters for the secondary path. - if ( filter_type == TYPE_FILTER_SECONDARY && index > 0 ) - return 1; - HashKey* hash = new HashKey(HashKey(bro_int_t(index))); BPF_Program* code = filters.Lookup(hash); delete hash; @@ -421,28 +417,6 @@ void PktSrc::Close() } } -void PktSrc::AddSecondaryTablePrograms() - { - BPF_Program* program; - - loop_over_list(secondary_path->EventTable(), i) - { - SecondaryEvent* se = secondary_path->EventTable()[i]; - program = new BPF_Program(); - - if ( ! program->Compile(snaplen, datalink, se->Filter(), - netmask, errbuf, sizeof(errbuf)) ) - { - delete program; - Close(); - return; - } - - SecondaryProgram* sp = new SecondaryProgram(program, se); - program_list.append(sp); - } - } - void PktSrc::Statistics(Stats* s) { if ( reading_traces ) @@ -581,75 +555,6 @@ PktFileSrc::PktFileSrc(const char* arg_readfile, const char* filter, closed = true; } - -SecondaryPath::SecondaryPath() - { - filter = 0; - - // Glue together the secondary filter, if exists. - Val* secondary_fv = internal_val("secondary_filters"); - if ( secondary_fv->AsTableVal()->Size() == 0 ) - return; - - int did_first = 0; - const TableEntryValPDict* v = secondary_fv->AsTable(); - IterCookie* c = v->InitForIteration(); - TableEntryVal* tv; - HashKey* h; - - while ( (tv = v->NextEntry(h, c)) ) - { - // Get the index values. 
- ListVal* index = - secondary_fv->AsTableVal()->RecoverIndex(h); - - const char* str = - index->Index(0)->Ref()->AsString()->CheckString(); - - if ( ++did_first == 1 ) - { - filter = copy_string(str); - } - else - { - if ( strlen(filter) > 0 ) - { - char* tmp_f = new char[strlen(str) + strlen(filter) + 32]; - if ( strlen(str) == 0 ) - sprintf(tmp_f, "%s", filter); - else - sprintf(tmp_f, "(%s) or (%s)", filter, str); - delete [] filter; - filter = tmp_f; - } - } - - // Build secondary_path event table item and link it. - SecondaryEvent* se = - new SecondaryEvent(index->Index(0)->Ref()->AsString()->CheckString(), - tv->Value()->AsFunc() ); - - event_list.append(se); - - delete h; - Unref(index); - } - } - -SecondaryPath::~SecondaryPath() - { - loop_over_list(event_list, i) - delete event_list[i]; - - delete [] filter; - } - - -SecondaryProgram::~SecondaryProgram() - { - delete program; - } - PktDumper::PktDumper(const char* arg_filename, bool arg_append) { filename[0] = '\0'; diff --git a/src/Sessions.cc b/src/Sessions.cc index 01f81afde6..3aeeaadf3b 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -259,55 +259,6 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, DumpPacket(hdr, pkt); } -void NetSessions::NextPacketSecondary(double /* t */, const struct pcap_pkthdr* hdr, - const u_char* const pkt, int hdr_size, - const iosource::PktSrc* src_ps) - { - SegmentProfiler(segment_logger, "processing-secondary-packet"); - - ++num_packets_processed; - - uint32 caplen = hdr->caplen - hdr_size; - if ( caplen < sizeof(struct ip) ) - { - Weird("truncated_IP", hdr, pkt); - return; - } - -#if 0 - const struct ip* ip = (const struct ip*) (pkt + hdr_size); - if ( ip->ip_v == 4 ) - { - const secondary_program_list& spt = src_ps->ProgramTable(); - - loop_over_list(spt, i) - { - SecondaryProgram* sp = spt[i]; - if ( ! 
net_packet_match(sp->Program(), pkt, - hdr->len, hdr->caplen) ) - continue; - - val_list* args = new val_list; - StringVal* cmd_val = - new StringVal(sp->Event()->Filter()); - args->append(cmd_val); - IP_Hdr ip_hdr(ip, false); - args->append(ip_hdr.BuildPktHdrVal()); - // ### Need to queue event here. - try - { - sp->Event()->Event()->Call(args); - } - - catch ( InterpreterException& e ) - { /* Already reported. */ } - - delete args; - } - } -#endif - } - int NetSessions::CheckConnectionTag(Connection* conn) { if ( current_iosrc->GetCurrentTag() ) diff --git a/src/Sessions.h b/src/Sessions.h index 619574cfc2..c46c092263 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -69,8 +69,8 @@ public: ~NetSessions(); // Main entry point for packet processing. Dispatches the packet - // either through NextPacket() or NextPacketSecondary(), optionally - // employing the packet sorter first. + // either through NextPacket(), optionally employing the packet + // sorter first. void DispatchPacket(double t, const struct pcap_pkthdr* hdr, const u_char* const pkt, int hdr_size, iosource::PktSrc* src_ps); @@ -221,10 +221,6 @@ protected: void NextPacket(double t, const struct pcap_pkthdr* hdr, const u_char* const pkt, int hdr_size); - void NextPacketSecondary(double t, const struct pcap_pkthdr* hdr, - const u_char* const pkt, int hdr_size, - const iosource::PktSrc* src_ps); - // Record the given packet (if a dumper is active). If len=0 // then the whole packet is recorded, otherwise just the first // len bytes. 
diff --git a/src/iosource/pktsrc/CMakeLists.txt b/src/iosource/pktsrc/CMakeLists.txt index 9c8a458c54..07303b46a3 100644 --- a/src/iosource/pktsrc/CMakeLists.txt +++ b/src/iosource/pktsrc/CMakeLists.txt @@ -1,3 +1,2 @@ add_subdirectory(pcap) -add_subdirectory(netmap) diff --git a/src/iosource/pktsrc/PktSrc.h b/src/iosource/pktsrc/PktSrc.h index edeecfe6cf..3eddaec6e8 100644 --- a/src/iosource/pktsrc/PktSrc.h +++ b/src/iosource/pktsrc/PktSrc.h @@ -59,14 +59,6 @@ public: static int GetLinkHeaderSize(int link_type); -#if 0 - PktSrc_Filter_Type FilterType() const { return filter_type; } - - void AddSecondaryTablePrograms(); - const secondary_program_list& ProgramTable() const - { return program_list; } -#endif - protected: // Methods to use by derived classes. @@ -129,11 +121,6 @@ private: double next_sync_point; // For trace synchronziation in pseudo-realtime std::string errbuf; - -#if 0 - PktSrc_Filter_Type filter_type; // normal path or secondary path - secondary_program_list program_list; -#endif }; } diff --git a/src/iosource/pktsrc/netmap/CMakeLists.txt b/src/iosource/pktsrc/netmap/CMakeLists.txt deleted file mode 100644 index a8a8a78a16..0000000000 --- a/src/iosource/pktsrc/netmap/CMakeLists.txt +++ /dev/null @@ -1,12 +0,0 @@ - -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_SOURCE_DIR}) - -find_package(Netmap) - -if ( NETMAP_FOUND ) - include(BroPlugin) - include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${NETMAP_INCLUDE_DIR}/sys) - bro_plugin_begin(Bro Netmap) - bro_plugin_cc(Source.cc Plugin.cc) - bro_plugin_end() -endif () diff --git a/src/iosource/pktsrc/netmap/FindNetmap.cmake b/src/iosource/pktsrc/netmap/FindNetmap.cmake deleted file mode 100644 index a04da2a6a0..0000000000 --- a/src/iosource/pktsrc/netmap/FindNetmap.cmake +++ /dev/null @@ -1,33 +0,0 @@ -# - Try to find netmap includes. 
-# -# -# Variables used by this module, they can change the default behaviour and need -# to be set before calling find_package: -# -# NETMAP_ROOT_DIR Set this variable to the root installation of -# netmap if the module has problems finding the -# proper installation path. -# -# Variables defined by this module: -# -# NETMAP_FOUND System has netmap API files. -# NETMAP_INCLUDE_DIR The netmap include directory. - -find_path(NETMAP_ROOT_DIR - NAMES sys/net/netmap_user.h -) - -find_path(NETMAP_INCLUDE_DIR - NAMES sys/net/netmap_user.h - HINTS ${NETMAP_ROOT_DIR} -) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(Netmap DEFAULT_MSG - NETMAP_INCLUDE_DIR -) - -mark_as_advanced( - NETMAP_ROOT_DIR - NETMAP_INCLUDE_DIR -) diff --git a/src/iosource/pktsrc/netmap/Plugin.cc b/src/iosource/pktsrc/netmap/Plugin.cc deleted file mode 100644 index 05b4434e16..0000000000 --- a/src/iosource/pktsrc/netmap/Plugin.cc +++ /dev/null @@ -1,26 +0,0 @@ -// See the file in the main distribution directory for copyright. 
- -#include "plugin/Plugin.h" - -#include "Source.h" - -namespace plugin { -namespace Bro_Netmap { - -class Plugin : public plugin::Plugin { -public: - plugin::Configuration Configure() - { - AddComponent(new ::iosource::pktsrc::SourceComponent("NetmapReader", "netmap", ::iosource::pktsrc::SourceComponent::LIVE, ::iosource::pktsrc::NetmapSource::InstantiateNetmap)); - AddComponent(new ::iosource::pktsrc::SourceComponent("NetmapReader", "vale", ::iosource::pktsrc::SourceComponent::LIVE, ::iosource::pktsrc::NetmapSource::InstantiateVale)); - - plugin::Configuration config; - config.name = "Bro::Netmap"; - config.description = "Packet aquisition via netmap"; - return config; - } -} plugin; - -} -} - diff --git a/src/iosource/pktsrc/netmap/Source.cc b/src/iosource/pktsrc/netmap/Source.cc deleted file mode 100644 index b0569a5a0c..0000000000 --- a/src/iosource/pktsrc/netmap/Source.cc +++ /dev/null @@ -1,127 +0,0 @@ - -#include - -#include "config.h" - -#include "Source.h" - -using namespace iosource::pktsrc; - -NetmapSource::~NetmapSource() - { - Close(); - } - -NetmapSource::NetmapSource(const std::string& path, const std::string& filter, bool is_live, const std::string& arg_kind) - { - if ( ! is_live ) - Error("netmap source does not support offline input"); - - kind = arg_kind; - props.path = path; - props.filter = filter; - last_data = 0; - } - -void NetmapSource::Close() - { - if ( ! nd ) - return; - - nm_close(nd); - nd = 0; - last_data = 0; - - Closed(); - } - -void NetmapSource::Open() - { - std::string iface = kind + ":" + props.path; - nd = nm_open(iface.c_str(), getenv("NETMAP_RING_ID"), 0, 0); - - if ( ! nd ) - { - Error(errno ? 
strerror(errno) : "invalid interface"); - return; - } - - props.selectable_fd = NETMAP_FD(nd); - props.is_live = true; - props.link_type = DLT_EN10MB; - props.hdr_size = GetLinkHeaderSize(props.link_type); - assert(props.hdr_size >= 0); - - Info(fmt("netmap listening on %s\n", props.path.c_str())); - - Opened(props); - } - -int NetmapSource::ExtractNextPacket(Packet* pkt) - { - nm_hdr_t hdr; - const u_char* data = nm_nextpkt(nd, &hdr); - - if ( ! data ) - // Source has gone dry. - return 0; - - current_hdr.ts = hdr.ts; - current_hdr.caplen = hdr.caplen; - current_hdr.len = hdr.len; - - pkt->ts = current_hdr.ts.tv_sec + double(current_hdr.ts.tv_usec) / 1e6; - pkt->hdr = ¤t_hdr; - pkt->data = last_data = data; - - if ( current_hdr.len == 0 || current_hdr.caplen == 0 ) - { - Weird("empty_netmap_header", pkt); - return 0; - } - - last_hdr = current_hdr; - last_data = data; - ++stats.received; - return 1; - } - -void NetmapSource::DoneWithPacket(Packet* pkt) - { - // Nothing to do. - } - -void NetmapSource::Statistics(Stats* s) - { - if ( ! nd ) - { - s->received = s->link = s->dropped = 0; - return; - } - - s->received = stats.received; - - // TODO: Seems these counter's aren't actually set? - s->link = nd->st.ps_recv; - s->dropped = nd->st.ps_drop + nd->st.ps_ifdrop; - } - -bool NetmapSource::GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt) - { - if ( ! 
last_data ) - return false; - - *hdr = &last_hdr; - *pkt = last_data; - return true; - } - -iosource::PktSrc* NetmapSource::InstantiateNetmap(const std::string& path, const std::string& filter, bool is_live) - { - return new NetmapSource(path, filter, is_live, "netmap"); - } - -iosource::PktSrc* NetmapSource::InstantiateVale(const std::string& path, const std::string& filter, bool is_live) - { - return new NetmapSource(path, filter, is_live, "value"); - } diff --git a/src/iosource/pktsrc/netmap/Source.h b/src/iosource/pktsrc/netmap/Source.h deleted file mode 100644 index ff17fe792c..0000000000 --- a/src/iosource/pktsrc/netmap/Source.h +++ /dev/null @@ -1,48 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#ifndef IOSOURCE_PKTSRC_NETMAP_SOURCE_H -#define IOSOURCE_PKTSRC_NETMAP_SOURCE_H - -extern "C" { -#define NETMAP_WITH_LIBS -#include -} - -#include "../PktSrc.h" - -namespace iosource { -namespace pktsrc { - -class NetmapSource : public iosource::PktSrc { -public: - // XXX - NetmapSource(const std::string& path, const std::string& filter, bool is_live, const std::string& kind); - virtual ~NetmapSource(); - - static PktSrc* InstantiateNetmap(const std::string& path, const std::string& filter, bool is_live); - static PktSrc* InstantiateVale(const std::string& path, const std::string& filter, bool is_live); - -protected: - // PktSrc interface. 
- virtual void Open(); - virtual void Close(); - virtual int ExtractNextPacket(Packet* pkt); - virtual void DoneWithPacket(Packet* pkt); - virtual void Statistics(Stats* stats); - virtual bool GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt); - -private: - std::string kind; - Properties props; - Stats stats; - - nm_desc_t *nd; - pcap_pkthdr current_hdr; - pcap_pkthdr last_hdr; - const u_char* last_data; -}; - -} -} - -#endif diff --git a/src/iosource/pktsrc/old-2ndary-code.h b/src/iosource/pktsrc/old-2ndary-code.h deleted file mode 100644 index 0b47cccdc5..0000000000 --- a/src/iosource/pktsrc/old-2ndary-code.h +++ /dev/null @@ -1,69 +0,0 @@ -// Whether a PktSrc object is used by the normal filter structure or the -// secondary-path structure. -typedef enum { - TYPE_FILTER_NORMAL, // the normal filter - TYPE_FILTER_SECONDARY, // the secondary-path filter -} PktSrc_Filter_Type; - -// {filter,event} tuples conforming the secondary path. -class SecondaryEvent { -public: - SecondaryEvent(const char* arg_filter, Func* arg_event) - { - filter = arg_filter; - event = arg_event; - } - - const char* Filter() { return filter; } - Func* Event() { return event; } - -private: - const char* filter; - Func* event; -}; - -declare(PList,SecondaryEvent); -typedef PList(SecondaryEvent) secondary_event_list; - -class SecondaryPath { -public: - SecondaryPath(); - ~SecondaryPath(); - - secondary_event_list& EventTable() { return event_list; } - const char* Filter() { return filter; } - -private: - secondary_event_list event_list; - // OR'ed union of all SecondaryEvent filters - char* filter; -}; - -// Main secondary-path object. -extern SecondaryPath* secondary_path; - -// {program, {filter,event}} tuple table. 
-class SecondaryProgram { -public: - SecondaryProgram(BPF_Program* arg_program, SecondaryEvent* arg_event) - { - program = arg_program; - event = arg_event; - } - - ~SecondaryProgram(); - - BPF_Program* Program() { return program; } - SecondaryEvent* Event() { return event; } - -private: - // Associated program. - BPF_Program *program; - - // Event that is run in case the program is matched. - SecondaryEvent* event; -}; - -declare(PList,SecondaryProgram); -typedef PList(SecondaryProgram) secondary_program_list; - diff --git a/src/iosource/pktsrc/pcap/Source.cc b/src/iosource/pktsrc/pcap/Source.cc index 8165724871..271c3efcd7 100644 --- a/src/iosource/pktsrc/pcap/Source.cc +++ b/src/iosource/pktsrc/pcap/Source.cc @@ -245,12 +245,6 @@ int PcapSource::SetFilter(int index) char errbuf[PCAP_ERRBUF_SIZE]; -#if 0 - // We don't want load-level filters for the secondary path. - if ( filter_type == TYPE_FILTER_SECONDARY && index > 0 ) - return 1; -#endif - HashKey* hash = new HashKey(HashKey(bro_int_t(index))); BPF_Program* code = filters.Lookup(hash); delete hash; diff --git a/src/main.cc b/src/main.cc index 15a14e4733..5d5bc38384 100644 --- a/src/main.cc +++ b/src/main.cc @@ -111,9 +111,6 @@ int optimize = 0; int do_notice_analysis = 0; int rule_bench = 0; int generate_documentation = 0; -#if 0 -SecondaryPath* secondary_path = 0; -#endif extern char version[]; char* command_line_policy = 0; vector params; @@ -386,9 +383,6 @@ void terminate_bro() delete event_serializer; delete state_serializer; delete event_registry; -#if 0 - delete secondary_path; -#endif delete remote_serializer; delete analyzer_mgr; delete file_mgr; @@ -1007,15 +1001,9 @@ int main(int argc, char** argv) snaplen = internal_val("snaplen")->AsCount(); -#if 0 - // Initialize the secondary path, if it's needed. 
- secondary_path = new SecondaryPath(); -#endif - if ( dns_type != DNS_PRIME ) net_init(interfaces, read_files, netflows, flow_files, - writefile, "", - "", do_watchdog); + writefile, "", do_watchdog); BroFile::SetDefaultRotation(log_rotate_interval, log_max_size); diff --git a/src/plugin/Component.h b/src/plugin/Component.h index 74cbbc6128..de24a7dbde 100644 --- a/src/plugin/Component.h +++ b/src/plugin/Component.h @@ -12,7 +12,7 @@ namespace plugin { namespace component { /** - * Component types. + * Component types. */ enum Type { READER, /// An input reader (not currently used). @@ -28,7 +28,7 @@ enum Type { /** * Base class for plugin components. A component is a specific piece of * functionality that a plugin provides, such as a protocol analyzer or a log - * writer. + * writer. */ class Component { From ecf1e32f607ba92c311f346bbef83e1d3246919f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 22 Aug 2014 16:33:55 -0700 Subject: [PATCH 024/106] Removing FlowSrc. We could bring this back, now derived from PktSrc (though strictly speaking it's of course not *packets). But not sure if we want that, as the input framework seems the better place to host it. Then it would turn into a reader.
--- src/CMakeLists.txt | 1 - src/FlowSrc.cc | 227 --------------------------------------- src/FlowSrc.h | 84 --------------- src/Net.cc | 1 - src/Net.h | 2 - src/analyzer/Analyzer.cc | 1 + src/main.cc | 23 +--- 7 files changed, 6 insertions(+), 333 deletions(-) delete mode 100644 src/FlowSrc.cc delete mode 100644 src/FlowSrc.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b1b504e62a..d1f6657630 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -279,7 +279,6 @@ set(bro_SRCS EventRegistry.cc Expr.cc File.cc - FlowSrc.cc Frag.cc Frame.cc Func.cc diff --git a/src/FlowSrc.cc b/src/FlowSrc.cc deleted file mode 100644 index f96ba72026..0000000000 --- a/src/FlowSrc.cc +++ /dev/null @@ -1,227 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// Written by Bernhard Ager, TU Berlin (2006/2007). - -#include -#include -#include -#include - -#include "FlowSrc.h" -#include "Net.h" -#include "analyzer/protocol/netflow/netflow_pac.h" -#include - -FlowSrc::FlowSrc() - { // TODO: v9. - selectable_fd = -1; - data = 0; - pdu_len = -1; - exporter_ip = 0; - current_timestamp = next_timestamp = 0.0; - netflow_analyzer = new binpac::NetFlow::NetFlow_Analyzer(); - } - -FlowSrc::~FlowSrc() - { - delete netflow_analyzer; - } - -void FlowSrc::GetFds(int* read, int* write, int* except) - { - if ( selectable_fd >= 0 ) - *read = selectable_fd; - } - -double FlowSrc::NextTimestamp(double* network_time) - { - if ( ! data && ! ExtractNextPDU() ) - return -1.0; - else - return next_timestamp; - } - -void FlowSrc::Process() - { - if ( ! data && ! ExtractNextPDU() ) - return; - - // This is normally done by calling net_packet_dispatch(), - // but as we don't have a packet to dispatch ... - net_update_time(next_timestamp); - expire_timers(); - - netflow_analyzer->downflow()->set_exporter_ip(exporter_ip); - - // We handle exceptions in NewData (might have changed w/ new binpac). 
- netflow_analyzer->NewData(0, data, data + pdu_len); - data = 0; - } - -void FlowSrc::Close() - { - safe_close(selectable_fd); - } - - -FlowSocketSrc::~FlowSocketSrc() - { - } - -int FlowSocketSrc::ExtractNextPDU() - { - sockaddr_in from; - socklen_t fromlen = sizeof(from); - pdu_len = recvfrom(selectable_fd, buffer, NF_MAX_PKT_SIZE, 0, - (struct sockaddr*) &from, &fromlen); - if ( pdu_len < 0 ) - { - reporter->Error("problem reading NetFlow data from socket"); - data = 0; - next_timestamp = -1.0; - SetClosed(true); - return 0; - } - - if ( fromlen != sizeof(from) ) - { - reporter->Error("malformed NetFlow PDU"); - return 0; - } - - data = buffer; - exporter_ip = from.sin_addr.s_addr; - next_timestamp = current_time(); - - if ( next_timestamp < current_timestamp ) - next_timestamp = current_timestamp; - else - current_timestamp = next_timestamp; - - return 1; - } - -FlowSocketSrc::FlowSocketSrc(const char* listen_parms) - { - int n = strlen(listen_parms) + 1; - - char laddr[n], port[n], ident[n]; - laddr[0] = port[0] = ident[0] = '\0'; - - int ret = sscanf(listen_parms, "%[^:]:%[^=]=%s", laddr, port, ident); - if ( ret < 2 ) - { - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "parsing your listen-spec went nuts: laddr='%s', port='%s'\n", - laddr[0] ? laddr : "", port[0] ? port : ""); - SetClosed(true); - return; - } - - const char* id = (ret == 3) ? 
ident : listen_parms; - netflow_analyzer->downflow()->set_identifier(id); - - struct addrinfo aiprefs = { - 0, PF_INET, SOCK_DGRAM, IPPROTO_UDP, 0, NULL, NULL, NULL - }; - struct addrinfo* ainfo = 0; - if ( (ret = getaddrinfo(laddr, port, &aiprefs, &ainfo)) != 0 ) - { - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "getaddrinfo(%s, %s, ...): %s", - laddr, port, gai_strerror(ret)); - SetClosed(true); - return; - } - - if ( (selectable_fd = socket (PF_INET, SOCK_DGRAM, 0)) < 0 ) - { - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "socket: %s", strerror(errno)); - SetClosed(true); - goto cleanup; - } - - if ( bind (selectable_fd, ainfo->ai_addr, ainfo->ai_addrlen) < 0 ) - { - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "bind: %s", strerror(errno)); - SetClosed(true); - goto cleanup; - } - -cleanup: - freeaddrinfo(ainfo); - } - - -FlowFileSrc::~FlowFileSrc() - { - delete [] readfile; - } - -int FlowFileSrc::ExtractNextPDU() - { - FlowFileSrcPDUHeader pdu_header; - - if ( read(selectable_fd, &pdu_header, sizeof(pdu_header)) < - int(sizeof(pdu_header)) ) - return Error(errno, "read header"); - - if ( pdu_header.pdu_length > NF_MAX_PKT_SIZE ) - { - reporter->Error("NetFlow packet too long"); - - // Safely skip over the too-long PDU. 
- if ( lseek(selectable_fd, pdu_header.pdu_length, SEEK_CUR) < 0 ) - return Error(errno, "lseek"); - return 0; - } - - if ( read(selectable_fd, buffer, pdu_header.pdu_length) < - pdu_header.pdu_length ) - return Error(errno, "read data"); - - if ( next_timestamp < pdu_header.network_time ) - { - next_timestamp = pdu_header.network_time; - current_timestamp = pdu_header.network_time; - } - else - current_timestamp = next_timestamp; - - data = buffer; - pdu_len = pdu_header.pdu_length; - exporter_ip = pdu_header.ipaddr; - - return 1; - } - -FlowFileSrc::FlowFileSrc(const char* readfile) - { - int n = strlen(readfile) + 1; - char ident[n]; - this->readfile = new char[n]; - - int ret = sscanf(readfile, "%[^=]=%s", this->readfile, ident); - const char* id = (ret == 2) ? ident : this->readfile; - netflow_analyzer->downflow()->set_identifier(id); - - selectable_fd = open(this->readfile, O_RDONLY); - if ( selectable_fd < 0 ) - { - SetClosed(true); - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "open: %s", strerror(errno)); - } - } - -int FlowFileSrc::Error(int errlvl, const char* errmsg) - { - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "%s: %s", errmsg, strerror(errlvl)); - data = 0; - next_timestamp = -1.0; - SetClosed(true); - return 0; - } diff --git a/src/FlowSrc.h b/src/FlowSrc.h deleted file mode 100644 index 71c8b0cd11..0000000000 --- a/src/FlowSrc.h +++ /dev/null @@ -1,84 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// Written by Bernhard Ager, TU Berlin (2006/2007). - -#ifndef flowsrc_h -#define flowsrc_h - -#include "iosource/IOSource.h" -#include "NetVar.h" -#include "binpac.h" - -#define BRO_FLOW_ERRBUF_SIZE 512 - -// TODO: 1500 is enough for v5 - how about the others? -// 65536 would be enough for any UDP packet. -#define NF_MAX_PKT_SIZE 8192 - -struct FlowFileSrcPDUHeader { - double network_time; - int pdu_length; - uint32 ipaddr; -}; - -// Avoid including netflow_pac.h by explicitly declaring the NetFlow_Analyzer. 
-namespace binpac { - namespace NetFlow { - class NetFlow_Analyzer; - } -} - -class FlowSrc : public iosource::IOSource { -public: - virtual ~FlowSrc(); - - // IOSource interface: - bool IsReady(); - void GetFds(int* read, int* write, int* except); - double NextTimestamp(double* network_time); - void Process(); - - const char* Tag() { return "FlowSrc"; } - const char* ErrorMsg() const { return errbuf; } - -protected: - FlowSrc(); - - virtual int ExtractNextPDU() = 0; - virtual void Close(); - - int selectable_fd; - - double current_timestamp; - double next_timestamp; - binpac::NetFlow::NetFlow_Analyzer* netflow_analyzer; - - u_char buffer[NF_MAX_PKT_SIZE]; - u_char* data; - int pdu_len; - uint32 exporter_ip; // in network byte order - - char errbuf[BRO_FLOW_ERRBUF_SIZE]; -}; - -class FlowSocketSrc : public FlowSrc { -public: - FlowSocketSrc(const char* listen_parms); - virtual ~FlowSocketSrc(); - - int ExtractNextPDU(); -}; - -class FlowFileSrc : public FlowSrc { -public: - FlowFileSrc(const char* readfile); - ~FlowFileSrc(); - - int ExtractNextPDU(); - -protected: - int Error(int errlvl, const char* errmsg); - char* readfile; -}; - -#endif diff --git a/src/Net.cc b/src/Net.cc index 8168f6807d..af9e3bb57f 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -154,7 +154,6 @@ void net_update_time(double new_network_time) } void net_init(name_list& interfaces, name_list& readfiles, - name_list& netflows, name_list& flowfiles, const char* writefile, const char* filter, int do_watchdog) { diff --git a/src/Net.h b/src/Net.h index 06b7da9691..073ffcd527 100644 --- a/src/Net.h +++ b/src/Net.h @@ -6,7 +6,6 @@ #include "net_util.h" #include "util.h" #include "List.h" -#include "FlowSrc.h" #include "Func.h" #include "RemoteSerializer.h" #include "iosource/IOSource.h" @@ -14,7 +13,6 @@ #include "iosource/pktsrc/PktDumper.h" extern void net_init(name_list& interfaces, name_list& readfiles, - name_list& netflows, name_list& flowfiles, const char* writefile, const char* filter, int 
do_watchdog); extern void net_run(); diff --git a/src/analyzer/Analyzer.cc b/src/analyzer/Analyzer.cc index bd85f8263a..fb5602f96e 100644 --- a/src/analyzer/Analyzer.cc +++ b/src/analyzer/Analyzer.cc @@ -4,6 +4,7 @@ #include "Analyzer.h" #include "Manager.h" +#include "binpac.h" #include "analyzer/protocol/pia/PIA.h" #include "../Event.h" diff --git a/src/main.cc b/src/main.cc index 5d5bc38384..5066ef85ee 100644 --- a/src/main.cc +++ b/src/main.cc @@ -449,8 +449,6 @@ int main(int argc, char** argv) name_list interfaces; name_list read_files; - name_list netflows; - name_list flow_files; name_list rule_files; char* bst_file = 0; char* id_name = 0; @@ -552,7 +550,7 @@ int main(int argc, char** argv) opterr = 0; char opts[256]; - safe_strncpy(opts, "B:D:e:f:I:i:K:l:n:p:R:r:s:T:t:U:w:x:X:y:Y:z:CFGLNOPSWabdghvZQ", + safe_strncpy(opts, "B:D:e:f:I:i:K:l:n:p:R:r:s:T:t:U:w:x:X:z:CFGLNOPSWabdghvZQ", sizeof(opts)); #ifdef USE_PERFTOOLS_DEBUG @@ -612,10 +610,6 @@ int main(int argc, char** argv) writefile = optarg; break; - case 'y': - flow_files.append(optarg); - break; - case 'z': if ( streq(optarg, "notice") ) do_notice_analysis = 1; @@ -709,10 +703,6 @@ int main(int argc, char** argv) do_watchdog = 1; break; - case 'Y': - netflows.append(optarg); - break; - case 'h': usage(); break; @@ -800,8 +790,7 @@ int main(int argc, char** argv) // seed the PRNG. We should do this here (but at least Linux, FreeBSD // and Solaris provide /dev/urandom). - if ( (interfaces.length() > 0 || netflows.length() > 0) && - (read_files.length() > 0 || flow_files.length() > 0 )) + if ( interfaces.length() > 0 && read_files.length() > 0 ) usage(); #ifdef USE_IDMEF @@ -824,7 +813,7 @@ int main(int argc, char** argv) plugin_mgr->SearchDynamicPlugins(bro_plugin_path()); if ( optind == argc && - read_files.length() == 0 && flow_files.length() == 0 && + read_files.length() == 0 && interfaces.length() == 0 && ! (id_name || bst_file) && ! command_line_policy && ! 
print_plugins ) add_input_file("-"); @@ -983,8 +972,7 @@ int main(int argc, char** argv) // ### Add support for debug command file. dbg_init_debugger(0); - if ( (flow_files.length() == 0 || read_files.length() == 0) && - (netflows.length() == 0 || interfaces.length() == 0) ) + if ( read_files.length() == 0 && interfaces.length() == 0 ) { Val* interfaces_val = internal_val("interfaces"); if ( interfaces_val ) @@ -1002,8 +990,7 @@ int main(int argc, char** argv) snaplen = internal_val("snaplen")->AsCount(); if ( dns_type != DNS_PRIME ) - net_init(interfaces, read_files, netflows, flow_files, - writefile, "", do_watchdog); + net_init(interfaces, read_files, writefile, "", do_watchdog); BroFile::SetDefaultRotation(log_rotate_interval, log_max_size); From caa55ad3529f3793a474cbd55896bd0142a4c0d8 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 22 Aug 2014 16:44:35 -0700 Subject: [PATCH 025/106] Moving Pkt{Src,Dumper} a directory level up. Also renaming PktSourceComponent to PktSrcComponent. 
--- TODO.iosources | 8 +- src/Net.cc | 4 +- src/Net.h | 4 +- src/iosource/CMakeLists.txt | 8 +- src/iosource/Component.cc | 147 +++++++++++++++++ src/iosource/Component.h | 109 ++++++++++++ src/iosource/Manager.cc | 19 +-- src/iosource/{pktsrc => }/PktDumper.cc | 0 src/iosource/{pktsrc => }/PktDumper.h | 2 +- src/iosource/{pktsrc => }/PktSrc.cc | 0 src/iosource/{pktsrc => }/PktSrc.h | 2 +- src/iosource/{pktsrc => }/pcap/BPF_Program.cc | 0 src/iosource/{pktsrc => }/pcap/BPF_Program.h | 0 src/iosource/{pktsrc => }/pcap/CMakeLists.txt | 0 src/iosource/{pktsrc => }/pcap/Dumper.cc | 0 src/iosource/{pktsrc => }/pcap/Dumper.h | 0 src/iosource/{pktsrc => }/pcap/Plugin.cc | 0 src/iosource/{pktsrc => }/pcap/Source.cc | 0 src/iosource/{pktsrc => }/pcap/Source.h | 0 src/iosource/pktsrc/CMakeLists.txt | 2 - src/iosource/pktsrc/Component.cc | 155 ------------------ src/iosource/pktsrc/Component.h | 124 -------------- src/plugin/Plugin.h | 1 - 23 files changed, 274 insertions(+), 311 deletions(-) rename src/iosource/{pktsrc => }/PktDumper.cc (100%) rename src/iosource/{pktsrc => }/PktDumper.h (97%) rename src/iosource/{pktsrc => }/PktSrc.cc (100%) rename src/iosource/{pktsrc => }/PktSrc.h (99%) rename src/iosource/{pktsrc => }/pcap/BPF_Program.cc (100%) rename src/iosource/{pktsrc => }/pcap/BPF_Program.h (100%) rename src/iosource/{pktsrc => }/pcap/CMakeLists.txt (100%) rename src/iosource/{pktsrc => }/pcap/Dumper.cc (100%) rename src/iosource/{pktsrc => }/pcap/Dumper.h (100%) rename src/iosource/{pktsrc => }/pcap/Plugin.cc (100%) rename src/iosource/{pktsrc => }/pcap/Source.cc (100%) rename src/iosource/{pktsrc => }/pcap/Source.h (100%) delete mode 100644 src/iosource/pktsrc/CMakeLists.txt delete mode 100644 src/iosource/pktsrc/Component.cc delete mode 100644 src/iosource/pktsrc/Component.h diff --git a/TODO.iosources b/TODO.iosources index 7b84a60fb4..e6afb3978a 100644 --- a/TODO.iosources +++ b/TODO.iosources @@ -1,10 +1,4 @@ -- Move the current_{iosrc,pkt_src,etc.} into 
manager -- Remove all 2ndary path code -- Remove all flow src code. -- Move pktsrc/*.{h,cc} up a level? Or create a subsublibrary there? -- Create a global Packet data structure and pass that around instead - of the pcap_* stuff? - PktDumper: Move Dump() to public and remove Record() - Wrap BPF_Program into namespace and clean up -- Tests, in particular the packet dumping needs testing. - Add an interface for derived pkt source to run a BPF filter. +- Tests, in particular the packet dumping needs testing. diff --git a/src/Net.cc b/src/Net.cc index af9e3bb57f..70d8b70169 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -30,8 +30,8 @@ #include "Serializer.h" #include "PacketDumper.h" #include "iosource/Manager.h" -#include "iosource/pktsrc/PktSrc.h" -#include "iosource/pktsrc/PktDumper.h" +#include "iosource/PktSrc.h" +#include "iosource/PktDumper.h" #include "plugin/Manager.h" extern "C" { diff --git a/src/Net.h b/src/Net.h index 073ffcd527..41cbd69abe 100644 --- a/src/Net.h +++ b/src/Net.h @@ -9,8 +9,8 @@ #include "Func.h" #include "RemoteSerializer.h" #include "iosource/IOSource.h" -#include "iosource/pktsrc/PktSrc.h" -#include "iosource/pktsrc/PktDumper.h" +#include "iosource/PktSrc.h" +#include "iosource/PktDumper.h" extern void net_init(name_list& interfaces, name_list& readfiles, const char* writefile, const char* filter, diff --git a/src/iosource/CMakeLists.txt b/src/iosource/CMakeLists.txt index c2b63e3d7e..6bcfa16289 100644 --- a/src/iosource/CMakeLists.txt +++ b/src/iosource/CMakeLists.txt @@ -6,15 +6,11 @@ include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR} ) -add_subdirectory(pktsrc) - set(iosource_SRCS Component.cc Manager.cc - - pktsrc/Component.cc - pktsrc/PktDumper.cc - pktsrc/PktSrc.cc + PktDumper.cc + PktSrc.cc ) bro_add_subdir_library(iosource ${iosource_SRCS}) diff --git a/src/iosource/Component.cc b/src/iosource/Component.cc index fa57a7d5ad..f54c212352 100644 --- a/src/iosource/Component.cc +++ b/src/iosource/Component.cc @@ -2,6 +2,7 @@ #include 
"Component.h" #include "Desc.h" +#include "Reporter.h" using namespace iosource; @@ -18,3 +19,149 @@ Component::Component(plugin::component::Type type, const std::string& name) Component::~Component() { } + +PktSrcComponent::PktSrcComponent(const std::string& arg_name, const std::string& arg_prefix, InputType arg_type, factory_callback arg_factory) + : iosource::Component(plugin::component::PKTSRC, arg_name) + { + tokenize_string(arg_prefix, ":", &prefixes); + type = arg_type; + factory = arg_factory; + } + +PktSrcComponent::~PktSrcComponent() + { + } + +const std::vector& PktSrcComponent::Prefixes() const + { + return prefixes; + } + +bool PktSrcComponent::HandlesPrefix(const string& prefix) const + { + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( *i == prefix ) + return true; + } + + return false; + } + +bool PktSrcComponent::DoesLive() const + { + return type == LIVE || type == BOTH; + } + +bool PktSrcComponent::DoesTrace() const + { + return type == TRACE || type == BOTH; + } + +PktSrcComponent::factory_callback PktSrcComponent::Factory() const + { + return factory; + } + + +void PktSrcComponent::Describe(ODesc* d) const + { + iosource::Component::Describe(d); + + string prefs; + + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( prefs.size() ) + prefs += ", "; + + prefs += *i; + } + + d->Add(" (interface prefix"); + if ( prefixes.size() > 1 ) + d->Add("es"); + + d->Add(": "); + d->Add(prefs); + d->Add("; "); + + switch ( type ) { + case LIVE: + d->Add("live input"); + break; + + case TRACE: + d->Add("trace input"); + break; + + case BOTH: + d->Add("live and trace input"); + break; + + default: + reporter->InternalError("unknown PkrSrc type"); + } + + d->Add(")"); + } + +PktDumperComponent::PktDumperComponent(const std::string& name, const std::string& arg_prefix, factory_callback arg_factory) + : plugin::Component(plugin::component::PKTDUMPER, name) + { + 
tokenize_string(arg_prefix, ":", &prefixes); + factory = arg_factory; + } + +PktDumperComponent::~PktDumperComponent() + { + } + +PktDumperComponent::factory_callback PktDumperComponent::Factory() const + { + return factory; + } + +const std::vector& PktDumperComponent::Prefixes() const + { + return prefixes; + } + +bool PktDumperComponent::HandlesPrefix(const string& prefix) const + { + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( *i == prefix ) + return true; + } + + return false; + } + +void PktDumperComponent::Describe(ODesc* d) const + { + plugin::Component::Describe(d); + + string prefs; + + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( prefs.size() ) + prefs += ", "; + + prefs += *i; + } + + d->Add(" (dumper prefix"); + + if ( prefixes.size() > 1 ) + d->Add("es"); + + d->Add(": "); + d->Add(prefs); + d->Add(")"); + } diff --git a/src/iosource/Component.h b/src/iosource/Component.h index 065855caea..c93597fd67 100644 --- a/src/iosource/Component.h +++ b/src/iosource/Component.h @@ -3,11 +3,16 @@ #ifndef IOSOURCE_PLUGIN_COMPONENT_H #define IOSOURCE_PLUGIN_COMPONENT_H +#include +#include + #include "plugin/Component.h" namespace iosource { class IOSource; +class PktSrc; +class PktDumper; /** * Component description for plugins providing IOSources. @@ -38,6 +43,110 @@ protected: Component(plugin::component::Type type, const std::string& name); }; +/** + * Component description for plugins providing a PktSrc for packet input. + */ +class PktSrcComponent : public iosource::Component { +public: + enum InputType { LIVE, TRACE, BOTH }; + + typedef PktSrc* (*factory_callback)(const std::string& path, const std::string& filter, bool is_live); + + /** + * XXX + */ + PktSrcComponent(const std::string& name, const std::string& prefixes, InputType type, factory_callback factory); + + /** + * Destructor. 
+	 */ + virtual ~PktSrcComponent(); + + /** + * Returns the prefix(es) passed to the constructor. + */ + const std::vector& Prefixes() const; + + /** + * Returns true if the given prefix is among the ones specified for the component. + */ + bool HandlesPrefix(const std::string& prefix) const; + + /** + * Returns true if packet source instantiated by the component handles + * live traffic. + */ + bool DoesLive() const; + + /** + * Returns true if packet source instantiated by the component handles + * offline traces. + */ + bool DoesTrace() const; + + /** + * Returns the source's factory function. + */ + factory_callback Factory() const; + + /** + * Generates a human-readable description of the component. This goes + * into the output of \c "bro -NN". + */ + virtual void Describe(ODesc* d) const; + +private: + std::vector prefixes; + InputType type; + factory_callback factory; +}; + +/** + * Component description for plugins providing a PktDumper for packet output. + * + * PktDumpers aren't IOSources but we locate them here to keep them along with + * the PktSrc. + */ +class PktDumperComponent : public plugin::Component { +public: + typedef PktDumper* (*factory_callback)(const std::string& path, bool append); + + /** + * XXX + */ + PktDumperComponent(const std::string& name, const std::string& prefixes, factory_callback factory); + + /** + * Destructor. + */ + ~PktDumperComponent(); + + /** + * Returns the prefix(es) passed to the constructor. + */ + const std::vector& Prefixes() const; + + /** + * Returns true if the given prefix is among the ones specified for the component. + */ + bool HandlesPrefix(const std::string& prefix) const; + + /** + * Returns the source's factory function. + */ + factory_callback Factory() const; + + /** + * Generates a human-readable description of the component. This goes + * into the output of \c "bro -NN".
+ */ + virtual void Describe(ODesc* d) const; + +private: + std::vector prefixes; + factory_callback factory; +}; + } #endif diff --git a/src/iosource/Manager.cc b/src/iosource/Manager.cc index 6c01e5e57b..e2be297a77 100644 --- a/src/iosource/Manager.cc +++ b/src/iosource/Manager.cc @@ -8,9 +8,8 @@ #include "Manager.h" #include "IOSource.h" -#include "pktsrc/PktSrc.h" -#include "pktsrc/PktDumper.h" -#include "pktsrc/Component.h" +#include "PktSrc.h" +#include "PktDumper.h" #include "plugin/Manager.h" #include "util.h" @@ -222,14 +221,14 @@ PktSrc* Manager::OpenPktSrc(const std::string& path, const std::string& filter, // Find the component providing packet sources of the requested prefix. - pktsrc::SourceComponent* component = 0; + PktSrcComponent* component = 0; - std::list all_components = plugin_mgr->Components(); + std::list all_components = plugin_mgr->Components(); - for ( std::list::const_iterator i = all_components.begin(); + for ( std::list::const_iterator i = all_components.begin(); i != all_components.end(); i++ ) { - pktsrc::SourceComponent* c = *i; + PktSrcComponent* c = *i; if ( c->HandlesPrefix(prefix) && (( is_live && c->DoesLive() ) || @@ -272,11 +271,11 @@ PktDumper* Manager::OpenPktDumper(const string& path, bool append) // Find the component providing packet dumpers of the requested prefix. 
- pktsrc::DumperComponent* component = 0; + PktDumperComponent* component = 0; - std::list all_components = plugin_mgr->Components(); + std::list all_components = plugin_mgr->Components(); - for ( std::list::const_iterator i = all_components.begin(); + for ( std::list::const_iterator i = all_components.begin(); i != all_components.end(); i++ ) { if ( (*i)->HandlesPrefix(prefix) ) diff --git a/src/iosource/pktsrc/PktDumper.cc b/src/iosource/PktDumper.cc similarity index 100% rename from src/iosource/pktsrc/PktDumper.cc rename to src/iosource/PktDumper.cc diff --git a/src/iosource/pktsrc/PktDumper.h b/src/iosource/PktDumper.h similarity index 97% rename from src/iosource/pktsrc/PktDumper.h rename to src/iosource/PktDumper.h index b8f3595e32..794992d870 100644 --- a/src/iosource/pktsrc/PktDumper.h +++ b/src/iosource/PktDumper.h @@ -3,7 +3,7 @@ #ifndef IOSOURCE_PKTSRC_PKTDUMPER_H #define IOSOURCE_PKTSRC_PKTDUMPER_H -#include "../IOSource.h" +#include "IOSource.h" namespace iosource { diff --git a/src/iosource/pktsrc/PktSrc.cc b/src/iosource/PktSrc.cc similarity index 100% rename from src/iosource/pktsrc/PktSrc.cc rename to src/iosource/PktSrc.cc diff --git a/src/iosource/pktsrc/PktSrc.h b/src/iosource/PktSrc.h similarity index 99% rename from src/iosource/pktsrc/PktSrc.h rename to src/iosource/PktSrc.h index 3eddaec6e8..d04ca24817 100644 --- a/src/iosource/pktsrc/PktSrc.h +++ b/src/iosource/PktSrc.h @@ -7,7 +7,7 @@ extern "C" { #include } -#include "../IOSource.h" +#include "IOSource.h" namespace iosource { diff --git a/src/iosource/pktsrc/pcap/BPF_Program.cc b/src/iosource/pcap/BPF_Program.cc similarity index 100% rename from src/iosource/pktsrc/pcap/BPF_Program.cc rename to src/iosource/pcap/BPF_Program.cc diff --git a/src/iosource/pktsrc/pcap/BPF_Program.h b/src/iosource/pcap/BPF_Program.h similarity index 100% rename from src/iosource/pktsrc/pcap/BPF_Program.h rename to src/iosource/pcap/BPF_Program.h diff --git a/src/iosource/pktsrc/pcap/CMakeLists.txt 
b/src/iosource/pcap/CMakeLists.txt similarity index 100% rename from src/iosource/pktsrc/pcap/CMakeLists.txt rename to src/iosource/pcap/CMakeLists.txt diff --git a/src/iosource/pktsrc/pcap/Dumper.cc b/src/iosource/pcap/Dumper.cc similarity index 100% rename from src/iosource/pktsrc/pcap/Dumper.cc rename to src/iosource/pcap/Dumper.cc diff --git a/src/iosource/pktsrc/pcap/Dumper.h b/src/iosource/pcap/Dumper.h similarity index 100% rename from src/iosource/pktsrc/pcap/Dumper.h rename to src/iosource/pcap/Dumper.h diff --git a/src/iosource/pktsrc/pcap/Plugin.cc b/src/iosource/pcap/Plugin.cc similarity index 100% rename from src/iosource/pktsrc/pcap/Plugin.cc rename to src/iosource/pcap/Plugin.cc diff --git a/src/iosource/pktsrc/pcap/Source.cc b/src/iosource/pcap/Source.cc similarity index 100% rename from src/iosource/pktsrc/pcap/Source.cc rename to src/iosource/pcap/Source.cc diff --git a/src/iosource/pktsrc/pcap/Source.h b/src/iosource/pcap/Source.h similarity index 100% rename from src/iosource/pktsrc/pcap/Source.h rename to src/iosource/pcap/Source.h diff --git a/src/iosource/pktsrc/CMakeLists.txt b/src/iosource/pktsrc/CMakeLists.txt deleted file mode 100644 index 07303b46a3..0000000000 --- a/src/iosource/pktsrc/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ - -add_subdirectory(pcap) diff --git a/src/iosource/pktsrc/Component.cc b/src/iosource/pktsrc/Component.cc deleted file mode 100644 index 6caf743ff9..0000000000 --- a/src/iosource/pktsrc/Component.cc +++ /dev/null @@ -1,155 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. 
- -#include "Component.h" - -#include "../Desc.h" -#include "../Reporter.h" - -using namespace iosource::pktsrc; - -SourceComponent::SourceComponent(const std::string& arg_name, const std::string& arg_prefix, InputType arg_type, factory_callback arg_factory) - : iosource::Component(plugin::component::PKTSRC, arg_name) - { - tokenize_string(arg_prefix, ":", &prefixes); - type = arg_type; - factory = arg_factory; - } - -SourceComponent::~SourceComponent() - { - } - -const std::vector& SourceComponent::Prefixes() const - { - return prefixes; - } - -bool SourceComponent::HandlesPrefix(const string& prefix) const - { - for ( std::vector::const_iterator i = prefixes.begin(); - i != prefixes.end(); i++ ) - { - if ( *i == prefix ) - return true; - } - - return false; - } - -bool SourceComponent::DoesLive() const - { - return type == LIVE || type == BOTH; - } - -bool SourceComponent::DoesTrace() const - { - return type == TRACE || type == BOTH; - } - -SourceComponent::factory_callback SourceComponent::Factory() const - { - return factory; - } - - -void SourceComponent::Describe(ODesc* d) const - { - iosource::Component::Describe(d); - - string prefs; - - for ( std::vector::const_iterator i = prefixes.begin(); - i != prefixes.end(); i++ ) - { - if ( prefs.size() ) - prefs += ", "; - - prefs += *i; - } - - d->Add(" (interface prefix"); - if ( prefixes.size() > 1 ) - d->Add("es"); - - d->Add(": "); - d->Add(prefs); - d->Add("; "); - - switch ( type ) { - case LIVE: - d->Add("live input"); - break; - - case TRACE: - d->Add("trace input"); - break; - - case BOTH: - d->Add("live and trace input"); - break; - - default: - reporter->InternalError("unknown PkrSrc type"); - } - - d->Add(")"); - } - -DumperComponent::DumperComponent(const std::string& name, const std::string& arg_prefix, factory_callback arg_factory) - : plugin::Component(plugin::component::PKTDUMPER, name) - { - tokenize_string(arg_prefix, ":", &prefixes); - factory = arg_factory; - } - 
-DumperComponent::~DumperComponent() - { - } - -DumperComponent::factory_callback DumperComponent::Factory() const - { - return factory; - } - -const std::vector& DumperComponent::Prefixes() const - { - return prefixes; - } - -bool DumperComponent::HandlesPrefix(const string& prefix) const - { - for ( std::vector::const_iterator i = prefixes.begin(); - i != prefixes.end(); i++ ) - { - if ( *i == prefix ) - return true; - } - - return false; - } - -void DumperComponent::Describe(ODesc* d) const - { - plugin::Component::Describe(d); - - string prefs; - - for ( std::vector::const_iterator i = prefixes.begin(); - i != prefixes.end(); i++ ) - { - if ( prefs.size() ) - prefs += ", "; - - prefs += *i; - } - - d->Add(" (dumper prefix"); - - if ( prefixes.size() > 1 ) - d->Add("es"); - - d->Add(": "); - d->Add(prefs); - d->Add(")"); - } - diff --git a/src/iosource/pktsrc/Component.h b/src/iosource/pktsrc/Component.h deleted file mode 100644 index 0e4755d7b8..0000000000 --- a/src/iosource/pktsrc/Component.h +++ /dev/null @@ -1,124 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#ifndef IOSOURCE_PKTSRC_PLUGIN_COMPONENT_H -#define IOSOURCE_PKTSRC_PLUGIN_COMPONENT_H - -#include - -#include "../Component.h" - -namespace iosource { - -class PktSrc; -class PktDumper; - -namespace pktsrc { - -/** - * Component description for plugins providing a PktSrc for packet input. - */ -class SourceComponent : public iosource::Component { -public: - enum InputType { LIVE, TRACE, BOTH }; - - typedef PktSrc* (*factory_callback)(const std::string& path, const std::string& filter, bool is_live); - - /** - * XXX - */ - SourceComponent(const std::string& name, const std::string& prefixes, InputType type, factory_callback factory); - - /** - * Destructor. - */ - virtual ~SourceComponent(); - - /** - * Returns the prefix(es) passed to the constructor. 
- */ - const std::vector& Prefixes() const; - - /** - * Returns true if the given prefix is among the one specified for the component. - */ - bool HandlesPrefix(const std::string& prefix) const; - - /** - * Returns true if packet source instantiated by the component handle - * live traffic. - */ - bool DoesLive() const; - - /** - * Returns true if packet source instantiated by the component handle - * offline traces. - */ - bool DoesTrace() const; - - /** - * Returns the source's factory function. - */ - factory_callback Factory() const; - - /** - * Generates a human-readable description of the component. This goes - * into the output of \c "bro -NN". - */ - virtual void Describe(ODesc* d) const; - -private: - std::vector prefixes; - InputType type; - factory_callback factory; -}; - -/** - * Component description for plugins providing a PktDumper for packet output. - * - * PktDumpers aren't IOSurces but we locate them here to keep them along with - * the PktSrc. - */ -class DumperComponent : public plugin::Component { -public: - typedef PktDumper* (*factory_callback)(const std::string& path, bool append); - - /** - * XXX - */ - DumperComponent(const std::string& name, const std::string& prefixes, factory_callback factory); - - /** - * Destructor. - */ - ~DumperComponent(); - - /** - * Returns the prefix(es) passed to the constructor. - */ - const std::vector& Prefixes() const; - - /** - * Returns true if the given prefix is among the one specified for the component. - */ - bool HandlesPrefix(const std::string& prefix) const; - - /** - * Returns the source's factory function. - */ - factory_callback Factory() const; - - /** - * Generates a human-readable description of the component. This goes - * into the output of \c "bro -NN". 
- */ - virtual void Describe(ODesc* d) const; - -private: - std::vector prefixes; - factory_callback factory; -}; - -} -} - -#endif diff --git a/src/plugin/Plugin.h b/src/plugin/Plugin.h index 773fe139f6..ccda20054c 100644 --- a/src/plugin/Plugin.h +++ b/src/plugin/Plugin.h @@ -10,7 +10,6 @@ #include "analyzer/Component.h" #include "file_analysis/Component.h" #include "iosource/Component.h" -#include "iosource/pktsrc/Component.h" // We allow to override this externally for testing purposes. #ifndef BRO_PLUGIN_API_VERSION From 0186061aa83e98526ba9c1a4211902279aea7a6b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 22 Aug 2014 16:49:14 -0700 Subject: [PATCH 026/106] Small packet dumper API change. --- TODO.iosources | 1 - src/Net.cc | 2 +- src/Serializer.cc | 2 +- src/Sessions.cc | 4 ++-- src/bro.bif | 4 ++-- src/iosource/PktDumper.cc | 5 ----- src/iosource/PktDumper.h | 6 ++---- 7 files changed, 8 insertions(+), 16 deletions(-) diff --git a/TODO.iosources b/TODO.iosources index e6afb3978a..e61e858087 100644 --- a/TODO.iosources +++ b/TODO.iosources @@ -1,4 +1,3 @@ -- PktDumper: Move Dump() to public and remove Record() - Wrap BPF_Program into namespace and clean up - Add an interface for derived pkt source to run a BPF filter. - Tests, in particular the packet dumping needs testing. 
diff --git a/src/Net.cc b/src/Net.cc index 70d8b70169..38ec7c3b06 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -127,7 +127,7 @@ RETSIGTYPE watchdog(int /* signo */) iosource::PktDumper::Packet p; p.hdr = current_hdr; p.data = current_pkt; - pkt_dumper->Record(&p); + pkt_dumper->Dump(&p); } } diff --git a/src/Serializer.cc b/src/Serializer.cc index fd3b34da44..e63e7eef77 100644 --- a/src/Serializer.cc +++ b/src/Serializer.cc @@ -1221,7 +1221,7 @@ Packet* Packet::Unserialize(UnserialInfo* info) iosource::PktDumper::Packet dp; dp.hdr = p->hdr; dp.data = p->pkt; - dump->Record(&dp); + dump->Dump(&dp); } } #endif diff --git a/src/Sessions.cc b/src/Sessions.cc index 3aeeaadf3b..43e55dd95a 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -1394,7 +1394,7 @@ void NetSessions::DumpPacket(const struct pcap_pkthdr* hdr, iosource::PktDumper::Packet p; p.hdr = hdr; p.data = pkt; - pkt_dumper->Record(&p); + pkt_dumper->Dump(&p); } else @@ -1407,7 +1407,7 @@ void NetSessions::DumpPacket(const struct pcap_pkthdr* hdr, iosource::PktDumper::Packet p; p.hdr = &h; p.data = pkt; - pkt_dumper->Record(&p); + pkt_dumper->Dump(&p); } } diff --git a/src/bro.bif b/src/bro.bif index 25d53cc736..adf5e3d6f0 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -3235,7 +3235,7 @@ function dump_current_packet%(file_name: string%) : bool iosource::PktDumper::Packet p; p.hdr = hdr; p.data = pkt; - addl_pkt_dumper->Record(&p); + addl_pkt_dumper->Dump(&p); } return new Val(! 
addl_pkt_dumper->IsError(), TYPE_BOOL); @@ -3300,7 +3300,7 @@ function dump_packet%(pkt: pcap_packet, file_name: string%) : bool iosource::PktDumper::Packet p; p.hdr = &hdr; p.data = (*pkt_vl)[4]->AsString()->Bytes(); - addl_pkt_dumper->Record(&p); + addl_pkt_dumper->Dump(&p); } return new Val(addl_pkt_dumper->IsError(), TYPE_BOOL); diff --git a/src/iosource/PktDumper.cc b/src/iosource/PktDumper.cc index 21ad79b87d..3c8595e11c 100644 --- a/src/iosource/PktDumper.cc +++ b/src/iosource/PktDumper.cc @@ -50,11 +50,6 @@ int PktDumper::HdrSize() const return is_open ? props.hdr_size : -1; } -bool PktDumper::Record(const Packet* pkt) - { - return Dump(pkt); - } - void PktDumper::Opened(const Properties& arg_props) { is_open = true; diff --git a/src/iosource/PktDumper.h b/src/iosource/PktDumper.h index 794992d870..85c64ffd1d 100644 --- a/src/iosource/PktDumper.h +++ b/src/iosource/PktDumper.h @@ -25,8 +25,10 @@ public: int HdrSize() const; bool Record(const Packet* pkt); + // PktSrc interface for derived classes to implement. virtual void Close() = 0; virtual void Open() = 0; + virtual bool Dump(const Packet* pkt) = 0; protected: // Methods to use by derived classed. @@ -41,10 +43,6 @@ protected: void Closed(); void Error(const std::string& msg); - // PktSrc interface for derived classes to implement. - - virtual bool Dump(const Packet* pkt) = 0; - private: bool is_open; Properties props; From ce9f16490c28155788928b89cc7a81a5931ea5f8 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 22 Aug 2014 17:27:20 -0700 Subject: [PATCH 027/106] Moving some of the BPF filtering code into base class. This will allow packet sources that don't support BPF natively to emulate the filtering via libpcap. 
--- src/iosource/{pcap => }/BPF_Program.cc | 0 src/iosource/{pcap => }/BPF_Program.h | 0 src/iosource/CMakeLists.txt | 1 + src/iosource/PktSrc.cc | 59 ++++++++++++++++- src/iosource/PktSrc.h | 38 ++++++++++- src/iosource/pcap/CMakeLists.txt | 2 +- src/iosource/pcap/Source.cc | 89 +++++--------------------- src/iosource/pcap/Source.h | 6 -- 8 files changed, 110 insertions(+), 85 deletions(-) rename src/iosource/{pcap => }/BPF_Program.cc (100%) rename src/iosource/{pcap => }/BPF_Program.h (100%) diff --git a/src/iosource/pcap/BPF_Program.cc b/src/iosource/BPF_Program.cc similarity index 100% rename from src/iosource/pcap/BPF_Program.cc rename to src/iosource/BPF_Program.cc diff --git a/src/iosource/pcap/BPF_Program.h b/src/iosource/BPF_Program.h similarity index 100% rename from src/iosource/pcap/BPF_Program.h rename to src/iosource/BPF_Program.h diff --git a/src/iosource/CMakeLists.txt b/src/iosource/CMakeLists.txt index 6bcfa16289..272ced7740 100644 --- a/src/iosource/CMakeLists.txt +++ b/src/iosource/CMakeLists.txt @@ -7,6 +7,7 @@ include_directories(BEFORE ) set(iosource_SRCS + BPF_Program.cc Component.cc Manager.cc PktDumper.cc diff --git a/src/iosource/PktSrc.cc b/src/iosource/PktSrc.cc index 0ec04b65d8..2e056a10ba 100644 --- a/src/iosource/PktSrc.cc +++ b/src/iosource/PktSrc.cc @@ -25,6 +25,10 @@ PktSrc::PktSrc() PktSrc::~PktSrc() { + BPF_Program* code; + IterCookie* cookie = filters.InitForIteration(); + while ( (code = filters.NextEntry(cookie)) ) + delete code; } const std::string& PktSrc::Path() const @@ -43,6 +47,11 @@ int PktSrc::LinkType() const return IsOpen() ? props.link_type : -1; } +uint32 PktSrc::Netmask() const + { + return IsOpen() ? props.netmask : PCAP_NETMASK_UNKNOWN; + } + int PktSrc::HdrSize() const { return IsOpen() ? props.hdr_size : -1; @@ -77,6 +86,12 @@ void PktSrc::Opened(const Properties& arg_props) props = arg_props; SetClosed(false); + if ( ! PrecompileFilter(0, props.filter) || ! 
SetFilter(0) ) + { + Close(); + return; + } + DBG_LOG(DBG_PKTIO, "Opened source %s", props.path.c_str()); } @@ -409,12 +424,50 @@ int PktSrc::ExtractNextPacketInternal() return 0; } -int PktSrc::PrecompileFilter(int index, const std::string& filter) +int PktSrc::PrecompileBPFFilter(int index, const std::string& filter) { + char errbuf[PCAP_ERRBUF_SIZE]; + + // Compile filter. + BPF_Program* code = new BPF_Program(); + + if ( ! code->Compile(SnapLen(), LinkType(), filter.c_str(), Netmask(), errbuf, sizeof(errbuf)) ) + { + Error(fmt("cannot compile BPF filter \"%s\": %s", filter.c_str(), errbuf)); + Close(); + delete code; + return 0; + } + + // Store it in hash. + HashKey* hash = new HashKey(HashKey(bro_int_t(index))); + BPF_Program* oldcode = filters.Lookup(hash); + if ( oldcode ) + delete oldcode; + + filters.Insert(hash, code); + delete hash; + return 1; } -int PktSrc::SetFilter(int index) +BPF_Program* PktSrc::GetBPFFilter(int index) { - return 1; + HashKey* hash = new HashKey(HashKey(bro_int_t(index))); + BPF_Program* code = filters.Lookup(hash); + delete hash; + return code; + } + +int PktSrc::ApplyBPFFilter(int index, const struct pcap_pkthdr *hdr, const u_char *pkt) + { + BPF_Program* code = GetBPFFilter(index); + + if ( ! code ) + { + Error(fmt("BPF filter %d not compiled", index)); + Close(); + } + + return pcap_offline_filter(code->GetProgram(), hdr, pkt); } diff --git a/src/iosource/PktSrc.h b/src/iosource/PktSrc.h index d04ca24817..5dd51ab5f2 100644 --- a/src/iosource/PktSrc.h +++ b/src/iosource/PktSrc.h @@ -8,6 +8,10 @@ extern "C" { } #include "IOSource.h" +#include "BPF_Program.h" +#include "Dict.h" + +declare(PDict,BPF_Program); namespace iosource { @@ -29,6 +33,7 @@ public: const std::string& Filter() const; bool IsLive() const; int LinkType() const; + uint32 Netmask() const; const char* ErrorMsg() const; int HdrSize() const; int SnapLen() const; @@ -41,7 +46,22 @@ public: // going to be continued. 
void ContinueAfterSuspend(); - virtual void Statistics(Stats* stats) = 0; + // Precompiles a BPF filter and associates the given index with it. + // Returns true on success, 0 if a problem occurred. The compiled + // filter will then be available via GetBPFFilter(). + int PrecompileBPFFilter(int index, const std::string& filter); + + // Returns the BPF filter with the given index, as compiled by + // PrecompileBPFFilter(), or null if none has been (successfully) + // compiled. + BPF_Program* GetBPFFilter(int index); + + // Applies a precompiled BPF filter to a packet, returning true if it + // matches. This will close the source with an error message if no + // filter with that index has been compiled. + int ApplyBPFFilter(int index, const struct pcap_pkthdr *hdr, const u_char *pkt); + + // PacketSource interface for derived classes to override. // Returns the packet last processed; false if there is no // current packet available. @@ -50,12 +70,15 @@ public: // Precompiles a filter and associates the given index with it. // Returns true on success, 0 if a problem occurred or filtering is // not supported. - virtual int PrecompileFilter(int index, const std::string& filter); + virtual int PrecompileFilter(int index, const std::string& filter) = 0; // Activates the filter with the given index. Returns true on // success, 0 if a problem occurred or the filtering is not // supported. - virtual int SetFilter(int index); + virtual int SetFilter(int index) = 0; + + // Returns current statistics about the source. + virtual void Statistics(Stats* stats) = 0; static int GetLinkHeaderSize(int link_type); @@ -68,7 +91,13 @@ protected: int selectable_fd; int link_type; int hdr_size; + uint32 netmask; bool is_live; + + Properties() + { + netmask = PCAP_NETMASK_UNKNOWN; + } }; struct Packet { @@ -113,6 +142,9 @@ private: bool have_packet; Packet current_packet; + // For BPF filtering support. + PDict(BPF_Program) filters; + // Only set in pseudo-realtime mode.
double first_timestamp; double first_wallclock; diff --git a/src/iosource/pcap/CMakeLists.txt b/src/iosource/pcap/CMakeLists.txt index b43d51b0ca..1c57bb6ac9 100644 --- a/src/iosource/pcap/CMakeLists.txt +++ b/src/iosource/pcap/CMakeLists.txt @@ -4,5 +4,5 @@ include(BroPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) bro_plugin_begin(Bro Pcap) -bro_plugin_cc(Source.cc Dumper.cc BPF_Program.cc Plugin.cc) +bro_plugin_cc(Source.cc Dumper.cc Plugin.cc) bro_plugin_end() diff --git a/src/iosource/pcap/Source.cc b/src/iosource/pcap/Source.cc index 271c3efcd7..67a73fbd8a 100644 --- a/src/iosource/pcap/Source.cc +++ b/src/iosource/pcap/Source.cc @@ -37,13 +37,6 @@ void PcapSource::Close() if ( ! pd ) return; - BPF_Program* code; - IterCookie* cookie = filters.InitForIteration(); - while ( (code = filters.NextEntry(cookie)) ) - delete code; - - filters.Clear(); - pcap_close(pd); pd = 0; last_data = 0; @@ -56,10 +49,6 @@ void PcapSource::OpenLive() char errbuf[PCAP_ERRBUF_SIZE]; char tmp_errbuf[PCAP_ERRBUF_SIZE]; -#if 0 - filter_type = ft; -#endif - // Determine interface if not specified. if ( props.path.empty() ) props.path = pcap_lookupdev(tmp_errbuf); @@ -74,7 +63,7 @@ void PcapSource::OpenLive() // Determine network and netmask. uint32 net; - if ( pcap_lookupnet(props.path.c_str(), &net, &netmask, tmp_errbuf) < 0 ) + if ( pcap_lookupnet(props.path.c_str(), &net, &props.netmask, tmp_errbuf) < 0 ) { // ### The lookup can fail if no address is assigned to // the interface; and libpcap doesn't have any useful notion @@ -82,7 +71,7 @@ void PcapSource::OpenLive() // just kludge around the error :-(. 
// sprintf(errbuf, "pcap_lookupnet %s", tmp_errbuf); // return; - netmask = 0xffffff00; + props.netmask = 0xffffff00; } // We use the smallest time-out possible to return almost immediately if @@ -113,31 +102,22 @@ void PcapSource::OpenLive() props.selectable_fd = pcap_fileno(pd); - if ( PrecompileFilter(0, props.filter) && SetFilter(0) ) - { - SetHdrSize(); + SetHdrSize(); - if ( ! pd ) - // Was closed, couldn't get header size. - return; - - Info(fmt("listening on %s, capture length %d bytes\n", props.path.c_str(), SnapLen())); - } - else - Close(); + if ( ! pd ) + // Was closed, couldn't get header size. + return; props.is_live = true; Opened(props); + + Info(fmt("listening on %s, capture length %d bytes\n", props.path.c_str(), SnapLen())); } void PcapSource::OpenOffline() { char errbuf[PCAP_ERRBUF_SIZE]; -#if 0 - filter_type = ft; -#endif - pd = pcap_open_offline(props.path.c_str(), errbuf); if ( ! pd ) @@ -146,25 +126,16 @@ void PcapSource::OpenOffline() return; } - if ( PrecompileFilter(0, props.filter) && SetFilter(0) ) - { - SetHdrSize(); + SetHdrSize(); - if ( ! pd ) - // Was closed, unknown link layer type. - return; + if ( ! pd ) + // Was closed, unknown link layer type. + return; - // We don't put file sources into non-blocking mode as - // otherwise we would not be able to identify the EOF. + props.selectable_fd = fileno(pcap_file(pd)); - props.selectable_fd = fileno(pcap_file(pd)); - - if ( props.selectable_fd < 0 ) - InternalError("OS does not support selectable pcap fd"); - } - - else - Close(); + if ( props.selectable_fd < 0 ) + InternalError("OS does not support selectable pcap fd"); props.is_live = false; Opened(props); @@ -211,31 +182,7 @@ void PcapSource::DoneWithPacket(Packet* pkt) int PcapSource::PrecompileFilter(int index, const std::string& filter) { - if ( ! pd ) - return 1; // Prevent error message. - - char errbuf[PCAP_ERRBUF_SIZE]; - - // Compile filter. - BPF_Program* code = new BPF_Program(); - - if ( ! 
code->Compile(pd, filter.c_str(), netmask, errbuf, sizeof(errbuf)) ) - { - PcapError(); - delete code; - return 0; - } - - // Store it in hash. - HashKey* hash = new HashKey(HashKey(bro_int_t(index))); - BPF_Program* oldcode = filters.Lookup(hash); - if ( oldcode ) - delete oldcode; - - filters.Insert(hash, code); - delete hash; - - return 1; + return PktSrc::PrecompileBPFFilter(index, filter). } int PcapSource::SetFilter(int index) @@ -245,9 +192,7 @@ int PcapSource::SetFilter(int index) char errbuf[PCAP_ERRBUF_SIZE]; - HashKey* hash = new HashKey(HashKey(bro_int_t(index))); - BPF_Program* code = filters.Lookup(hash); - delete hash; + BPF_Program* code = GetFilter(index); if ( ! code ) { diff --git a/src/iosource/pcap/Source.h b/src/iosource/pcap/Source.h index 03b75c1ca7..b9b61ac618 100644 --- a/src/iosource/pcap/Source.h +++ b/src/iosource/pcap/Source.h @@ -4,10 +4,6 @@ #define IOSOURCE_PKTSRC_PCAP_SOURCE_H #include "../PktSrc.h" -#include "BPF_Program.h" -#include "Dict.h" - -declare(PDict,BPF_Program); namespace iosource { namespace pktsrc { @@ -42,8 +38,6 @@ private: Stats stats; pcap_t *pd; - uint32 netmask; - PDict(BPF_Program) filters; struct pcap_pkthdr current_hdr; struct pcap_pkthdr last_hdr; From 121fcdbb5b922103e28e806896e2cd3e15803478 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 22 Aug 2014 19:56:27 -0500 Subject: [PATCH 028/106] Fix build on systems that already have ntohll/htonll BIT-1234 #close --- CMakeLists.txt | 2 ++ config.h.in | 3 +++ src/net_util.h | 5 +++++ 3 files changed, 10 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 080b731875..22d63a89d5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -148,6 +148,8 @@ set(brodeps include(TestBigEndian) test_big_endian(WORDS_BIGENDIAN) +include(CheckSymbolExists) +check_symbol_exists(htonll arpa/inet.h HAVE_BYTEORDER_64) include(OSSpecific) include(CheckTypes) diff --git a/config.h.in b/config.h.in index d3889a2d90..755a9eee98 100644 --- a/config.h.in +++ b/config.h.in @@ 
-129,6 +129,9 @@ /* whether words are stored with the most significant byte first */ #cmakedefine WORDS_BIGENDIAN +/* whether htonll/ntohll is defined in */ +#cmakedefine HAVE_BYTEORDER_64 + /* ultrix can't hack const */ #cmakedefine NEED_ULTRIX_CONST_HACK #ifdef NEED_ULTRIX_CONST_HACK diff --git a/src/net_util.h b/src/net_util.h index 0f34335267..d68a7110ce 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -180,8 +180,11 @@ extern uint32 extract_uint32(const u_char* data); inline double ntohd(double d) { return d; } inline double htond(double d) { return d; } + +#ifndef HAVE_BYTEORDER_64 inline uint64 ntohll(uint64 i) { return i; } inline uint64 htonll(uint64 i) { return i; } +#endif #else @@ -207,6 +210,7 @@ inline double ntohd(double d) inline double htond(double d) { return ntohd(d); } +#ifndef HAVE_BYTEORDER_64 inline uint64 ntohll(uint64 i) { u_char c; @@ -224,6 +228,7 @@ inline uint64 ntohll(uint64 i) } inline uint64 htonll(uint64 i) { return ntohll(i); } +#endif #endif From 9232f05f5231681023553e8a5a1ab7a1cadec730 Mon Sep 17 00:00:00 2001 From: Jimmy Jones Date: Sat, 23 Aug 2014 15:05:20 +0100 Subject: [PATCH 029/106] Better documentation for sub_bytes --- src/strings.bif | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/strings.bif b/src/strings.bif index f50eb1f89b..4a30ca2aa4 100644 --- a/src/strings.bif +++ b/src/strings.bif @@ -308,7 +308,8 @@ function edit%(arg_s: string, arg_edit_char: string%): string ## ## s: The string to obtain a substring from. ## -## start: The starting position of the substring in *s* +## start: The starting position of the substring in *s*, where 1 is the first +## character. As a special case, 0 also represents the first character. ## ## n: The number of characters to extract, beginning at *start*. 
## From 5f817513d098d2c0141ba74d48807a44d0131afa Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 22 Aug 2014 17:56:16 -0700 Subject: [PATCH 030/106] A set of various fixes and smaller API tweaks, plus tests. Also moving PCAP-related bifs to iosource/pcap.bif. --- TODO.iosources | 7 +- src/Net.cc | 25 ++-- src/bro.bif | 108 ------------------ src/iosource/BPF_Program.cc | 12 +- src/iosource/BPF_Program.h | 3 +- src/iosource/CMakeLists.txt | 4 + src/iosource/IOSource.h | 3 +- src/iosource/Manager.cc | 37 +++--- src/iosource/Manager.h | 3 + src/iosource/PktDumper.cc | 16 ++- src/iosource/PktDumper.h | 9 +- src/iosource/PktSrc.cc | 13 ++- src/iosource/PktSrc.h | 2 + src/iosource/pcap.bif | 104 +++++++++++++++++ src/iosource/pcap/Dumper.cc | 4 +- src/iosource/pcap/Dumper.h | 2 +- src/iosource/pcap/Plugin.cc | 4 +- src/iosource/pcap/Source.cc | 6 +- src/iosource/pcap/Source.h | 2 +- src/main.cc | 1 + .../btest/Baseline/core.pcap.dumper/output | 1 + .../core.pcap.dynamic-filter/conn.log | 25 ++++ .../Baseline/core.pcap.dynamic-filter/output | 30 +++++ .../Baseline/core.pcap.filter-error/output | 3 + .../Baseline/core.pcap.input-error/output2 | 2 + .../Baseline/core.pcap.pseudo-realtime/output | 1 + .../core.pcap.read-trace-with-filter/conn.log | 10 ++ .../packet_filter.log | 10 ++ testing/btest/core/pcap/dumper.bro | 5 + testing/btest/core/pcap/dynamic-filter.bro | 32 ++++++ testing/btest/core/pcap/filter-error.bro | 16 +++ testing/btest/core/pcap/input-error.bro | 14 +++ testing/btest/core/pcap/pseudo-realtime.bro | 42 +++++++ .../core/pcap/read-trace-with-filter.bro | 3 + 34 files changed, 395 insertions(+), 164 deletions(-) create mode 100644 src/iosource/pcap.bif create mode 100644 testing/btest/Baseline/core.pcap.dumper/output create mode 100644 testing/btest/Baseline/core.pcap.dynamic-filter/conn.log create mode 100644 testing/btest/Baseline/core.pcap.dynamic-filter/output create mode 100644 testing/btest/Baseline/core.pcap.filter-error/output create mode 
100644 testing/btest/Baseline/core.pcap.input-error/output2 create mode 100644 testing/btest/Baseline/core.pcap.pseudo-realtime/output create mode 100644 testing/btest/Baseline/core.pcap.read-trace-with-filter/conn.log create mode 100644 testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log create mode 100644 testing/btest/core/pcap/dumper.bro create mode 100644 testing/btest/core/pcap/dynamic-filter.bro create mode 100644 testing/btest/core/pcap/filter-error.bro create mode 100644 testing/btest/core/pcap/input-error.bro create mode 100644 testing/btest/core/pcap/pseudo-realtime.bro create mode 100644 testing/btest/core/pcap/read-trace-with-filter.bro diff --git a/TODO.iosources b/TODO.iosources index e61e858087..ee19dea169 100644 --- a/TODO.iosources +++ b/TODO.iosources @@ -1,3 +1,4 @@ -- Wrap BPF_Program into namespace and clean up -- Add an interface for derived pkt source to run a BPF filter. -- Tests, in particular the packet dumping needs testing. +- Tests + - pktsrc plugin + - pktdump plugin + diff --git a/src/Net.cc b/src/Net.cc index 38ec7c3b06..554aa890ba 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -116,8 +116,7 @@ RETSIGTYPE watchdog(int /* signo */) pkt_dumper = iosource_mgr->OpenPktDumper("watchdog-pkt.pcap", false); if ( ! pkt_dumper || pkt_dumper->IsError() ) { - reporter->Error("watchdog: can't open watchdog-pkt.pcap for writing\n"); - delete pkt_dumper; + reporter->Error("watchdog: can't open watchdog-pkt.pcap for writing"); pkt_dumper = 0; } } @@ -167,9 +166,9 @@ void net_init(name_list& interfaces, name_list& readfiles, iosource::PktSrc* ps = iosource_mgr->OpenPktSrc(readfiles[i], filter, false); assert(ps); - if ( ps->ErrorMsg() ) - reporter->FatalError("%s: problem with trace file %s - %s\n", - prog, readfiles[i], + if ( ! 
ps->IsOpen() ) + reporter->FatalError("problem with trace file %s (%s)", + readfiles[i], ps->ErrorMsg()); } } @@ -184,9 +183,9 @@ void net_init(name_list& interfaces, name_list& readfiles, iosource::PktSrc* ps = iosource_mgr->OpenPktSrc(interfaces[i], filter, true); assert(ps); - if ( ps->ErrorMsg() ) - reporter->FatalError("%s: problem with interface %s - %s\n", - prog, interfaces[i], + if ( ! ps->IsOpen() ) + reporter->FatalError("problem with interface %s (%s)", + interfaces[i], ps->ErrorMsg()); } } @@ -203,9 +202,9 @@ void net_init(name_list& interfaces, name_list& readfiles, pkt_dumper = iosource_mgr->OpenPktDumper(writefile, false); assert(pkt_dumper); - if ( pkt_dumper->ErrorMsg().size() ) - reporter->FatalError("problem opening dump file %s - %s\n", - writefile, pkt_dumper->ErrorMsg().c_str()); + if ( ! pkt_dumper->IsOpen() ) + reporter->FatalError("problem opening dump file %s (%s)", + writefile, pkt_dumper->ErrorMsg()); ID* id = global_scope()->Lookup("trace_output_file"); if ( ! id ) @@ -409,7 +408,7 @@ void net_get_final_stats() { iosource::PktSrc::Stats s; ps->Statistics(&s); - reporter->Info("%d packets received on interface %s, %d dropped\n", + reporter->Info("%d packets received on interface %s, %d dropped", s.received, ps->Path().c_str(), s.dropped); } } @@ -430,8 +429,6 @@ void net_finish(int drain_events) sessions->Done(); } - delete pkt_dumper; - #ifdef DEBUG extern int reassem_seen_bytes, reassem_copied_bytes; // DEBUG_MSG("Reassembly (TCP and IP/Frag): %d bytes seen, %d bytes copied\n", diff --git a/src/bro.bif b/src/bro.bif index adf5e3d6f0..1255f05f50 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -4228,114 +4228,6 @@ function enable_raw_output%(f: file%): any # # =========================================================================== -## Precompiles a PCAP filter and binds it to a given identifier. -## -## id: The PCAP identifier to reference the filter *s* later on. -## -## s: The PCAP filter. See ``man tcpdump`` for valid expressions. 
-## -## Returns: True if *s* is valid and precompiles successfully. -## -## .. bro:see:: install_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -## pcap_error -function precompile_pcap_filter%(id: PcapFilterID, s: string%): bool - %{ - bool success = true; - - const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); - - for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); - i != pkt_srcs.end(); i++ ) - { - iosource::PktSrc* ps = *i; - - if ( ! ps->PrecompileFilter(id->ForceAsInt(), - s->CheckString()) ) - { - reporter->Error("precompile_pcap_filter: %s", - ps->ErrorMsg()); - success = false; - } - } - - return new Val(success, TYPE_BOOL); - %} - -## Installs a PCAP filter that has been precompiled with -## :bro:id:`precompile_pcap_filter`. -## -## id: The PCAP filter id of a precompiled filter. -## -## Returns: True if the filter associated with *id* has been installed -## successfully. -## -## .. bro:see:: precompile_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -## pcap_error -function install_pcap_filter%(id: PcapFilterID%): bool - %{ - bool success = true; - - const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); - - for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); - i != pkt_srcs.end(); i++ ) - { - iosource::PktSrc* ps = *i; - - if ( ! ps->SetFilter(id->ForceAsInt()) ) - success = false; - } - - return new Val(success, TYPE_BOOL); - %} - -## Returns a string representation of the last PCAP error. -## -## Returns: A descriptive error message of the PCAP function that failed. -## -## .. 
bro:see:: precompile_pcap_filter -## install_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -function pcap_error%(%): string - %{ - const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); - - for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); - i != pkt_srcs.end(); i++ ) - { - iosource::PktSrc* ps = *i; - - const char* err = ps->ErrorMsg(); - if ( *err ) - return new StringVal(err); - } - - return new StringVal("no error"); - %} - ## Installs a filter to drop packets from a given IP source address with ## a certain probability if none of a given set of TCP flags are set. ## Note that for IPv6 packets with a Destination options header that has diff --git a/src/iosource/BPF_Program.cc b/src/iosource/BPF_Program.cc index 5260429eb0..8df7729ad1 100644 --- a/src/iosource/BPF_Program.cc +++ b/src/iosource/BPF_Program.cc @@ -91,7 +91,8 @@ bool BPF_Program::Compile(pcap_t* pcap, const char* filter, uint32 netmask, } bool BPF_Program::Compile(int snaplen, int linktype, const char* filter, - uint32 netmask, char* errbuf, bool optimize) + uint32 netmask, char* errbuf, unsigned int errbuf_len, + bool optimize) { FreeCode(); @@ -99,13 +100,18 @@ bool BPF_Program::Compile(int snaplen, int linktype, const char* filter, char my_error[PCAP_ERRBUF_SIZE]; int err = pcap_compile_nopcap(snaplen, linktype, &m_program, - (char *) filter, optimize, netmask, error); + (char *) filter, optimize, netmask, my_error); if ( err < 0 && errbuf ) - safe_strncpy(errbuf, my_errbuf, PCAP_ERRBUF_SIZE); + safe_strncpy(errbuf, my_error, errbuf_len); + *errbuf = '\0'; #else int err = pcap_compile_nopcap(snaplen, linktype, &m_program, (char*) filter, optimize, netmask); + + if ( err < 0 && errbuf && errbuf_len ) + *errbuf = '\0'; #endif + if ( err == 0 ) m_compiled = true; 
diff --git a/src/iosource/BPF_Program.h b/src/iosource/BPF_Program.h index 88ed669da2..3efa212bbc 100644 --- a/src/iosource/BPF_Program.h +++ b/src/iosource/BPF_Program.h @@ -30,7 +30,8 @@ public: // similarly to pcap_compile_nopcap(). Parameters are // similar. Returns true on success. bool Compile(int snaplen, int linktype, const char* filter, - uint32 netmask, char* errbuf = 0, bool optimize = true); + uint32 netmask, char* errbuf = 0, unsigned int errbuf_len = 0, + bool optimize = true); // Returns true if this program currently contains compiled // code, false otherwise. diff --git a/src/iosource/CMakeLists.txt b/src/iosource/CMakeLists.txt index 272ced7740..a36667aee7 100644 --- a/src/iosource/CMakeLists.txt +++ b/src/iosource/CMakeLists.txt @@ -6,6 +6,8 @@ include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR} ) +add_subdirectory(pcap) + set(iosource_SRCS BPF_Program.cc Component.cc @@ -14,6 +16,8 @@ set(iosource_SRCS PktSrc.cc ) +bif_target(pcap.bif) + bro_add_subdir_library(iosource ${iosource_SRCS}) add_dependencies(bro_iosource generate_outputs) diff --git a/src/iosource/IOSource.h b/src/iosource/IOSource.h index 3419152a9a..9083e8f4f1 100644 --- a/src/iosource/IOSource.h +++ b/src/iosource/IOSource.h @@ -14,7 +14,7 @@ namespace iosource { */ class IOSource { public: - IOSource() { idle = closed = false; } + IOSource() { idle = false; closed = false; } virtual ~IOSource() {} // Returns true if source has nothing ready to process. @@ -57,7 +57,6 @@ protected: // Derived classed are to set this to true if they have gone dry // temporarily. void SetIdle(bool is_idle) { idle = is_idle; } - // Derived classed are to set this to true if they have gone dry // temporarily. 
diff --git a/src/iosource/Manager.cc b/src/iosource/Manager.cc index e2be297a77..63b6888801 100644 --- a/src/iosource/Manager.cc +++ b/src/iosource/Manager.cc @@ -22,11 +22,19 @@ Manager::~Manager() { for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i ) { - (*i)->src->Done(); + // ??? (*i)->src->Done(); delete *i; } sources.clear(); + + for ( PktDumperList::iterator i = pkt_dumpers.begin(); i != pkt_dumpers.end(); ++i ) + { + (*i)->Done(); + delete *i; + } + + pkt_dumpers.clear(); } void Manager::RemoveAll() @@ -43,6 +51,7 @@ IOSource* Manager::FindSoonest(double* ts) i != sources.end(); ++i ) if ( ! (*i)->src->IsOpen() ) { + (*i)->src->Done(); delete *i; sources.erase(i); break; @@ -246,15 +255,11 @@ PktSrc* Manager::OpenPktSrc(const std::string& path, const std::string& filter, // Instantiate packet source. PktSrc* ps = (*component->Factory())(npath, filter, is_live); + assert(ps); - if ( ! (ps && ps->IsOpen()) ) - { - string type = (is_live ? "interface" : "trace file"); - string pserr = ps->ErrorMsg() ? (string(" - ") + ps->ErrorMsg()) : ""; - - reporter->FatalError("%s: problem with %s %s%s", - prog, npath.c_str(), type.c_str(), pserr.c_str()); - } + if ( ! ps->IsOpen() && ps->ErrorMsg() ) + // Set an error message if it didn't open successfully. + ps->Error("could not open"); DBG_LOG(DBG_PKTIO, "Created packet source of type %s for %s", component->Name().c_str(), npath.c_str()); @@ -291,16 +296,16 @@ PktDumper* Manager::OpenPktDumper(const string& path, bool append) // Instantiate packet dumper. PktDumper* pd = (*component->Factory())(npath, append); + assert(pd); - if ( ! (pd && pd->IsOpen()) ) - { - string pderr = pd->ErrorMsg().size() ? (string(" - ") + pd->ErrorMsg()) : ""; - - reporter->FatalError("%s: can't open write file \"%s\"%s", - prog, npath.c_str(), pderr.c_str()); - } + if ( ! pd->IsOpen() && pd->ErrorMsg() ) + // Set an error message if it didn't open successfully. 
+ pd->Error("could not open"); DBG_LOG(DBG_PKTIO, "Created packer dumper of type %s for %s", component->Name().c_str(), npath.c_str()); + pd->Init(); + pkt_dumpers.push_back(pd); + return pd; } diff --git a/src/iosource/Manager.h b/src/iosource/Manager.h index 5a3a58d798..4198c73680 100644 --- a/src/iosource/Manager.h +++ b/src/iosource/Manager.h @@ -64,7 +64,10 @@ protected: typedef std::list SourceList; SourceList sources; + typedef std::list PktDumperList; + PktSrcList pkt_srcs; + PktDumperList pkt_dumpers; }; } diff --git a/src/iosource/PktDumper.cc b/src/iosource/PktDumper.cc index 3c8595e11c..a4bc3a82f8 100644 --- a/src/iosource/PktDumper.cc +++ b/src/iosource/PktDumper.cc @@ -20,6 +20,16 @@ PktDumper::~PktDumper() { } +void PktDumper::Init() + { + Open(); + } + +void PktDumper::Done() + { + Close(); + } + const std::string& PktDumper::Path() const { return props.path; @@ -40,9 +50,9 @@ bool PktDumper::IsError() const return errmsg.size(); } -const std::string& PktDumper::ErrorMsg() const +const char* PktDumper::ErrorMsg() const { - return errmsg; + return errmsg.size() ? errmsg.c_str() : 0; } int PktDumper::HdrSize() const @@ -60,8 +70,8 @@ void PktDumper::Opened(const Properties& arg_props) void PktDumper::Closed() { is_open = false; - props.path = ""; DBG_LOG(DBG_PKTIO, "Closed dumper %s", props.path.c_str()); + props.path = ""; } void PktDumper::Error(const std::string& msg) diff --git a/src/iosource/PktDumper.h b/src/iosource/PktDumper.h index 85c64ffd1d..d8201f977c 100644 --- a/src/iosource/PktDumper.h +++ b/src/iosource/PktDumper.h @@ -21,16 +21,18 @@ public: bool IsOpen() const; double OpenTime() const; bool IsError() const; - const std::string& ErrorMsg() const; + const char* ErrorMsg() const; int HdrSize() const; bool Record(const Packet* pkt); - // PktSrc interface for derived classes to implement. + // PktDumper interface for derived classes to implement. 
virtual void Close() = 0; virtual void Open() = 0; virtual bool Dump(const Packet* pkt) = 0; protected: + friend class Manager; + // Methods to use by derived classed. // struct Properties { @@ -39,6 +41,9 @@ protected: double open_time; }; + void Init(); + void Done(); + void Opened(const Properties& props); void Closed(); void Error(const std::string& msg); diff --git a/src/iosource/PktSrc.cc b/src/iosource/PktSrc.cc index 2e056a10ba..30f58a5b3f 100644 --- a/src/iosource/PktSrc.cc +++ b/src/iosource/PktSrc.cc @@ -17,6 +17,7 @@ PktSrc::PktSrc() { have_packet = false; errbuf = ""; + SetClosed(true); next_sync_point = 0; first_timestamp = 0.0; @@ -195,7 +196,8 @@ void PktSrc::Init() void PktSrc::Done() { - Close(); + if ( IsOpen() ) + Close(); } void PktSrc::GetFds(int* read, int* write, int* except) @@ -433,8 +435,13 @@ int PktSrc::PrecompileBPFFilter(int index, const std::string& filter) if ( ! code->Compile(SnapLen(), LinkType(), filter.c_str(), Netmask(), errbuf, sizeof(errbuf)) ) { - Error(fmt("cannot compile BPF filter \"%s\": %s", filter.c_str(), errbuf)); - Close(); + string msg = fmt("cannot compile BPF filter \"%s\"", filter.c_str()); + + if ( *errbuf ) + msg += ": " + string(errbuf); + + Error(msg); + delete code; return 0; } diff --git a/src/iosource/PktSrc.h b/src/iosource/PktSrc.h index 5dd51ab5f2..72e1a0da8c 100644 --- a/src/iosource/PktSrc.h +++ b/src/iosource/PktSrc.h @@ -83,6 +83,8 @@ public: static int GetLinkHeaderSize(int link_type); protected: + friend class Manager; + // Methods to use by derived classes. struct Properties { diff --git a/src/iosource/pcap.bif b/src/iosource/pcap.bif new file mode 100644 index 0000000000..ee4e1e6c06 --- /dev/null +++ b/src/iosource/pcap.bif @@ -0,0 +1,104 @@ + +## Precompiles a PCAP filter and binds it to a given identifier. +## +## id: The PCAP identifier to reference the filter *s* later on. +## +## s: The PCAP filter. See ``man tcpdump`` for valid expressions. 
+## +## Returns: True if *s* is valid and precompiles successfully. +## +## .. bro:see:: install_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +## pcap_error +function precompile_pcap_filter%(id: PcapFilterID, s: string%): bool + %{ + bool success = true; + + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + { + iosource::PktSrc* ps = *i; + + if ( ! ps->PrecompileFilter(id->ForceAsInt(), + s->CheckString()) ) + success = false; + } + + return new Val(success, TYPE_BOOL); + %} + +## Installs a PCAP filter that has been precompiled with +## :bro:id:`precompile_pcap_filter`. +## +## id: The PCAP filter id of a precompiled filter. +## +## Returns: True if the filter associated with *id* has been installed +## successfully. +## +## .. bro:see:: precompile_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +## pcap_error +function install_pcap_filter%(id: PcapFilterID%): bool + %{ + bool success = true; + + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + { + iosource::PktSrc* ps = *i; + + if ( ! ps->SetFilter(id->ForceAsInt()) ) + success = false; + } + + return new Val(success, TYPE_BOOL); + %} + +## Returns a string representation of the last PCAP error. +## +## Returns: A descriptive error message of the PCAP function that failed. +## +## .. 
bro:see:: precompile_pcap_filter +## install_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +function pcap_error%(%): string + %{ + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + { + iosource::PktSrc* ps = *i; + + const char* err = ps->ErrorMsg(); + if ( *err ) + return new StringVal(err); + } + + return new StringVal("no error"); + %} diff --git a/src/iosource/pcap/Dumper.cc b/src/iosource/pcap/Dumper.cc index 2210e08e64..5d0b5e599b 100644 --- a/src/iosource/pcap/Dumper.cc +++ b/src/iosource/pcap/Dumper.cc @@ -5,9 +5,9 @@ #include "Dumper.h" #include "../PktSrc.h" -#include "../../../Net.h" +#include "../../Net.h" -using namespace iosource::pktsrc; +using namespace iosource::pcap; PcapDumper::PcapDumper(const std::string& path, bool arg_append) { diff --git a/src/iosource/pcap/Dumper.h b/src/iosource/pcap/Dumper.h index c2762a2b04..8013afcb8e 100644 --- a/src/iosource/pcap/Dumper.h +++ b/src/iosource/pcap/Dumper.h @@ -9,7 +9,7 @@ extern "C" { #include "../PktDumper.h" namespace iosource { -namespace pktsrc { +namespace pcap { class PcapDumper : public PktDumper { public: diff --git a/src/iosource/pcap/Plugin.cc b/src/iosource/pcap/Plugin.cc index a412e4f650..f0490e6e3d 100644 --- a/src/iosource/pcap/Plugin.cc +++ b/src/iosource/pcap/Plugin.cc @@ -12,8 +12,8 @@ class Plugin : public plugin::Plugin { public: plugin::Configuration Configure() { - AddComponent(new ::iosource::pktsrc::SourceComponent("PcapReader", "pcap", ::iosource::pktsrc::SourceComponent::BOTH, ::iosource::pktsrc::PcapSource::Instantiate)); - AddComponent(new ::iosource::pktsrc::DumperComponent("PcapWriter", "pcap", ::iosource::pktsrc::PcapDumper::Instantiate)); + 
AddComponent(new ::iosource::PktSrcComponent("PcapReader", "pcap", ::iosource::PktSrcComponent::BOTH, ::iosource::pcap::PcapSource::Instantiate)); + AddComponent(new ::iosource::PktDumperComponent("PcapWriter", "pcap", ::iosource::pcap::PcapDumper::Instantiate)); plugin::Configuration config; config.name = "Bro::Pcap"; diff --git a/src/iosource/pcap/Source.cc b/src/iosource/pcap/Source.cc index 67a73fbd8a..79ded790bd 100644 --- a/src/iosource/pcap/Source.cc +++ b/src/iosource/pcap/Source.cc @@ -9,7 +9,7 @@ #include #endif -using namespace iosource::pktsrc; +using namespace iosource::pcap; PcapSource::~PcapSource() { @@ -182,7 +182,7 @@ void PcapSource::DoneWithPacket(Packet* pkt) int PcapSource::PrecompileFilter(int index, const std::string& filter) { - return PktSrc::PrecompileBPFFilter(index, filter). + return PktSrc::PrecompileBPFFilter(index, filter); } int PcapSource::SetFilter(int index) @@ -192,7 +192,7 @@ int PcapSource::SetFilter(int index) char errbuf[PCAP_ERRBUF_SIZE]; - BPF_Program* code = GetFilter(index); + BPF_Program* code = GetBPFFilter(index); if ( ! 
code ) { diff --git a/src/iosource/pcap/Source.h b/src/iosource/pcap/Source.h index b9b61ac618..039bdec81a 100644 --- a/src/iosource/pcap/Source.h +++ b/src/iosource/pcap/Source.h @@ -6,7 +6,7 @@ #include "../PktSrc.h" namespace iosource { -namespace pktsrc { +namespace pcap { class PcapSource : public iosource::PktSrc { public: diff --git a/src/main.cc b/src/main.cc index 5066ef85ee..295fa79ee7 100644 --- a/src/main.cc +++ b/src/main.cc @@ -390,6 +390,7 @@ void terminate_bro() delete plugin_mgr; delete thread_mgr; delete reporter; + delete iosource_mgr; reporter = 0; } diff --git a/testing/btest/Baseline/core.pcap.dumper/output b/testing/btest/Baseline/core.pcap.dumper/output new file mode 100644 index 0000000000..1055e73ebe --- /dev/null +++ b/testing/btest/Baseline/core.pcap.dumper/output @@ -0,0 +1 @@ +00000010 ff ff 00 00 01 00 00 00 1d a2 b2 4e 73 00 07 00 | | 00000010 00 20 00 00 01 00 00 00 1d a2 b2 4e 73 00 07 00 | diff --git a/testing/btest/Baseline/core.pcap.dynamic-filter/conn.log b/testing/btest/Baseline/core.pcap.dynamic-filter/conn.log new file mode 100644 index 0000000000..f42999c4fa --- /dev/null +++ b/testing/btest/Baseline/core.pcap.dynamic-filter/conn.log @@ -0,0 +1,25 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open 2014-08-24-15-51-55 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string bool count string count count count count set[string] +1300475167.096535 CXWv6p3arKYeMETxOg 141.142.220.202 5353 224.0.0.251 5353 udp dns - - - S0 - 0 D 1 73 0 0 (empty) +1300475168.853899 CCvvfg3TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF - 0 Dd 1 66 1 117 (empty) +1300475168.854378 CsRx2w45OKnoww6xl4 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 
52 99 SF - 0 Dd 1 80 1 127 (empty) +1300475168.854837 CRJuHdVW0XPVINV8a 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF - 0 Dd 1 66 1 211 (empty) +1300475168.857956 CPbrpk1qSsw6ESzHV4 141.142.220.118 32902 141.142.2.2 53 udp dns 0.000317 38 89 SF - 0 Dd 1 66 1 117 (empty) +1300475168.858306 C6pKV8GSxOnSLghOa 141.142.220.118 59816 141.142.2.2 53 udp dns 0.000343 52 99 SF - 0 Dd 1 80 1 127 (empty) +1300475168.858713 CIPOse170MGiRM1Qf4 141.142.220.118 59714 141.142.2.2 53 udp dns 0.000375 38 183 SF - 0 Dd 1 66 1 211 (empty) +1300475168.891644 C7XEbhP654jzLoe3a 141.142.220.118 58206 141.142.2.2 53 udp dns 0.000339 38 89 SF - 0 Dd 1 66 1 117 (empty) +1300475168.892037 CJ3xTn1c4Zw9TmAE05 141.142.220.118 38911 141.142.2.2 53 udp dns 0.000335 52 99 SF - 0 Dd 1 80 1 127 (empty) +1300475168.892414 CMXxB5GvmoxJFXdTa 141.142.220.118 59746 141.142.2.2 53 udp dns 0.000421 38 183 SF - 0 Dd 1 66 1 211 (empty) +1300475168.893988 Caby8b1slFea8xwSmb 141.142.220.118 45000 141.142.2.2 53 udp dns 0.000384 38 89 SF - 0 Dd 1 66 1 117 (empty) +1300475168.894422 Che1bq3i2rO3KD1Syg 141.142.220.118 48479 141.142.2.2 53 udp dns 0.000317 52 99 SF - 0 Dd 1 80 1 127 (empty) +1300475168.894787 C3SfNE4BWaU4aSuwkc 141.142.220.118 48128 141.142.2.2 53 udp dns 0.000423 38 183 SF - 0 Dd 1 66 1 211 (empty) +1300475168.901749 CEle3f3zno26fFZkrh 141.142.220.118 56056 141.142.2.2 53 udp dns 0.000402 36 131 SF - 0 Dd 1 64 1 159 (empty) +1300475168.902195 CwSkQu4eWZCH7OONC1 141.142.220.118 55092 141.142.2.2 53 udp dns 0.000374 36 198 SF - 0 Dd 1 64 1 226 (empty) +1300475168.652003 CjhGID4nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp - - - - OTH - 0 D 1 515 0 0 (empty) +#close 2014-08-24-15-51-55 diff --git a/testing/btest/Baseline/core.pcap.dynamic-filter/output b/testing/btest/Baseline/core.pcap.dynamic-filter/output new file mode 100644 index 0000000000..e8be0b0195 --- /dev/null +++ b/testing/btest/Baseline/core.pcap.dynamic-filter/output @@ -0,0 +1,30 @@ +1, 
[orig_h=141.142.220.202, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp] +2, [orig_h=141.142.220.118, orig_p=35634/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +3, [orig_h=141.142.220.118, orig_p=43927/udp, resp_h=141.142.2.2, resp_p=53/udp] +4, [orig_h=141.142.220.118, orig_p=43927/udp, resp_h=141.142.2.2, resp_p=53/udp] +5, [orig_h=141.142.220.118, orig_p=37676/udp, resp_h=141.142.2.2, resp_p=53/udp] +6, [orig_h=141.142.220.118, orig_p=37676/udp, resp_h=141.142.2.2, resp_p=53/udp] +7, [orig_h=141.142.220.118, orig_p=40526/udp, resp_h=141.142.2.2, resp_p=53/udp] +8, [orig_h=141.142.220.118, orig_p=40526/udp, resp_h=141.142.2.2, resp_p=53/udp] +9, [orig_h=141.142.220.118, orig_p=32902/udp, resp_h=141.142.2.2, resp_p=53/udp] +10, [orig_h=141.142.220.118, orig_p=32902/udp, resp_h=141.142.2.2, resp_p=53/udp] +11, [orig_h=141.142.220.118, orig_p=59816/udp, resp_h=141.142.2.2, resp_p=53/udp] +12, [orig_h=141.142.220.118, orig_p=59816/udp, resp_h=141.142.2.2, resp_p=53/udp] +13, [orig_h=141.142.220.118, orig_p=59714/udp, resp_h=141.142.2.2, resp_p=53/udp] +14, [orig_h=141.142.220.118, orig_p=59714/udp, resp_h=141.142.2.2, resp_p=53/udp] +15, [orig_h=141.142.220.118, orig_p=58206/udp, resp_h=141.142.2.2, resp_p=53/udp] +16, [orig_h=141.142.220.118, orig_p=58206/udp, resp_h=141.142.2.2, resp_p=53/udp] +17, [orig_h=141.142.220.118, orig_p=38911/udp, resp_h=141.142.2.2, resp_p=53/udp] +18, [orig_h=141.142.220.118, orig_p=38911/udp, resp_h=141.142.2.2, resp_p=53/udp] +19, [orig_h=141.142.220.118, orig_p=59746/udp, resp_h=141.142.2.2, resp_p=53/udp] +20, [orig_h=141.142.220.118, orig_p=59746/udp, resp_h=141.142.2.2, resp_p=53/udp] +21, [orig_h=141.142.220.118, orig_p=45000/udp, resp_h=141.142.2.2, resp_p=53/udp] +22, [orig_h=141.142.220.118, orig_p=45000/udp, resp_h=141.142.2.2, resp_p=53/udp] +23, [orig_h=141.142.220.118, orig_p=48479/udp, resp_h=141.142.2.2, resp_p=53/udp] +24, [orig_h=141.142.220.118, orig_p=48479/udp, resp_h=141.142.2.2, resp_p=53/udp] +25, 
[orig_h=141.142.220.118, orig_p=48128/udp, resp_h=141.142.2.2, resp_p=53/udp] +26, [orig_h=141.142.220.118, orig_p=48128/udp, resp_h=141.142.2.2, resp_p=53/udp] +27, [orig_h=141.142.220.118, orig_p=56056/udp, resp_h=141.142.2.2, resp_p=53/udp] +28, [orig_h=141.142.220.118, orig_p=56056/udp, resp_h=141.142.2.2, resp_p=53/udp] +29, [orig_h=141.142.220.118, orig_p=55092/udp, resp_h=141.142.2.2, resp_p=53/udp] +30, [orig_h=141.142.220.118, orig_p=55092/udp, resp_h=141.142.2.2, resp_p=53/udp] diff --git a/testing/btest/Baseline/core.pcap.filter-error/output b/testing/btest/Baseline/core.pcap.filter-error/output new file mode 100644 index 0000000000..82804bb483 --- /dev/null +++ b/testing/btest/Baseline/core.pcap.filter-error/output @@ -0,0 +1,3 @@ +fatal error in /home/robin/bro/master/scripts/base/frameworks/packet-filter/./main.bro, line 282: Bad pcap filter 'kaputt' +---- +error, cannot compile BPF filter "kaputt, too" diff --git a/testing/btest/Baseline/core.pcap.input-error/output2 b/testing/btest/Baseline/core.pcap.input-error/output2 new file mode 100644 index 0000000000..74666797b9 --- /dev/null +++ b/testing/btest/Baseline/core.pcap.input-error/output2 @@ -0,0 +1,2 @@ +fatal error: problem with interface NO_SUCH_INTERFACE +fatal error: problem with trace file NO_SUCH_TRACE (NO_SUCH_TRACE: No such file or directory) diff --git a/testing/btest/Baseline/core.pcap.pseudo-realtime/output b/testing/btest/Baseline/core.pcap.pseudo-realtime/output new file mode 100644 index 0000000000..d708959ce9 --- /dev/null +++ b/testing/btest/Baseline/core.pcap.pseudo-realtime/output @@ -0,0 +1 @@ +real time matches trace time diff --git a/testing/btest/Baseline/core.pcap.read-trace-with-filter/conn.log b/testing/btest/Baseline/core.pcap.read-trace-with-filter/conn.log new file mode 100644 index 0000000000..8522d69ae6 --- /dev/null +++ b/testing/btest/Baseline/core.pcap.read-trace-with-filter/conn.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) 
+#unset_field - +#path conn +#open 2014-08-23-18-29-48 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string bool count string count count count count set[string] +1300475168.892936 CXWv6p3arKYeMETxOg 141.142.220.118 50000 208.80.152.3 80 tcp http 0.229603 1148 734 S1 - 0 ShADad 6 1468 4 950 (empty) +#close 2014-08-23-18-29-48 diff --git a/testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log b/testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log new file mode 100644 index 0000000000..75b09c608a --- /dev/null +++ b/testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path packet_filter +#open 2014-08-23-18-29-48 +#fields ts node filter init success +#types time string string bool bool +1408818588.510297 bro port 50000 T T +#close 2014-08-23-18-29-48 diff --git a/testing/btest/core/pcap/dumper.bro b/testing/btest/core/pcap/dumper.bro new file mode 100644 index 0000000000..9cf5cbe71c --- /dev/null +++ b/testing/btest/core/pcap/dumper.bro @@ -0,0 +1,5 @@ +# @TEST-EXEC: bro -r $TRACES/workshop_2011_browse.trace -w dump +# @TEST-EXEC: hexdump -C $TRACES/workshop_2011_browse.trace >1 +# @TEST-EXEC: hexdump -C dump >2 +# @TEST-EXEC: sdiff -s 1 2 >output || true +# @TEST-EXEC: btest-diff output diff --git a/testing/btest/core/pcap/dynamic-filter.bro b/testing/btest/core/pcap/dynamic-filter.bro new file mode 100644 index 0000000000..012858fa65 --- /dev/null +++ b/testing/btest/core/pcap/dynamic-filter.bro @@ -0,0 +1,32 @@ +# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff conn.log + +redef enum PcapFilterID += { A, B }; + 
+global cnt = 0; + +event new_packet(c: connection, p: pkt_hdr) + { + ++cnt; + + print cnt, c$id; + + if ( cnt == 1 ) + if ( ! install_pcap_filter(A) ) + print "error 3"; + + if ( cnt == 2 ) + if ( ! install_pcap_filter(B) ) + print "error 4"; + } + +event bro_init() + { + if ( ! precompile_pcap_filter(A, "port 80") ) + print "error 1"; + + if ( ! precompile_pcap_filter(B, "port 53") ) + print "error 2"; + } + diff --git a/testing/btest/core/pcap/filter-error.bro b/testing/btest/core/pcap/filter-error.bro new file mode 100644 index 0000000000..1f8ad7a464 --- /dev/null +++ b/testing/btest/core/pcap/filter-error.bro @@ -0,0 +1,16 @@ +# @TEST-EXEC-FAIL: bro -r $TRACES/workshop_2011_browse.trace -f "kaputt" >>output 2>&1 +# @TEST-EXEC-FAIL: test -e conn.log +# @TEST-EXEC: echo ---- >>output +# @TEST-EXEC: bro -r $TRACES/workshop_2011_browse.trace %INPUT >>output 2>&1 +# @TEST-EXEC: test -e conn.log +# @TEST-EXEC: btest-diff output + +redef enum PcapFilterID += { A }; + +event bro_init() + { + if ( ! precompile_pcap_filter(A, "kaputt, too") ) + print "error", pcap_error(); + } + + diff --git a/testing/btest/core/pcap/input-error.bro b/testing/btest/core/pcap/input-error.bro new file mode 100644 index 0000000000..2a0787c832 --- /dev/null +++ b/testing/btest/core/pcap/input-error.bro @@ -0,0 +1,14 @@ +# @TEST-EXEC-FAIL: bro -i NO_SUCH_INTERFACE 2>&1 >>output 2>&1 +# @TEST-EXEC: cat output | sed 's/(.*)//g' >output2 +# @TEST-EXEC-FAIL: bro -r NO_SUCH_TRACE 2>&1 >>output2 2>&1 +# @TEST-EXEC: btest-diff output2 + +redef enum PcapFilterID += { A }; + +event bro_init() + { + if ( ! 
precompile_pcap_filter(A, "kaputt, too") ) + print "error", pcap_error(); + } + + diff --git a/testing/btest/core/pcap/pseudo-realtime.bro b/testing/btest/core/pcap/pseudo-realtime.bro new file mode 100644 index 0000000000..625706f321 --- /dev/null +++ b/testing/btest/core/pcap/pseudo-realtime.bro @@ -0,0 +1,42 @@ +# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT --pseudo-realtime >output +# @TEST-EXEC: btest-diff output + +global init = F; +global last_network = network_time(); +global last_current = current_time(); +global cnt = 0; +global an = 0secs; +global ac = 0secs; + +event new_packet(c: connection, p: pkt_hdr) + { + local tn = network_time(); + local tc = current_time(); + local dn = tn - last_network; + local dc = tc - last_current; + + last_network = tn; + last_current = tc; + ++cnt; + + if ( ! init ) + { + init = T; + return; + } + + an += dn; + ac += dc; + + # print fmt("num=%d agg_delta_network=%.1f agg_delta_real=%.1f", cnt, an, ac); + } + +event bro_done() + { + local d = (an - ac); + if ( d < 0 secs) + d = -d; + + print fmt("real time %s trace time", d < 1.0secs ? "matches" : "does NOT match"); + } + diff --git a/testing/btest/core/pcap/read-trace-with-filter.bro b/testing/btest/core/pcap/read-trace-with-filter.bro new file mode 100644 index 0000000000..5878bada64 --- /dev/null +++ b/testing/btest/core/pcap/read-trace-with-filter.bro @@ -0,0 +1,3 @@ +# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace -f "port 50000" +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff packet_filter.log From d5513a07575b990a87e994a3ae4873bc02adaf69 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 26 Aug 2014 17:50:28 -0500 Subject: [PATCH 031/106] Improve multipart HTTP/MIME entity file analysis. Singular CR or LF characters in multipart body content are no longer converted to a full CRLF (thus corrupting the file) and it also no longer considers the CRLF before the multipart boundary as part of the content. Addresses BIT-1235. 
--- src/analyzer/protocol/http/HTTP.cc | 21 +++++++++++++ src/analyzer/protocol/http/HTTP.h | 2 ++ src/analyzer/protocol/mime/MIME.cc | 26 ++++++++++++++-- src/analyzer/protocol/mime/MIME.h | 1 + src/analyzer/protocol/tcp/ContentLine.cc | 7 +++-- src/analyzer/protocol/tcp/ContentLine.h | 5 ++++ .../1-file | 2 +- .../2-file | 2 +- .../3-file | 2 +- .../out | 30 +++++++++---------- .../out | 8 ++--- .../thefile0 | 1 - .../all-events.log | 6 ++-- 13 files changed, 82 insertions(+), 31 deletions(-) diff --git a/src/analyzer/protocol/http/HTTP.cc b/src/analyzer/protocol/http/HTTP.cc index 02b6947b9f..857cb35980 100644 --- a/src/analyzer/protocol/http/HTTP.cc +++ b/src/analyzer/protocol/http/HTTP.cc @@ -463,6 +463,20 @@ void HTTP_Entity::SubmitAllHeaders() if ( DEBUG_http ) DEBUG_MSG("%.6f end of headers\n", network_time); + if ( Parent() && + Parent()->MIMEContentType() == mime::CONTENT_TYPE_MULTIPART ) + { + // Don't treat single \r or \n characters in the multipart body content + // as lines because the MIME_Entity code will implicitly add back a + // \r\n for each line it receives. We do this instead of setting + // plain delivery mode for the content line analyzer because + // the size of the content to deliver "plainly" may be unknown + // and just leaving it in that mode indefinitely screws up the + // detection of multipart boundaries. + http_message->content_line->SupressWeirds(true); + http_message->content_line->SetCRLFAsEOL(0); + } + // The presence of a message-body in a request is signaled by // the inclusion of a Content-Length or Transfer-Encoding // header field in the request's message-headers. 
@@ -664,6 +678,13 @@ void HTTP_Message::EndEntity(mime::MIME_Entity* entity) current_entity = (HTTP_Entity*) entity->Parent(); + if ( entity->Parent() && + entity->Parent()->MIMEContentType() == mime::CONTENT_TYPE_MULTIPART ) + { + content_line->SupressWeirds(false); + content_line->SetCRLFAsEOL(); + } + // It is necessary to call Done when EndEntity is triggered by // SubmitAllHeaders (through EndOfData). if ( entity == top_level ) diff --git a/src/analyzer/protocol/http/HTTP.h b/src/analyzer/protocol/http/HTTP.h index 5785d93198..075e6f4dba 100644 --- a/src/analyzer/protocol/http/HTTP.h +++ b/src/analyzer/protocol/http/HTTP.h @@ -99,6 +99,8 @@ enum { // HTTP_MessageDone -> {Request,Reply}Made class HTTP_Message : public mime::MIME_Message { +friend class HTTP_Entity; + public: HTTP_Message(HTTP_Analyzer* analyzer, tcp::ContentLine_Analyzer* cl, bool is_orig, int expect_body, int64_t init_header_length); diff --git a/src/analyzer/protocol/mime/MIME.cc b/src/analyzer/protocol/mime/MIME.cc index 6f992c9256..0e54fb7826 100644 --- a/src/analyzer/protocol/mime/MIME.cc +++ b/src/analyzer/protocol/mime/MIME.cc @@ -552,6 +552,7 @@ void MIME_Entity::init() data_buf_offset = -1; message = 0; + delay_adding_implicit_CRLF = false; } MIME_Entity::~MIME_Entity() @@ -1005,12 +1006,33 @@ void MIME_Entity::DecodeDataLine(int len, const char* data, int trailing_CRLF) void MIME_Entity::DecodeBinary(int len, const char* data, int trailing_CRLF) { + if ( delay_adding_implicit_CRLF ) + { + delay_adding_implicit_CRLF = false; + DataOctet(CR); + DataOctet(LF); + } + DataOctets(len, data); if ( trailing_CRLF ) { - DataOctet(CR); - DataOctet(LF); + if ( Parent() && + Parent()->MIMEContentType() == mime::CONTENT_TYPE_MULTIPART ) + { + // For multipart body content, we want to keep all implicit CRLFs + // except for the last because that one belongs to the multipart + // boundary delimiter, not the content. 
Simply delaying the + // addition of implicit CRLFs until another chunk of content + // data comes in is a way to prevent the CRLF before the final + // message boundary from being accidentally added to the content. + delay_adding_implicit_CRLF = true; + } + else + { + DataOctet(CR); + DataOctet(LF); + } } } diff --git a/src/analyzer/protocol/mime/MIME.h b/src/analyzer/protocol/mime/MIME.h index 2b2f88105d..1790d0faaa 100644 --- a/src/analyzer/protocol/mime/MIME.h +++ b/src/analyzer/protocol/mime/MIME.h @@ -172,6 +172,7 @@ protected: int data_buf_offset; MIME_Message* message; + bool delay_adding_implicit_CRLF; }; // The reason I separate MIME_Message as an abstract class is to diff --git a/src/analyzer/protocol/tcp/ContentLine.cc b/src/analyzer/protocol/tcp/ContentLine.cc index 72314dd45d..f5dd7aaf07 100644 --- a/src/analyzer/protocol/tcp/ContentLine.cc +++ b/src/analyzer/protocol/tcp/ContentLine.cc @@ -32,6 +32,7 @@ void ContentLine_Analyzer::InitState() seq_to_skip = 0; plain_delivery_length = 0; is_plain = 0; + suppress_weirds = false; InitBuffer(0); } @@ -258,7 +259,7 @@ int ContentLine_Analyzer::DoDeliverOnce(int len, const u_char* data) else { - if ( Conn()->FlagEvent(SINGULAR_LF) ) + if ( ! suppress_weirds && Conn()->FlagEvent(SINGULAR_LF) ) Conn()->Weird("line_terminated_with_single_LF"); buf[offset++] = c; } @@ -277,7 +278,7 @@ int ContentLine_Analyzer::DoDeliverOnce(int len, const u_char* data) } if ( last_char == '\r' ) - if ( Conn()->FlagEvent(SINGULAR_CR) ) + if ( ! suppress_weirds && Conn()->FlagEvent(SINGULAR_CR) ) Conn()->Weird("line_terminated_with_single_CR"); last_char = c; @@ -307,7 +308,7 @@ void ContentLine_Analyzer::CheckNUL() ; // Ignore it. else { - if ( Conn()->FlagEvent(NUL_IN_LINE) ) + if ( ! 
suppress_weirds && Conn()->FlagEvent(NUL_IN_LINE) ) Conn()->Weird("NUL_in_line"); flag_NULs = 0; } diff --git a/src/analyzer/protocol/tcp/ContentLine.h b/src/analyzer/protocol/tcp/ContentLine.h index 93c473c47c..7a5a6b996e 100644 --- a/src/analyzer/protocol/tcp/ContentLine.h +++ b/src/analyzer/protocol/tcp/ContentLine.h @@ -15,6 +15,9 @@ public: ContentLine_Analyzer(Connection* conn, bool orig); ~ContentLine_Analyzer(); + void SupressWeirds(bool enable) + { suppress_weirds = enable; } + // If enabled, flag (first) line with embedded NUL. Default off. void SetIsNULSensitive(bool enable) { flag_NULs = enable; } @@ -96,6 +99,8 @@ protected: // Don't deliver further data. int skip_deliveries; + bool suppress_weirds; + // If true, flag (first) line with embedded NUL. unsigned int flag_NULs:1; diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/1-file b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/1-file index 77356c3140..30d74d2584 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/1-file +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/1-file @@ -1 +1 @@ -test +test \ No newline at end of file diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/2-file b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/2-file index ac2a9e002d..d606037cb2 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/2-file +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/2-file @@ -1 +1 @@ -test2 +test2 \ No newline at end of file diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/3-file b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/3-file index ae48ec8c20..29f446afe2 100644 --- 
a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/3-file +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/3-file @@ -1 +1 @@ -test3 +test3 \ No newline at end of file diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/out b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/out index b22c8fe886..0bf8d6a0c9 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/out +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/out @@ -1,39 +1,39 @@ FILE_NEW file #0, 0, 0 FILE_BOF_BUFFER -test^M^J +test FILE_OVER_NEW_CONNECTION FILE_STATE_REMOVE -file #0, 6, 0 +file #0, 4, 0 [orig_h=141.142.228.5, orig_p=57262/tcp, resp_h=54.243.88.146, resp_p=80/tcp] source: HTTP -MD5: 9f06243abcb89c70e0c331c61d871fa7 -SHA1: fde773a18bb29f5ed65e6f0a7aa717fd1fa485d4 -SHA256: 837ccb607e312b170fac7383d7ccfd61fa5072793f19a25e75fbacb56539b86b +MD5: 098f6bcd4621d373cade4e832627b4f6 +SHA1: a94a8fe5ccb19ba61c4c0873d391e987982fbbd3 +SHA256: 9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 FILE_NEW file #1, 0, 0 FILE_BOF_BUFFER -test2^M^J +test2 FILE_OVER_NEW_CONNECTION FILE_STATE_REMOVE -file #1, 7, 0 +file #1, 5, 0 [orig_h=141.142.228.5, orig_p=57262/tcp, resp_h=54.243.88.146, resp_p=80/tcp] source: HTTP -MD5: d68af81ef370b3873d50f09140068810 -SHA1: 51a7b6f2d91f6a87822dc04560f2972bc14fc97e -SHA256: de0edd0ac4a705aff70f34734e90a1d0a1d8b76abe4bb53f3ea934bc105b3b17 +MD5: ad0234829205b9033196ba818f7a872b +SHA1: 109f4b3c50d7b0df729d299bc6f8e9ef9066971f +SHA256: 60303ae22b998861bce3b28f33eec1be758a213c86c93c076dbe9f558c11c752 FILE_NEW file #2, 0, 0 FILE_BOF_BUFFER -test3^M^J +test3 FILE_OVER_NEW_CONNECTION FILE_STATE_REMOVE -file #2, 7, 0 +file #2, 5, 0 [orig_h=141.142.228.5, orig_p=57262/tcp, resp_h=54.243.88.146, resp_p=80/tcp] source: HTTP -MD5: 1a3d75d44753ad246f0bd333cdaf08b0 -SHA1: 
4f98809ab09272dfcc58266e3f23ae2393f70e76 -SHA256: 018c67a2c30ed9977e1dddfe98cac542165dac355cf9764c91a362613e752933 +MD5: 8ad8757baa8564dc136c1e07507f4a98 +SHA1: 3ebfa301dc59196f18593c45e519287a23297589 +SHA256: fd61a03af4f77d870fc21e05e7e80678095c92d808cfb3b5c279ee04c74aca13 FILE_NEW file #3, 0, 0 FILE_BOF_BUFFER diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/out b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/out index 1d54e9a2ac..44c240c7ee 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/out +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/out @@ -6,12 +6,12 @@ MIME_TYPE text/plain FILE_OVER_NEW_CONNECTION FILE_STATE_REMOVE -file #0, 79, 0 +file #0, 77, 0 [orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp] source: SMTP -MD5: 92bca2e6cdcde73647125da7dccbdd07 -SHA1: b7e497be8a9f5e2c4b6980fceb015360f98f4a13 -SHA256: 785a8a044d1454ec88837108f443bbb30cc4f529393ffd57118261036bfe59f5 +MD5: 58aff3af22807bc5f4b6357c0038256c +SHA1: c39dc8cd0f8d8b1f7fc8b362c41e69fdf20f668a +SHA256: 8d057f3af311c20675eea767a9df5fa31ff3597c6d5d50fd0cdc34766c40204d FILE_NEW file #1, 0, 0 FILE_BOF_BUFFER diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/thefile0 b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/thefile0 index f4dd7d22f4..0b84e1fd86 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/thefile0 +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/thefile0 @@ -10,4 +10,3 @@ Find the attachment GPS - diff --git a/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log b/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log index b8f576e497..6508792b36 100644 --- a/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log +++ b/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log @@ -305,15 +305,15 @@ [2] 
is_orig: bool = T 1254722770.692743 file_new - [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=] + [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=] 1254722770.692743 file_over_new_connection - [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={^J^J}, rx_hosts={^J^J}, conn_uids={^J^J}, source=SMTP, depth=0, analyzers={^J^J}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=] + [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, 
x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={^J^J}, rx_hosts={^J^J}, conn_uids={^J^J}, source=SMTP, depth=0, analyzers={^J^J}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=] [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, 
last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] [2] is_orig: bool = F 1254722770.692743 file_state_remove - [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=79, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={^J^I74.53.140.153^J}, rx_hosts={^J^I10.10.1.4^J}, conn_uids={^J^ICjhGID4nQcgTWjvg4c^J}, source=SMTP, depth=3, analyzers={^J^J}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=] + [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, 
msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=77, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={^J^I74.53.140.153^J}, rx_hosts={^J^I10.10.1.4^J}, conn_uids={^J^ICjhGID4nQcgTWjvg4c^J}, source=SMTP, depth=3, analyzers={^J^J}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=] 1254722770.692743 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP From 3e669daa05733d8f320752ea310a379ea85ea01c Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 27 Aug 2014 23:44:39 -0400 Subject: [PATCH 032/106] Interface tweaks for PktSrc, plus docs for PktSrc and PktDumper. 
--- aux/plugins | 1 - src/Net.cc | 7 +- src/Net.h | 3 +- src/iosource/BPF_Program.cc | 1 + src/iosource/BPF_Program.h | 5 + src/iosource/Component.h | 2 +- src/iosource/Manager.cc | 8 +- src/iosource/Manager.h | 2 +- src/iosource/PktDumper.h | 114 +++++++- src/iosource/PktSrc.cc | 28 +- src/iosource/PktSrc.h | 336 ++++++++++++++++++++--- src/iosource/pcap/Source.cc | 41 +-- src/iosource/pcap/Source.h | 13 +- src/main.cc | 2 +- testing/btest/core/pcap/filter-error.bro | 2 +- 15 files changed, 464 insertions(+), 101 deletions(-) delete mode 160000 aux/plugins diff --git a/aux/plugins b/aux/plugins deleted file mode 160000 index 6de518922e..0000000000 --- a/aux/plugins +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 6de518922e5f89d52d831ea6fb6adb7fff94437e diff --git a/src/Net.cc b/src/Net.cc index 554aa890ba..adac9c02fd 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -153,8 +153,7 @@ void net_update_time(double new_network_time) } void net_init(name_list& interfaces, name_list& readfiles, - const char* writefile, const char* filter, - int do_watchdog) + const char* writefile, int do_watchdog) { if ( readfiles.length() > 0 ) { @@ -163,7 +162,7 @@ void net_init(name_list& interfaces, name_list& readfiles, for ( int i = 0; i < readfiles.length(); ++i ) { - iosource::PktSrc* ps = iosource_mgr->OpenPktSrc(readfiles[i], filter, false); + iosource::PktSrc* ps = iosource_mgr->OpenPktSrc(readfiles[i], false); assert(ps); if ( ! ps->IsOpen() ) @@ -180,7 +179,7 @@ void net_init(name_list& interfaces, name_list& readfiles, for ( int i = 0; i < interfaces.length(); ++i ) { - iosource::PktSrc* ps = iosource_mgr->OpenPktSrc(interfaces[i], filter, true); + iosource::PktSrc* ps = iosource_mgr->OpenPktSrc(interfaces[i], true); assert(ps); if ( ! 
ps->IsOpen() ) diff --git a/src/Net.h b/src/Net.h index 41cbd69abe..2e466f8c7f 100644 --- a/src/Net.h +++ b/src/Net.h @@ -13,8 +13,7 @@ #include "iosource/PktDumper.h" extern void net_init(name_list& interfaces, name_list& readfiles, - const char* writefile, const char* filter, - int do_watchdog); + const char* writefile, int do_watchdog); extern void net_run(); extern void net_get_final_stats(); extern void net_finish(int drain_events); diff --git a/src/iosource/BPF_Program.cc b/src/iosource/BPF_Program.cc index 8df7729ad1..064e9a743e 100644 --- a/src/iosource/BPF_Program.cc +++ b/src/iosource/BPF_Program.cc @@ -86,6 +86,7 @@ bool BPF_Program::Compile(pcap_t* pcap, const char* filter, uint32 netmask, } m_compiled = true; + m_matches_anything = (strlen(filter) == 0 || strcmp(filter, "ip or not ip") == 0); return true; } diff --git a/src/iosource/BPF_Program.h b/src/iosource/BPF_Program.h index 3efa212bbc..88a4512d4e 100644 --- a/src/iosource/BPF_Program.h +++ b/src/iosource/BPF_Program.h @@ -37,6 +37,10 @@ public: // code, false otherwise. bool IsCompiled() { return m_compiled; } + // Returns true if this program matches any packets. This is not + // comprehensive, but can identify a few cases where it does. + bool MatchesAnything() { return m_matches_anything; } + // Accessor to the compiled program. Returns nil when // no program is currently compiled. bpf_program* GetProgram(); @@ -47,6 +51,7 @@ protected: // (I like to prefix member variables with m_, makes it clear // in the implementation whether it's a global or not. 
--ck) bool m_compiled; + bool m_matches_anything; struct bpf_program m_program; }; diff --git a/src/iosource/Component.h b/src/iosource/Component.h index c93597fd67..cef500e52f 100644 --- a/src/iosource/Component.h +++ b/src/iosource/Component.h @@ -50,7 +50,7 @@ class PktSrcComponent : public iosource::Component { public: enum InputType { LIVE, TRACE, BOTH }; - typedef PktSrc* (*factory_callback)(const std::string& path, const std::string& filter, bool is_live); + typedef PktSrc* (*factory_callback)(const std::string& path, bool is_live); /** * XXX diff --git a/src/iosource/Manager.cc b/src/iosource/Manager.cc index 63b6888801..ebd92e9527 100644 --- a/src/iosource/Manager.cc +++ b/src/iosource/Manager.cc @@ -222,7 +222,7 @@ static std::pair split_prefix(std::string path) return std::make_pair(prefix, path); } -PktSrc* Manager::OpenPktSrc(const std::string& path, const std::string& filter, bool is_live) +PktSrc* Manager::OpenPktSrc(const std::string& path, bool is_live) { std::pair t = split_prefix(path); std::string prefix = t.first; @@ -254,10 +254,10 @@ PktSrc* Manager::OpenPktSrc(const std::string& path, const std::string& filter, // Instantiate packet source. - PktSrc* ps = (*component->Factory())(npath, filter, is_live); + PktSrc* ps = (*component->Factory())(npath, is_live); assert(ps); - if ( ! ps->IsOpen() && ps->ErrorMsg() ) + if ( ! ps->IsOpen() && ps->IsError() ) // Set an error message if it didn't open successfully. ps->Error("could not open"); @@ -298,7 +298,7 @@ PktDumper* Manager::OpenPktDumper(const string& path, bool append) PktDumper* pd = (*component->Factory())(npath, append); assert(pd); - if ( ! pd->IsOpen() && pd->ErrorMsg() ) + if ( ! pd->IsOpen() && pd->IsError() ) // Set an error message if it didn't open successfully. 
pd->Error("could not open"); diff --git a/src/iosource/Manager.h b/src/iosource/Manager.h index 4198c73680..bebed61de7 100644 --- a/src/iosource/Manager.h +++ b/src/iosource/Manager.h @@ -35,7 +35,7 @@ public: // sources (and therefore returning a Size() of zero). void Terminate() { RemoveAll(); } - PktSrc* OpenPktSrc(const std::string& path, const std::string& filter, bool is_live); + PktSrc* OpenPktSrc(const std::string& path, bool is_live); PktDumper* OpenPktDumper(const std::string& path, bool append); protected: diff --git a/src/iosource/PktDumper.h b/src/iosource/PktDumper.h index d8201f977c..5e35bf1ca7 100644 --- a/src/iosource/PktDumper.h +++ b/src/iosource/PktDumper.h @@ -9,45 +9,145 @@ namespace iosource { class PktDumper { public: + /** + * Structure describing a packet. + */ struct Packet { + /** + * The pcap header associated with the packet. + */ const struct pcap_pkthdr* hdr; - const u_char* data; + + /** + * The full content of the packet. + */ + const unsigned char* data; }; + /** + * Constructor. + */ PktDumper(); + + /** + * Destructor. + */ virtual ~PktDumper(); + /** + * Returns the path associated with the dumper. + */ const std::string& Path() const; + + /** + * Returns true if the dumper is open for writing. + */ bool IsOpen() const; + + /** + * Returns the time when the dumper was opened for writing. + */ double OpenTime() const; + + /** + * Returns returns true if the dumper has encountered an error. + */ bool IsError() const; + + /** + * Returns if the dumper has encountered an error, returns a + * corresponding error message. Returns an emoty string otherwise. + */ const char* ErrorMsg() const; + + /** + * Returns the size of the link-layer headers with this dumper. + */ int HdrSize() const; + + /** + * Writes a packet to the dumper. + * + * @param pkt The packet to record. + */ bool Record(const Packet* pkt); // PktDumper interface for derived classes to implement. 
- virtual void Close() = 0; + + /** + * Called by the manager system to open the source. + * + * Derived classes must implement this method. If successful, the + * implementation must call \a Opened(); if not, it must call Error() + * with a corresponding message. + */ virtual void Open() = 0; + + /** + * Called by the manager system to close the dumper. + * + * Derived classes must implement this method. If successful, the + * implementation must call \a Closed(); if not, it must call Error() + * with a corresponding message. + */ + virtual void Close() = 0; + + /** + * Called to write a packet to the dumper. + * + * Derived classes must implement this method. + * + * @param pkt The packet to record. + * + * @return True if succesful, false otherwise (in which case \a + * Error() must have been called.) + */ virtual bool Dump(const Packet* pkt) = 0; protected: friend class Manager; - // Methods to use by derived classed. - // + /** + * Structure to pass back information about the packet dumper to the + * base class. Derived class pass an instance of this to \a Opened(). + */ struct Properties { std::string path; int hdr_size; double open_time; }; - void Init(); - void Done(); - + /** + * Called from the implementations of \a Open() to signal that the + * source has been successully opened. + * + * @param props A properties instance describing the now open source. + */ void Opened(const Properties& props); + + /** + * Called from the implementations of \a Close() to signal that the + * source has been closed. + */ void Closed(); + + /** + * Called from derived classes to signal an error. + * + * @param msg A corresponding error message. + */ void Error(const std::string& msg); + /** + * Called by the manager to initialize the dumper. + */ + void Init(); + + /** + * Called by the manager to shutdown the dumper. 
+ */ + void Done(); + private: bool is_open; Properties props; diff --git a/src/iosource/PktSrc.cc b/src/iosource/PktSrc.cc index 30f58a5b3f..acde8d5ff6 100644 --- a/src/iosource/PktSrc.cc +++ b/src/iosource/PktSrc.cc @@ -53,6 +53,11 @@ uint32 PktSrc::Netmask() const return IsOpen() ? props.netmask : PCAP_NETMASK_UNKNOWN; } +bool PktSrc::IsError() const + { + return ErrorMsg(); + } + int PktSrc::HdrSize() const { return IsOpen() ? props.hdr_size : -1; @@ -87,7 +92,7 @@ void PktSrc::Opened(const Properties& arg_props) props = arg_props; SetClosed(false); - if ( ! PrecompileFilter(0, props.filter) || ! SetFilter(0) ) + if ( ! PrecompileFilter(0, "") || ! SetFilter(0) ) { Close(); return; @@ -378,7 +383,7 @@ void PktSrc::Process() net_packet_dispatch(current_packet.ts, current_packet.hdr, current_packet.data, pkt_hdr_size, this); have_packet = 0; - DoneWithPacket(¤t_packet); + DoneWithPacket(); } const char* PktSrc::Tag() @@ -386,7 +391,7 @@ const char* PktSrc::Tag() return "PktSrc"; } -int PktSrc::ExtractNextPacketInternal() +bool PktSrc::ExtractNextPacketInternal() { if ( have_packet ) return true; @@ -426,7 +431,7 @@ int PktSrc::ExtractNextPacketInternal() return 0; } -int PktSrc::PrecompileBPFFilter(int index, const std::string& filter) +bool PktSrc::PrecompileBPFFilter(int index, const std::string& filter) { char errbuf[PCAP_ERRBUF_SIZE]; @@ -466,7 +471,7 @@ BPF_Program* PktSrc::GetBPFFilter(int index) return code; } -int PktSrc::ApplyBPFFilter(int index, const struct pcap_pkthdr *hdr, const u_char *pkt) +bool PktSrc::ApplyBPFFilter(int index, const struct pcap_pkthdr *hdr, const u_char *pkt) { BPF_Program* code = GetBPFFilter(index); @@ -476,5 +481,18 @@ int PktSrc::ApplyBPFFilter(int index, const struct pcap_pkthdr *hdr, const u_cha Close(); } + if ( code->MatchesAnything() ) + return true; + return pcap_offline_filter(code->GetProgram(), hdr, pkt); } + +bool PktSrc::GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt) + { + if ( ! 
have_packet ) + return false; + + *hdr = current_packet.hdr; + *pkt = current_packet.data; + return true; + } diff --git a/src/iosource/PktSrc.h b/src/iosource/PktSrc.h index 72e1a0da8c..c616a2d5b4 100644 --- a/src/iosource/PktSrc.h +++ b/src/iosource/PktSrc.h @@ -17,69 +17,206 @@ namespace iosource { class PktSrc : public IOSource { public: + /** + * Struct for returning statistics on a packet source. + */ struct Stats { - unsigned int received; // pkts received (w/o drops) + /** + * Packets received by source after filtering (w/o drops). + */ + unsigned int received; + + /** + * Packets dropped by source. + */ unsigned int dropped; // pkts dropped - unsigned int link; // total packets on link - // (not always not available) - // + + /** + * Total number of packets on link before filtering. + * Optional, can be left unset if not available. + */ + unsigned int link; + Stats() { received = dropped = link = 0; } }; + /** + * Constructor. + */ PktSrc(); + + /** + * Destructor. + */ virtual ~PktSrc(); + /** + * Returns the path associated with the source. This is the interface + * name for live source, and a filename for offline sources. + */ const std::string& Path() const; - const std::string& Filter() const; + + /** + * Returns true if this is a live source. + */ bool IsLive() const; + + /** + * Returns the link type of the source. + */ int LinkType() const; + + /** + * Returns the netmask associated with the source, or \c + * PCAP_NETMASK_UNKNOWN if unknown. + */ uint32 Netmask() const; + + /** + * Returns true if the source has flagged an error. + */ + bool IsError() const; + + /** + * If the source encountered an error, returns a corresponding error + * message. Returns an empty string otherwise. + */ const char* ErrorMsg() const; + + /** + * Returns the size of the link-layer header for this source. + */ int HdrSize() const; + + /** + * Returns the snap length for this source. + */ int SnapLen() const; - // Only valid in pseudo-realtime mode. 
+ /** + * In pseudo-realtime mode, returns the logical timestamp of the + * current packet. Undefined if not running pseudo-realtime mode. + */ double CurrentPacketTimestamp(); + + /** + * In pseudo-realtime mode, returns the wall clock time associated + * with current packet. Undefined if not running pseudo-realtime + * mode. + */ double CurrentPacketWallClock(); - // Signal packet source that processing was suspended and is now - // going to be continued. + /** + * Signals packet source that processing is going to be continued + * after previous suspension. + */ void ContinueAfterSuspend(); - // Precompiles a BPF filter and associates the given index with it. - // Returns true on success, 0 if a problem occurred. The compiled - // filter will be then available via GetBPFFilter*(. - int PrecompileBPFFilter(int index, const std::string& filter); + /** + * Precompiles a BPF filter and associates the given index with it. + * The compiled filter will be then available via \a GetBPFFilter(). + * + * This is primarily a helper for packet source implementation that + * want to apply BPF filtering to their packets. + * + * @param index The index to associate with the filter. + * + * @param BPF filter The filter string to precompile. + * + * @return True on success, false if a problem occurred. + */ + bool PrecompileBPFFilter(int index, const std::string& filter); - // Returns the BPF filter with the given index, as compiled by - // PrecompileBPFFilter(), or null if none has been (successfully) - // compiled. + /** + * Returns the precompiled BPF filter associated with a given index, + * if any, as compiled by \a PrecompileBPFFilter(). + * + * This is primarily a helper for packet source implementation that + * want to apply BPF filtering to their packets. + * + * @return The BPF filter associated, or null if none has been + * (successfully) compiled. 
+ */ BPF_Program* GetBPFFilter(int index); - // Applies a precompiled BPF filter to a packet, returning true if it - // maches. This will close the source with an error message if no - // filter with that index has been compiled. - int ApplyBPFFilter(int index, const struct pcap_pkthdr *hdr, const u_char *pkt); + /** + * Applies a precompiled BPF filter to a packet. This will close the + * source with an error message if no filter with that index has been + * compiled. + * + * This is primarily a helper for packet source implementation that + * want to apply BPF filtering to their packets. + * + * @param index The index of the filter to apply. + * + * @param hdr The header of the packet to filter. + * + * @param pkt The content of the packet to filter. + * + * @return True if it maches. */ + bool ApplyBPFFilter(int index, const struct pcap_pkthdr *hdr, const u_char *pkt); + + /** + * Returns the packet currently being processed, if available. + * + * @param hdr A pointer to pass the header of the current packet back. + * + * @param pkt A pointer to pass the content of the current packet + * back. + * + * @return True if the current packet is available, or false if not. + */ + bool GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt); // PacketSource interace for derived classes to override. - // Returns the packet last processed; false if there is no - // current packet available. - virtual bool GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt) = 0; + /** + * Precompiles a filter and associates a given index with it. The + * filter syntax is defined by the packet source's implenentation. + * + * Derived classes must implement this to implement their filtering. + * If they want to use BPF but don't support it natively, they can + * call the corresponding helper method provided by \a PktSrc. + * + * @param index The index to associate with the filter + * + * @param filter The filter string to precompile. 
+ * + * @return True on success, false if a problem occurred or filtering + * is not supported. + */ + virtual bool PrecompileFilter(int index, const std::string& filter) = 0; - // Precompiles a filter and associates the given index with it. - // Returns true on success, 0 if a problem occurred or filtering is - // not supported. - virtual int PrecompileFilter(int index, const std::string& filter) = 0; + /** + * Activates a precompiled filter with the given index. + * + * Derived classes must implement this to implement their filtering. + * If they want to use BPF but don't support it natively, they can + * call the corresponding helper method provided by \a PktSrc. + * + * @param index The index of the filter to activate. + * + * @return True on success, false if a problem occurred or the + * filtering is not supported. + */ + virtual bool SetFilter(int index) = 0; - // Activates the filter with the given index. Returns true on - // success, 0 if a problem occurred or the filtering is not - // supported. - virtual int SetFilter(int index) = 0; - - // Returns current statistics about the source. + /** + * Returns current statistics about the source. + * + * Derived classes must implement this method. + * + * @param stats A statistics structure that the method fill out. + */ virtual void Statistics(Stats* stats) = 0; + /** + * Helper method to return the header size for a given link tyoe. + * + * @param link_type The link tyoe. + * + * @return The header size in bytes. + */ static int GetLinkHeaderSize(int link_type); protected: @@ -87,13 +224,45 @@ protected: // Methods to use by derived classes. + /** + * Structure to pass back information about the packet source to the + * base class. Derived class pass an instance of this to \a Opened(). + */ struct Properties { + /** + * The path associated with the source. This is the interface + * name for live source, and a filename for offline sources. 
+ */ std::string path; - std::string filter; // Maybe different than what's passed in if not (directly) supported. + + /** + * A file descriptor suitable to use with \a select() for + * determining if there's input available from this source. + */ int selectable_fd; + + /** + * The link type for packets from this source. + */ int link_type; + + /** + * The size of the link-layer header for packets from this + * source. \a GetLinkHeaderSize() may be used to derive this + * value. + */ int hdr_size; + + /** + * The netmask associated with the source, or \c + * PCAP_NETMASK_UNKNOWN if unknown. + */ uint32 netmask; + + /** + * True if the source is reading live inout, false for + * working offline. + */ bool is_live; Properties() @@ -102,34 +271,120 @@ protected: } }; + /** + * Structure describing a packet. + */ struct Packet { + /** + * Time associated with the packet. + */ double ts; + + /** + * The pcap header associated with the packet. + */ const struct ::pcap_pkthdr* hdr; + + /** + * The full content of the packet. + */ const u_char* data; }; + /** + * Called from the implementations of \a Open() to signal that the + * source has been successully opened. + * + * @param props A properties instance describing the now open source. + */ void Opened(const Properties& props); + + /** + * Called from the implementations of \a Close() to signal that the + * source has been closed. + */ void Closed(); + + /** + * Can be called from derived classes to send an informational + * message to the user. + * + * @param msg The message to pass on. + */ void Info(const std::string& msg); + + /** + * Can be called from derived classes to flag send an error. + * + * @param msg The message going with the error. + */ void Error(const std::string& msg); + + /** + * Can be called from derived classes to flah a "weird" situation. + * + * @param msg The message to pass on. + * + * @param pkt The packet associated with the weird, or null if none. 
+ */ void Weird(const std::string& msg, const Packet* pkt); + + /** + * Can be called from derived classes to flag an internal error, + * which will abort execution. + * + * @param msg The message to pass on. + */ void InternalError(const std::string& msg); // PktSrc interface for derived classes to implement. + /** + * Called by the manager system to open the source. + * + * Derived classes must implement this method. If successful, the + * implementation must call \a Opened(); if not, it must call Error() + * with a corresponding message. + */ virtual void Open() = 0; + + /** + * Called by the manager system to close the source. + * + * Derived classes must implement this method. If successful, the + * implementation must call \a Closed(); if not, it must call Error() + * with a corresponding message. + */ virtual void Close() = 0; - // Returns 1 on success, 0 on time-out/gone dry. - virtual int ExtractNextPacket(Packet* pkt) = 0; - virtual void DoneWithPacket(Packet* pkt) = 0; + + /** + * Provides the next packet from the source. + * + * @param pkt The packet structure to fill in with the packet's + * information. The callee keep ownership of the data but must + * guaranetee that it stays available at least until \a + * DoneWithPacket() is called. It is guaranteed that no two calls to + * this method will hapen with \a DoneWithPacket() in between. + * + * @return True if a packet is available and *pkt* filled in. False + * if not packet is available or an error occured (which must be + * flageed via Error()). + */ + virtual bool ExtractNextPacket(Packet* pkt) = 0; + + /** + * Signals that the data of previously extracted packet will no + * longer be needed. + */ + virtual void DoneWithPacket() = 0; private: - // Checks if the current packet has a pseudo-time <= current_time. - // If yes, returns pseudo-time, otherwise 0. + // Checks if the current packet has a pseudo-time <= current_time. If + // yes, returns pseudo-time, otherwise 0. 
double CheckPseudoTime(); - // XXX - int ExtractNextPacketInternal(); + // Internal helper for ExtractNextPacket(). + bool ExtractNextPacketInternal(); // IOSource interface implementation. virtual void Init(); @@ -159,5 +414,4 @@ private: } - #endif diff --git a/src/iosource/pcap/Source.cc b/src/iosource/pcap/Source.cc index 79ded790bd..7cca94122b 100644 --- a/src/iosource/pcap/Source.cc +++ b/src/iosource/pcap/Source.cc @@ -16,10 +16,9 @@ PcapSource::~PcapSource() Close(); } -PcapSource::PcapSource(const std::string& path, const std::string& filter, bool is_live) +PcapSource::PcapSource(const std::string& path, bool is_live) { props.path = path; - props.filter = filter; props.is_live = is_live; last_data = 0; } @@ -141,10 +140,10 @@ void PcapSource::OpenOffline() Opened(props); } -int PcapSource::ExtractNextPacket(Packet* pkt) +bool PcapSource::ExtractNextPacket(Packet* pkt) { if ( ! pd ) - return 0; + return false; const u_char* data = pcap_next(pd, ¤t_hdr); @@ -156,7 +155,7 @@ int PcapSource::ExtractNextPacket(Packet* pkt) if ( ! props.is_live ) Close(); - return 0; + return false; } pkt->ts = current_hdr.ts.tv_sec + double(current_hdr.ts.tv_usec) / 1e6; @@ -166,29 +165,29 @@ int PcapSource::ExtractNextPacket(Packet* pkt) if ( current_hdr.len == 0 || current_hdr.caplen == 0 ) { Weird("empty_pcap_header", pkt); - return 0; + return false; } last_hdr = current_hdr; last_data = data; ++stats.received; - return 1; + return true; } -void PcapSource::DoneWithPacket(Packet* pkt) +void PcapSource::DoneWithPacket() { // Nothing to do. } -int PcapSource::PrecompileFilter(int index, const std::string& filter) +bool PcapSource::PrecompileFilter(int index, const std::string& filter) { return PktSrc::PrecompileBPFFilter(index, filter); } -int PcapSource::SetFilter(int index) +bool PcapSource::SetFilter(int index) { if ( ! 
pd ) - return 1; // Prevent error message + return true; // Prevent error message char errbuf[PCAP_ERRBUF_SIZE]; @@ -200,13 +199,13 @@ int PcapSource::SetFilter(int index) "No precompiled pcap filter for index %d", index); Error(errbuf); - return 0; + return false; } if ( pcap_setfilter(pd, code->GetProgram()) < 0 ) { PcapError(); - return 0; + return false; } #ifndef HAVE_LINUX @@ -214,7 +213,7 @@ int PcapSource::SetFilter(int index) stats.received = stats.dropped = stats.link = 0; #endif - return 1; + return true; } void PcapSource::Statistics(Stats* s) @@ -246,16 +245,6 @@ void PcapSource::Statistics(Stats* s) s->dropped = 0; } -bool PcapSource::GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt) - { - if ( ! last_data ) - return false; - - *hdr = &last_hdr; - *pkt = last_data; - return true; - } - void PcapSource::PcapError() { if ( pd ) @@ -285,7 +274,7 @@ void PcapSource::SetHdrSize() } } -iosource::PktSrc* PcapSource::Instantiate(const std::string& path, const std::string& filter, bool is_live) +iosource::PktSrc* PcapSource::Instantiate(const std::string& path, bool is_live) { - return new PcapSource(path, filter, is_live); + return new PcapSource(path, is_live); } diff --git a/src/iosource/pcap/Source.h b/src/iosource/pcap/Source.h index 039bdec81a..b914dc6b63 100644 --- a/src/iosource/pcap/Source.h +++ b/src/iosource/pcap/Source.h @@ -11,22 +11,21 @@ namespace pcap { class PcapSource : public iosource::PktSrc { public: // XXX - PcapSource(const std::string& path, const std::string& filter, bool is_live); + PcapSource(const std::string& path, bool is_live); virtual ~PcapSource(); - static PktSrc* Instantiate(const std::string& path, const std::string& filter, bool is_live); + static PktSrc* Instantiate(const std::string& path, bool is_live); protected: // PktSrc interface. 
virtual void Open(); virtual void Close(); - virtual int ExtractNextPacket(Packet* pkt); - virtual void DoneWithPacket(Packet* pkt); - virtual int PrecompileFilter(int index, const std::string& filter); - virtual int SetFilter(int index); + virtual bool ExtractNextPacket(Packet* pkt); + virtual void DoneWithPacket(); + virtual bool PrecompileFilter(int index, const std::string& filter); + virtual bool SetFilter(int index); virtual void Statistics(Stats* stats); - virtual bool GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt); private: void OpenLive(); diff --git a/src/main.cc b/src/main.cc index 295fa79ee7..bdd3d7072b 100644 --- a/src/main.cc +++ b/src/main.cc @@ -991,7 +991,7 @@ int main(int argc, char** argv) snaplen = internal_val("snaplen")->AsCount(); if ( dns_type != DNS_PRIME ) - net_init(interfaces, read_files, writefile, "", do_watchdog); + net_init(interfaces, read_files, writefile, do_watchdog); BroFile::SetDefaultRotation(log_rotate_interval, log_max_size); diff --git a/testing/btest/core/pcap/filter-error.bro b/testing/btest/core/pcap/filter-error.bro index 1f8ad7a464..1d7b6516db 100644 --- a/testing/btest/core/pcap/filter-error.bro +++ b/testing/btest/core/pcap/filter-error.bro @@ -3,7 +3,7 @@ # @TEST-EXEC: echo ---- >>output # @TEST-EXEC: bro -r $TRACES/workshop_2011_browse.trace %INPUT >>output 2>&1 # @TEST-EXEC: test -e conn.log -# @TEST-EXEC: btest-diff output +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output redef enum PcapFilterID += { A }; From 5e4f498083271c7bede1ac79ba4aed3084eefe10 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 28 Aug 2014 00:53:15 -0400 Subject: [PATCH 033/106] Adding test creating a dynamic pktsrc plugin. 
--- .../btest/Baseline/plugins.pktsrc/conn.log | 10 +++ .../btest/plugins/pktsrc-plugin/.btest-ignore | 0 .../plugins/pktsrc-plugin/CMakeLists.txt | 17 ++++ .../btest/plugins/pktsrc-plugin/src/Foo.cc | 77 +++++++++++++++++++ testing/btest/plugins/pktsrc-plugin/src/Foo.h | 35 +++++++++ .../btest/plugins/pktsrc-plugin/src/Plugin.cc | 20 +++++ testing/btest/plugins/pktsrc.bro | 8 ++ 7 files changed, 167 insertions(+) create mode 100644 testing/btest/Baseline/plugins.pktsrc/conn.log create mode 100644 testing/btest/plugins/pktsrc-plugin/.btest-ignore create mode 100644 testing/btest/plugins/pktsrc-plugin/CMakeLists.txt create mode 100644 testing/btest/plugins/pktsrc-plugin/src/Foo.cc create mode 100644 testing/btest/plugins/pktsrc-plugin/src/Foo.h create mode 100644 testing/btest/plugins/pktsrc-plugin/src/Plugin.cc create mode 100644 testing/btest/plugins/pktsrc.bro diff --git a/testing/btest/Baseline/plugins.pktsrc/conn.log b/testing/btest/Baseline/plugins.pktsrc/conn.log new file mode 100644 index 0000000000..550f520352 --- /dev/null +++ b/testing/btest/Baseline/plugins.pktsrc/conn.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open 2014-08-28-04-53-05 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string bool count string count count count count set[string] +1409193037.000000 CXWv6p3arKYeMETxOg 1.2.0.2 2527 1.2.0.3 6649 tcp - - - - S0 - 0 S 1 64 0 0 (empty) +#close 2014-08-28-04-53-05 diff --git a/testing/btest/plugins/pktsrc-plugin/.btest-ignore b/testing/btest/plugins/pktsrc-plugin/.btest-ignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/btest/plugins/pktsrc-plugin/CMakeLists.txt b/testing/btest/plugins/pktsrc-plugin/CMakeLists.txt new file mode 
100644 index 0000000000..2234907ad2 --- /dev/null +++ b/testing/btest/plugins/pktsrc-plugin/CMakeLists.txt @@ -0,0 +1,17 @@ + +project(Bro-Plugin-Demo-Foo) + +cmake_minimum_required(VERSION 2.6.3) + +if ( NOT BRO_DIST ) + message(FATAL_ERROR "BRO_DIST not set") +endif () + +set(CMAKE_MODULE_PATH ${BRO_DIST}/cmake) + +include(BroPlugin) + +bro_plugin_begin(Demo Foo) +bro_plugin_cc(src/Plugin.cc) +bro_plugin_cc(src/Foo.cc) +bro_plugin_end() diff --git a/testing/btest/plugins/pktsrc-plugin/src/Foo.cc b/testing/btest/plugins/pktsrc-plugin/src/Foo.cc new file mode 100644 index 0000000000..b08bc51d72 --- /dev/null +++ b/testing/btest/plugins/pktsrc-plugin/src/Foo.cc @@ -0,0 +1,77 @@ + +#include +#include + +#include "Foo.h" + +using namespace plugin::Demo_Foo; + +Foo::Foo(const std::string& path, bool is_live) + { + packet = + string("\x45\x00\x00\x40\x15\x55\x40\x00\x3e\x06\x25\x5b\x01\x02\x00\x02" + "\x01\x02\x00\x03\x09\xdf\x19\xf9\x5d\x8a\x36\x7c\x00\x00\x00\x00" + "\xb0\x02\x40\x00\x3c\x72\x00\x00\x02\x04\x05\x5c\x01\x03\x03\x00" + "\x01\x01\x08\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x04\x02", 64); + + props.path = path; + props.selectable_fd = open("/bin/sh", O_RDONLY); // any fd is fine. + props.link_type = DLT_RAW; + props.hdr_size = 0; + props.netmask = 0; + props.is_live = 0; + } + +iosource::PktSrc* Foo::Instantiate(const std::string& path, bool is_live) + { + return new Foo(path, is_live); + } + +void Foo::Open() + { + Opened(props); + } + +void Foo::Close() + { + Closed(); + } + +bool Foo::ExtractNextPacket(Packet* pkt) + { + if ( packet.empty() ) + { + Close(); + return false; + } + + hdr.ts.tv_sec = 1409193037; + hdr.ts.tv_usec = 0; + hdr.caplen = hdr.len = packet.size(); + pkt->ts = hdr.ts.tv_sec; + pkt->hdr = &hdr; + pkt->data = (const u_char *)packet.c_str(); + return true; + } + +void Foo::DoneWithPacket() + { + packet.clear(); + } + +bool Foo::PrecompileFilter(int index, const std::string& filter) + { + // skip for the testing. 
+ return true; + } + +bool Foo::SetFilter(int index) + { + // skip for the testing. + return true; + } + +void Foo::Statistics(Stats* stats) + { + // skip for the testing. + } diff --git a/testing/btest/plugins/pktsrc-plugin/src/Foo.h b/testing/btest/plugins/pktsrc-plugin/src/Foo.h new file mode 100644 index 0000000000..902ac0e37a --- /dev/null +++ b/testing/btest/plugins/pktsrc-plugin/src/Foo.h @@ -0,0 +1,35 @@ + +#ifndef BRO_PLUGIN_DEMO_FOO_H +#define BRO_PLUGIN_DEMO_FOO_H + +#include +#include + +namespace plugin { +namespace Demo_Foo { + +class Foo : public iosource::PktSrc { +public: + Foo(const std::string& path, bool is_live); + + static PktSrc* Instantiate(const std::string& path, bool is_live); + +protected: + virtual void Open(); + virtual void Close(); + virtual bool ExtractNextPacket(Packet* pkt); + virtual void DoneWithPacket(); + virtual bool PrecompileFilter(int index, const std::string& filter); + virtual bool SetFilter(int index); + virtual void Statistics(Stats* stats); + +private: + Properties props; + string packet; + struct pcap_pkthdr hdr; +}; + +} +} + +#endif diff --git a/testing/btest/plugins/pktsrc-plugin/src/Plugin.cc b/testing/btest/plugins/pktsrc-plugin/src/Plugin.cc new file mode 100644 index 0000000000..ecc94866a6 --- /dev/null +++ b/testing/btest/plugins/pktsrc-plugin/src/Plugin.cc @@ -0,0 +1,20 @@ + +#include "Plugin.h" + +#include "Foo.h" + +namespace plugin { namespace Demo_Foo { Plugin plugin; } } + +using namespace plugin::Demo_Foo; + +plugin::Configuration Plugin::Configure() + { + AddComponent(new ::iosource::PktSrcComponent("FooPktSrc", "foo", ::iosource::PktSrcComponent::BOTH, ::plugin::Demo_Foo::Foo::Instantiate)); + + plugin::Configuration config; + config.name = "Demo::Foo"; + config.description = "A Foo packet source"; + config.version.major = 1; + config.version.minor = 0; + return config; + } diff --git a/testing/btest/plugins/pktsrc.bro b/testing/btest/plugins/pktsrc.bro new file mode 100644 index 
0000000000..2bd9be7bb7 --- /dev/null +++ b/testing/btest/plugins/pktsrc.bro @@ -0,0 +1,8 @@ +# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo +# @TEST-EXEC: cp -r %DIR/pktsrc-plugin/* . +# @TEST-EXEC: ./configure --bro-dist=${DIST} && make +# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r foo:XXX %INPUT FilteredTraceDetection::enable=F >>output +# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff conn.log + From 675fba3fdee0a391cfb6fc52d07b08caaca96c76 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 28 Aug 2014 13:13:30 -0500 Subject: [PATCH 034/106] Remove timeouts from remote communication loop. The select() now blocks until there's work to do instead of relying on a small timeout value which can cause unproductive use of cpu cycles. --- src/CMakeLists.txt | 2 + src/ChunkedIO.cc | 46 ++++++++++++++++++++++- src/ChunkedIO.h | 16 +++++++- src/DNS_Mgr.cc | 5 ++- src/DNS_Mgr.h | 3 +- src/Flare.cc | 29 +++++++++++++++ src/Flare.h | 45 +++++++++++++++++++++++ src/FlowSrc.cc | 5 ++- src/FlowSrc.h | 3 +- src/IOSource.cc | 47 ++++++++++++++++++------ src/IOSource.h | 13 +++++-- src/Pipe.cc | 79 ++++++++++++++++++++++++++++++++++++++++ src/Pipe.h | 57 +++++++++++++++++++++++++++++ src/PktSrc.cc | 5 ++- src/PktSrc.h | 3 +- src/RemoteSerializer.cc | 47 +++++++++++------------- src/RemoteSerializer.h | 3 +- src/Serializer.cc | 5 ++- src/Serializer.h | 3 +- src/threading/Manager.cc | 3 +- src/threading/Manager.h | 3 +- 21 files changed, 364 insertions(+), 58 deletions(-) create mode 100644 src/Flare.cc create mode 100644 src/Flare.h create mode 100644 src/Pipe.cc create mode 100644 src/Pipe.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 04867b7189..3764533b66 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -279,6 +279,7 @@ set(bro_SRCS EventRegistry.cc Expr.cc File.cc + Flare.cc FlowSrc.cc Frag.cc Frame.cc @@ -299,6 +300,7 @@ set(bro_SRCS 
OSFinger.cc PacketFilter.cc PersistenceSerializer.cc + Pipe.cc PktSrc.cc PolicyFile.cc PrefixTable.cc diff --git a/src/ChunkedIO.cc b/src/ChunkedIO.cc index 54e2e59575..a94eb98748 100644 --- a/src/ChunkedIO.cc +++ b/src/ChunkedIO.cc @@ -210,6 +210,7 @@ bool ChunkedIOFd::WriteChunk(Chunk* chunk, bool partial) else pending_head = pending_tail = q; + write_flare.Fire(); return Flush(); } @@ -232,6 +233,7 @@ bool ChunkedIOFd::PutIntoWriteBuffer(Chunk* chunk) write_len += len; delete chunk; + write_flare.Fire(); if ( network_time - last_flush > 0.005 ) FlushWriteBuffer(); @@ -269,6 +271,10 @@ bool ChunkedIOFd::FlushWriteBuffer() if ( unsigned(written) == len ) { write_pos = write_len = 0; + + if ( ! pending_head ) + write_flare.Extinguish(); + return true; } @@ -318,7 +324,12 @@ bool ChunkedIOFd::Flush() } } - return FlushWriteBuffer(); + bool rval = FlushWriteBuffer(); + + if ( ! pending_head && write_len == 0 ) + write_flare.Extinguish(); + + return rval; } uint32 ChunkedIOFd::ChunkAvailable() @@ -394,6 +405,9 @@ bool ChunkedIOFd::Read(Chunk** chunk, bool may_block) #ifdef DEBUG_COMMUNICATION AddToBuffer("", true); #endif + if ( ! ChunkAvailable() ) + read_flare.Extinguish(); + return false; } @@ -402,9 +416,15 @@ bool ChunkedIOFd::Read(Chunk** chunk, bool may_block) #ifdef DEBUG_COMMUNICATION AddToBuffer("", true); #endif + read_flare.Extinguish(); return true; } + if ( ChunkAvailable() ) + read_flare.Fire(); + else + read_flare.Extinguish(); + #ifdef DEBUG if ( *chunk ) DBG_LOG(DBG_CHUNKEDIO, "read of size %d %s[%s]", @@ -481,6 +501,9 @@ bool ChunkedIOFd::ReadChunk(Chunk** chunk, bool may_block) read_pos = 0; read_len = bytes_left; + if ( ! ChunkAvailable() ) + read_flare.Extinguish(); + // If allowed, wait a bit for something to read. 
if ( may_block ) { @@ -607,6 +630,14 @@ bool ChunkedIOFd::IsFillingUp() return stats.pending > MAX_BUFFERED_CHUNKS_SOFT; } +std::vector ChunkedIOFd::FdSupplements() const + { + std::vector rval; + rval.push_back(write_flare.FD()); + rval.push_back(read_flare.FD()); + return rval; + } + void ChunkedIOFd::Clear() { while ( pending_head ) @@ -618,6 +649,9 @@ void ChunkedIOFd::Clear() } pending_head = pending_tail = 0; + + if ( write_len == 0 ) + write_flare.Extinguish(); } const char* ChunkedIOFd::Error() @@ -830,6 +864,7 @@ bool ChunkedIOSSL::Write(Chunk* chunk) else write_head = write_tail = q; + write_flare.Fire(); Flush(); return true; } @@ -935,6 +970,7 @@ bool ChunkedIOSSL::Flush() write_state = LEN; } + write_flare.Extinguish(); return true; } @@ -1104,6 +1140,13 @@ bool ChunkedIOSSL::IsFillingUp() return false; } +std::vector ChunkedIOSSL::FdSupplements() const + { + std::vector rval; + rval.push_back(write_flare.FD()); + return rval; + } + void ChunkedIOSSL::Clear() { while ( write_head ) @@ -1114,6 +1157,7 @@ void ChunkedIOSSL::Clear() write_head = next; } write_head = write_tail = 0; + write_flare.Extinguish(); } const char* ChunkedIOSSL::Error() diff --git a/src/ChunkedIO.h b/src/ChunkedIO.h index a9865e4c05..c640e529b8 100644 --- a/src/ChunkedIO.h +++ b/src/ChunkedIO.h @@ -6,8 +6,9 @@ #include "config.h" #include "List.h" #include "util.h" - +#include "Flare.h" #include +#include #ifdef NEED_KRB5_H # include @@ -95,6 +96,11 @@ public: // Returns underlying fd if available, -1 otherwise. virtual int Fd() { return -1; } + // Returns supplementary file descriptors that become read-ready in order + // to signal that there is some work that can be performed. + virtual std::vector FdSupplements() const + { return std::vector(); } + // Makes sure that no additional protocol data is written into // the output stream. If this is activated, the output cannot // be read again by any of these classes! 
@@ -177,6 +183,7 @@ public: virtual void Clear(); virtual bool Eof() { return eof; } virtual int Fd() { return fd; } + virtual std::vector FdSupplements() const; virtual void Stats(char* buffer, int length); private: @@ -240,6 +247,8 @@ private: ChunkQueue* pending_tail; pid_t pid; + bro::Flare write_flare; + bro::Flare read_flare; }; // Chunked I/O using an SSL connection. @@ -262,6 +271,7 @@ public: virtual void Clear(); virtual bool Eof() { return eof; } virtual int Fd() { return socket; } + virtual std::vector FdSupplements() const; virtual void Stats(char* buffer, int length); private: @@ -303,6 +313,8 @@ private: // One SSL for all connections. static SSL_CTX* ctx; + + bro::Flare write_flare; }; #include @@ -328,6 +340,8 @@ public: virtual bool Eof() { return io->Eof(); } virtual int Fd() { return io->Fd(); } + virtual std::vector FdSupplements() const + { return io->FdSupplements(); } virtual void Stats(char* buffer, int length); void EnableCompression(int level) diff --git a/src/DNS_Mgr.cc b/src/DNS_Mgr.cc index 9188d61b96..9fb5c8bb87 100644 --- a/src/DNS_Mgr.cc +++ b/src/DNS_Mgr.cc @@ -1217,9 +1217,10 @@ void DNS_Mgr::IssueAsyncRequests() } } -void DNS_Mgr::GetFds(int* read, int* write, int* except) +void DNS_Mgr::GetFds(std::vector* read, std::vector* write, + std::vector* except) { - *read = nb_dns_fd(nb_dns); + read->push_back(nb_dns_fd(nb_dns)); } double DNS_Mgr::NextTimestamp(double* network_time) diff --git a/src/DNS_Mgr.h b/src/DNS_Mgr.h index 7864505add..fa19914add 100644 --- a/src/DNS_Mgr.h +++ b/src/DNS_Mgr.h @@ -132,7 +132,8 @@ protected: void DoProcess(bool flush); // IOSource interface. 
- virtual void GetFds(int* read, int* write, int* except); + virtual void GetFds(std::vector* read, std::vector* write, + std::vector* except); virtual double NextTimestamp(double* network_time); virtual void Process(); virtual const char* Tag() { return "DNS_Mgr"; } diff --git a/src/Flare.cc b/src/Flare.cc new file mode 100644 index 0000000000..8a0418f631 --- /dev/null +++ b/src/Flare.cc @@ -0,0 +1,29 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "Flare.h" +#include "util.h" +#include +#include +#include + +using namespace bro; + +Flare::Flare() + : pipe(FD_CLOEXEC, FD_CLOEXEC, O_NONBLOCK, O_NONBLOCK) + { + } + +void Flare::Fire() + { + char tmp; + safe_write(pipe.WriteFD(), &tmp, 1); + } + +void Flare::Extinguish() + { + char tmp[256]; + + for ( ; ; ) + if ( read(pipe.ReadFD(), &tmp, sizeof(tmp)) == -1 && errno == EAGAIN ) + break; + } diff --git a/src/Flare.h b/src/Flare.h new file mode 100644 index 0000000000..4e6378847a --- /dev/null +++ b/src/Flare.h @@ -0,0 +1,45 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef BRO_FLARE_H +#define BRO_FLARE_H + +#include "Pipe.h" + +namespace bro { + +class Flare { +public: + + /** + * Create a flare object that can be used to signal a "ready" status via + * a file descriptor that may be integrated with select(), poll(), etc. + * Not thread-safe, but that should only require Fire()/Extinguish() calls + * to be made mutually exclusive (across all copies of a Flare). + */ + Flare(); + + /** + * @return a file descriptor that will become ready if the flare has been + * Fire()'d and not yet Extinguished()'d. + */ + int FD() const + { return pipe.ReadFD(); } + + /** + * Put the object in the "ready" state. + */ + void Fire(); + + /** + * Take the object out of the "ready" state. 
+ */ + void Extinguish(); + +private: + + Pipe pipe; +}; + +} // namespace bro + +#endif // BRO_FLARE_H diff --git a/src/FlowSrc.cc b/src/FlowSrc.cc index 8eed94fcea..4999d9cb97 100644 --- a/src/FlowSrc.cc +++ b/src/FlowSrc.cc @@ -28,10 +28,11 @@ FlowSrc::~FlowSrc() delete netflow_analyzer; } -void FlowSrc::GetFds(int* read, int* write, int* except) +void FlowSrc::GetFds(std::vector* read, std::vector* write, + std::vector* except) { if ( selectable_fd >= 0 ) - *read = selectable_fd; + read->push_back(selectable_fd); } double FlowSrc::NextTimestamp(double* network_time) diff --git a/src/FlowSrc.h b/src/FlowSrc.h index 03dda2761d..ee927604e1 100644 --- a/src/FlowSrc.h +++ b/src/FlowSrc.h @@ -34,7 +34,8 @@ public: // IOSource interface: bool IsReady(); - void GetFds(int* read, int* write, int* except); + void GetFds(std::vector* read, std::vector* write, + std::vector* except); double NextTimestamp(double* network_time); void Process(); diff --git a/src/IOSource.cc b/src/IOSource.cc index d47007caad..540b797162 100644 --- a/src/IOSource.cc +++ b/src/IOSource.cc @@ -24,6 +24,15 @@ void IOSourceRegistry::RemoveAll() dont_counts = sources.size(); } +static void fd_vector_set(const std::vector& fds, fd_set* set, int* max) + { + for ( size_t i = 0; i < fds.size(); ++i ) + { + FD_SET(fds[i], set); + *max = ::max(fds[i], *max); + } + } + IOSource* IOSourceRegistry::FindSoonest(double* ts) { // Remove sources which have gone dry. For simplicity, we only @@ -94,16 +103,14 @@ IOSource* IOSourceRegistry::FindSoonest(double* ts) // be ready. 
continue; - src->fd_read = src->fd_write = src->fd_except = 0; + src->fd_read.clear(); + src->fd_write.clear(); + src->fd_except.clear(); src->src->GetFds(&src->fd_read, &src->fd_write, &src->fd_except); - FD_SET(src->fd_read, &fd_read); - FD_SET(src->fd_write, &fd_write); - FD_SET(src->fd_except, &fd_except); - - maxx = max(src->fd_read, maxx); - maxx = max(src->fd_write, maxx); - maxx = max(src->fd_except, maxx); + fd_vector_set(src->fd_read, &fd_read, &maxx); + fd_vector_set(src->fd_write, &fd_write, &maxx); + fd_vector_set(src->fd_except, &fd_except, &maxx); } // We can't block indefinitely even when all sources are dry: @@ -143,9 +150,7 @@ IOSource* IOSourceRegistry::FindSoonest(double* ts) if ( ! src->src->IsIdle() ) continue; - if ( FD_ISSET(src->fd_read, &fd_read) || - FD_ISSET(src->fd_write, &fd_write) || - FD_ISSET(src->fd_except, &fd_except) ) + if ( src->Ready(&fd_read, &fd_write, &fd_except) ) { double local_network_time = 0; double ts = src->src->NextTimestamp(&local_network_time); @@ -174,3 +179,23 @@ void IOSourceRegistry::Register(IOSource* src, bool dont_count) ++dont_counts; return sources.push_back(s); } + +static bool fd_vector_ready(const std::vector& fds, fd_set* set) + { + for ( size_t i = 0; i < fds.size(); ++i ) + if ( FD_ISSET(fds[i], set) ) + return true; + + return false; + } + +bool IOSourceRegistry::Source::Ready(fd_set* read, fd_set* write, + fd_set* except) const + { + if ( fd_vector_ready(fd_read, read) || + fd_vector_ready(fd_write, write) || + fd_vector_ready(fd_except, except) ) + return true; + + return false; + } diff --git a/src/IOSource.h b/src/IOSource.h index db50bbd2a9..3da70af568 100644 --- a/src/IOSource.h +++ b/src/IOSource.h @@ -4,6 +4,8 @@ #define iosource_h #include +#include +#include #include "Timer.h" using namespace std; @@ -22,7 +24,8 @@ public: // Returns select'able fds (leaves args untouched if we don't have // selectable fds). 
- virtual void GetFds(int* read, int* write, int* except) = 0; + virtual void GetFds(std::vector* read, std::vector* write, + std::vector* except) = 0; // The following two methods are only called when either IsIdle() // returns false or select() on one of the fds indicates that there's @@ -89,9 +92,11 @@ protected: struct Source { IOSource* src; - int fd_read; - int fd_write; - int fd_except; + std::vector fd_read; + std::vector fd_write; + std::vector fd_except; + + bool Ready(fd_set* read, fd_set* write, fd_set* except) const; }; typedef list SourceList; diff --git a/src/Pipe.cc b/src/Pipe.cc new file mode 100644 index 0000000000..51298d07b6 --- /dev/null +++ b/src/Pipe.cc @@ -0,0 +1,79 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "Pipe.h" +#include "Reporter.h" +#include +#include +#include +#include + +using namespace bro; + +static void pipe_fail(int eno) + { + char tmp[256]; + strerror_r(eno, tmp, sizeof(tmp)); + reporter->FatalError("Pipe failure: %s", tmp); + } + +static void set_flags(int fd, int flags) + { + if ( flags ) + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | flags); + } + +static void set_status_flags(int fd, int flags) + { + if ( flags ) + fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | flags); + } + +static int dup_or_fail(int fd, int flags) + { + int rval = dup(fd); + + if ( rval < 0 ) + pipe_fail(errno); + + set_flags(fd, flags); + return rval; + } + +Pipe::Pipe(int flags0, int flags1, int status_flags0, int status_flags1) + { + // pipe2 can set flags atomically, but not yet available everywhere. 
+ if ( ::pipe(fds) ) + pipe_fail(errno); + + flags[0] = flags0; + flags[1] = flags1; + + set_flags(fds[0], flags[0]); + set_flags(fds[1], flags[1]); + set_status_flags(fds[0], status_flags0); + set_status_flags(fds[1], status_flags1); + } + +Pipe::~Pipe() + { + close(fds[0]); + close(fds[1]); + } + +Pipe::Pipe(const Pipe& other) + { + fds[0] = dup_or_fail(other.fds[0], other.flags[0]); + fds[1] = dup_or_fail(other.fds[1], other.flags[1]); + } + +Pipe& Pipe::operator=(const Pipe& other) + { + if ( this == &other ) + return *this; + + close(fds[0]); + close(fds[1]); + fds[0] = dup_or_fail(other.fds[0], other.flags[0]); + fds[1] = dup_or_fail(other.fds[1], other.flags[1]); + return *this; + } diff --git a/src/Pipe.h b/src/Pipe.h new file mode 100644 index 0000000000..493169e615 --- /dev/null +++ b/src/Pipe.h @@ -0,0 +1,57 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef BRO_PIPE_H +#define BRO_PIPE_H + +namespace bro { + +class Pipe { +public: + + /** + * Create a pair of file descriptors via pipe(), or aborts if it cannot. + * @param flags0 file descriptor flags to set on read end of pipe. + * @param flags1 file descriptor flags to set on write end of pipe. + * @param status_flags0 descriptor status flags to set on read end of pipe. + * @param status_flags1 descriptor status flags to set on write end of pipe. + */ + Pipe(int flags0 = 0, int flags1 = 0, int status_flags0 = 0, + int status_flags1 = 0); + + /** + * Close the pair of file descriptors owned by the object. + */ + ~Pipe(); + + /** + * Make a copy of another Pipe object (file descriptors are dup'd). + */ + Pipe(const Pipe& other); + + /** + * Assign a Pipe object by closing file descriptors and duping those of + * the other. + */ + Pipe& operator=(const Pipe& other); + + /** + * @return the file descriptor associated with the read-end of the pipe. 
+ */ + int ReadFD() const + { return fds[0]; } + + /** + * @return the file descriptor associated with the write-end of the pipe. + */ + int WriteFD() const + { return fds[1]; } + +private: + + int fds[2]; + int flags[2]; +}; + +} // namespace bro + +#endif // BRO_PIPE_H diff --git a/src/PktSrc.cc b/src/PktSrc.cc index b5ac3a5d69..04b7b7d552 100644 --- a/src/PktSrc.cc +++ b/src/PktSrc.cc @@ -51,7 +51,8 @@ PktSrc::~PktSrc() delete [] readfile; } -void PktSrc::GetFds(int* read, int* write, int* except) +void PktSrc::GetFds(std::vector* read, std::vector* write, + std::vector* except) { if ( pseudo_realtime ) { @@ -62,7 +63,7 @@ void PktSrc::GetFds(int* read, int* write, int* except) } if ( selectable_fd >= 0 ) - *read = selectable_fd; + read->push_back(selectable_fd); } int PktSrc::ExtractNextPacket() diff --git a/src/PktSrc.h b/src/PktSrc.h index 70eef4dd00..0d4be12b43 100644 --- a/src/PktSrc.h +++ b/src/PktSrc.h @@ -98,7 +98,8 @@ public: // IOSource interface bool IsReady(); - void GetFds(int* read, int* write, int* except); + void GetFds(std::vector* read, std::vector* write, + std::vector* except); double NextTimestamp(double* local_network_time); void Process(); const char* Tag() { return "PktSrc"; } diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 3e46c5a1d2..34c5f1abce 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -1368,12 +1368,17 @@ void RemoteSerializer::Unregister(ID* id) } } -void RemoteSerializer::GetFds(int* read, int* write, int* except) +void RemoteSerializer::GetFds(std::vector* read, std::vector* write, + std::vector* except) { - *read = io->Fd(); + read->push_back(io->Fd()); + std::vector supp = io->FdSupplements(); + + for ( size_t i = 0; i < supp.size(); ++i ) + read->push_back(supp[i]); if ( io->CanWrite() ) - *write = io->Fd(); + write->push_back(io->Fd()); } double RemoteSerializer::NextTimestamp(double* local_network_time) @@ -3356,6 +3361,15 @@ SocketComm::~SocketComm() static unsigned int 
first_rtime = 0; +static void fd_vector_set(const std::vector& fds, fd_set* set, int* max) + { + for ( size_t i = 0; i < fds.size(); ++i ) + { + FD_SET(fds[i], set); + *max = ::max(fds[i], *max); + } + } + void SocketComm::Run() { first_rtime = (unsigned int) current_time(true); @@ -3381,6 +3395,7 @@ void SocketComm::Run() FD_SET(io->Fd(), &fd_read); max_fd = io->Fd(); + fd_vector_set(io->FdSupplements(), &fd_read, &max_fd); loop_over_list(peers, i) { @@ -3389,6 +3404,7 @@ void SocketComm::Run() FD_SET(peers[i]->io->Fd(), &fd_read); if ( peers[i]->io->Fd() > max_fd ) max_fd = peers[i]->io->Fd(); + fd_vector_set(peers[i]->io->FdSupplements(), &fd_read, &max_fd); } else { @@ -3439,38 +3455,17 @@ void SocketComm::Run() if ( ! io->IsFillingUp() && shutting_conns_down ) shutting_conns_down = false; - // We cannot rely solely on select() as the there may - // be some data left in our input/output queues. So, we use - // a small timeout for select and check for data - // manually afterwards. - static long selects = 0; static long canwrites = 0; - static long timeouts = 0; ++selects; if ( io->CanWrite() ) ++canwrites; - // FIXME: Fine-tune this (timeouts, flush, etc.) - struct timeval small_timeout; - small_timeout.tv_sec = 0; - small_timeout.tv_usec = - io->CanWrite() || io->CanRead() ? 1 : 10; - -#if 0 - if ( ! io->CanWrite() ) - usleep(10); -#endif - - int a = select(max_fd + 1, &fd_read, &fd_write, &fd_except, - &small_timeout); - - if ( a == 0 ) - ++timeouts; + int a = select(max_fd + 1, &fd_read, &fd_write, &fd_except, 0); if ( selects % 100000 == 0 ) - Log(fmt("selects=%ld canwrites=%ld timeouts=%ld", selects, canwrites, timeouts)); + Log(fmt("selects=%ld canwrites=%ld", selects, canwrites)); if ( a < 0 ) // Ignore errors for now. 
diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index 9dbfbd9dae..3aa4f91bb0 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -140,7 +140,8 @@ public: void Finish(); // Overidden from IOSource: - virtual void GetFds(int* read, int* write, int* except); + virtual void GetFds(std::vector* read, std::vector* write, + std::vector* except); virtual double NextTimestamp(double* local_network_time); virtual void Process(); virtual TimerMgr::Tag* GetCurrentTag(); diff --git a/src/Serializer.cc b/src/Serializer.cc index 36b1c74000..0ea79cfafb 100644 --- a/src/Serializer.cc +++ b/src/Serializer.cc @@ -1067,9 +1067,10 @@ void EventPlayer::GotFunctionCall(const char* name, double time, // We don't replay function calls. } -void EventPlayer::GetFds(int* read, int* write, int* except) +void EventPlayer::GetFds(std::vector* read, std::vector* write, + std::vector* except) { - *read = fd; + read->push_back(fd); } double EventPlayer::NextTimestamp(double* local_network_time) diff --git a/src/Serializer.h b/src/Serializer.h index 543797a7af..0524906d48 100644 --- a/src/Serializer.h +++ b/src/Serializer.h @@ -355,7 +355,8 @@ public: EventPlayer(const char* file); virtual ~EventPlayer(); - virtual void GetFds(int* read, int* write, int* except); + virtual void GetFds(std::vector* read, std::vector* write, + std::vector* except); virtual double NextTimestamp(double* local_network_time); virtual void Process(); virtual const char* Tag() { return "EventPlayer"; } diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 4491cd42b5..c16b9f4351 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -65,7 +65,8 @@ void Manager::AddMsgThread(MsgThread* thread) msg_threads.push_back(thread); } -void Manager::GetFds(int* read, int* write, int* except) +void Manager::GetFds(std::vector* read, std::vector* write, + std::vector* except) { } diff --git a/src/threading/Manager.h b/src/threading/Manager.h index e839749a91..4f0e53928e 
100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -103,7 +103,8 @@ protected: /** * Part of the IOSource interface. */ - virtual void GetFds(int* read, int* write, int* except); + virtual void GetFds(std::vector* read, std::vector* write, + std::vector* except); /** * Part of the IOSource interface. From dde0ce234f4e866a13a626e04fa9c3b518e5fbde Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 2 Sep 2014 14:22:26 -0500 Subject: [PATCH 035/106] Fix possible buffer over-read in DNS TSIG parsing --- src/analyzer/protocol/dns/DNS.cc | 28 +++++++++++------- src/analyzer/protocol/dns/DNS.h | 1 + .../scripts.base.protocols.dns.tsig/out | 2 ++ testing/btest/Traces/dns-tsig.trace | Bin 0 -> 294 bytes .../btest/scripts/base/protocols/dns/tsig.bro | 10 +++++++ 5 files changed, 30 insertions(+), 11 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.protocols.dns.tsig/out create mode 100644 testing/btest/Traces/dns-tsig.trace create mode 100644 testing/btest/scripts/base/protocols/dns/tsig.bro diff --git a/src/analyzer/protocol/dns/DNS.cc b/src/analyzer/protocol/dns/DNS.cc index 1c77fc6b51..8f66d74857 100644 --- a/src/analyzer/protocol/dns/DNS.cc +++ b/src/analyzer/protocol/dns/DNS.cc @@ -701,6 +701,19 @@ int DNS_Interpreter::ParseRR_EDNS(DNS_MsgInfo* msg, return 1; } +void DNS_Interpreter::ExtractOctets(const u_char*& data, int& len, + BroString** p) + { + uint16 dlen = ExtractShort(data, len); + dlen = min(len, static_cast(dlen)); + + if ( p ) + *p = new BroString(data, dlen, 0); + + data += dlen; + len -= dlen; + } + int DNS_Interpreter::ParseRR_TSIG(DNS_MsgInfo* msg, const u_char*& data, int& len, int rdlength, const u_char* msg_start) @@ -718,24 +731,17 @@ int DNS_Interpreter::ParseRR_TSIG(DNS_MsgInfo* msg, uint32 sign_time_sec = ExtractLong(data, len); unsigned int sign_time_msec = ExtractShort(data, len); unsigned int fudge = ExtractShort(data, len); - - u_char request_MAC[16]; - memcpy(request_MAC, data, sizeof(request_MAC)); - - 
// Here we adjust the size of the requested MAC + u_int16_t - // for length. See RFC 2845, sec 2.3. - int n = sizeof(request_MAC) + sizeof(u_int16_t); - data += n; - len -= n; - + BroString* request_MAC; + ExtractOctets(data, len, &request_MAC); unsigned int orig_id = ExtractShort(data, len); unsigned int rr_error = ExtractShort(data, len); + ExtractOctets(data, len, 0); // Other Data msg->tsig = new TSIG_DATA; msg->tsig->alg_name = new BroString(alg_name, alg_name_end - alg_name, 1); - msg->tsig->sig = new BroString(request_MAC, sizeof(request_MAC), 1); + msg->tsig->sig = request_MAC; msg->tsig->time_s = sign_time_sec; msg->tsig->time_ms = sign_time_msec; msg->tsig->fudge = fudge; diff --git a/src/analyzer/protocol/dns/DNS.h b/src/analyzer/protocol/dns/DNS.h index 569a4ee53a..2d95d979b8 100644 --- a/src/analyzer/protocol/dns/DNS.h +++ b/src/analyzer/protocol/dns/DNS.h @@ -180,6 +180,7 @@ protected: uint16 ExtractShort(const u_char*& data, int& len); uint32 ExtractLong(const u_char*& data, int& len); + void ExtractOctets(const u_char*& data, int& len, BroString** p); int ParseRR_Name(DNS_MsgInfo* msg, const u_char*& data, int& len, int rdlength, diff --git a/testing/btest/Baseline/scripts.base.protocols.dns.tsig/out b/testing/btest/Baseline/scripts.base.protocols.dns.tsig/out new file mode 100644 index 0000000000..ddeb775ec8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.dns.tsig/out @@ -0,0 +1,2 @@ +[query=secret-key, qtype=3, alg_name=hmac-md5.sig-alg.reg.int, sig=F\xbd\xbf1\xef^B6\xb8\xeb\xae1u,\x87\xdb^?, time_signed=21513.794, fudge=300.0, orig_id=9703, rr_error=0, is_query=1] +16 diff --git a/testing/btest/Traces/dns-tsig.trace b/testing/btest/Traces/dns-tsig.trace new file mode 100644 index 0000000000000000000000000000000000000000..9f377b11f7cc509d17f8b0f4e1277b100f52a9d5 GIT binary patch literal 294 zcmca|c+)~A1{MYw`2U}Qff2~L#K#sQ&CSIy9mob@27%ihm)@V)b7I=11sn{n3=Ex0 
z`3wvWg4Y6j`<}3J0KuCk22+O8_Egp9K>Z*ifFyfrMPhD2PAYS9elAFmk*hd0xhSgHNOxDd!F=a2#OxI1!NoOufO=r%`D*;M|u<>1D)L{^C+q>WJ zJ(Jmv*Xs;Rb=q&&t3C&51vyhLl#3x8$Od7E10jwDJJVAI=z0UeCz&88f}F}=3UcUj cUDfA}4ImeROu=yG0Un5yo*@HE?2a4d0A!O;yZ`_I literal 0 HcmV?d00001 diff --git a/testing/btest/scripts/base/protocols/dns/tsig.bro b/testing/btest/scripts/base/protocols/dns/tsig.bro new file mode 100644 index 0000000000..79de4cf9f1 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/tsig.bro @@ -0,0 +1,10 @@ +# @TEST-EXEC: bro -r $TRACES/dns-tsig.trace %INPUT >out +# @TEST-EXEC: btest-diff out + +redef dns_skip_all_addl = F; + +event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional) + { + print ans; + print |ans$sig|; + } From d57b161c405b34e329da3232023147a9f2b49444 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 2 Sep 2014 16:18:55 -0500 Subject: [PATCH 036/106] Fix a memory leak when bind() fails due to EADDRINUSE. --- src/RemoteSerializer.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 3e46c5a1d2..965e360690 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -4212,6 +4212,7 @@ bool SocketComm::Listen() safe_close(fd); CloseListenFDs(); listen_next_try = time(0) + bind_retry_interval; + freeaddrinfo(res0); return false; } From 782b4d0eae18f413e44eaf53804cd6167f6ab59c Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 2 Sep 2014 16:22:15 -0500 Subject: [PATCH 037/106] Change EDNS parsing code to use rdlength more cautiously. It shouldn't ever be negative, but if it were, using it to modify the data pointer/length isn't appropriate. 
--- src/analyzer/protocol/dns/DNS.cc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/analyzer/protocol/dns/DNS.cc b/src/analyzer/protocol/dns/DNS.cc index 8f66d74857..e551351926 100644 --- a/src/analyzer/protocol/dns/DNS.cc +++ b/src/analyzer/protocol/dns/DNS.cc @@ -692,11 +692,6 @@ int DNS_Interpreter::ParseRR_EDNS(DNS_MsgInfo* msg, data += rdlength; len -= rdlength; } - else - { // no data, move on - data += rdlength; - len -= rdlength; - } return 1; } From ff6173721223807b6cf24a4264fd5d50de3e0250 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 2 Sep 2014 16:29:52 -0500 Subject: [PATCH 038/106] Simplify a conditional with equivalent branches. --- src/Val.cc | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Val.cc b/src/Val.cc index 5f605a178e..7c83830bf9 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -465,10 +465,7 @@ void Val::Describe(ODesc* d) const d->SP(); } - if ( d->IsReadable() ) - ValDescribe(d); - else - Val::ValDescribe(d); + ValDescribe(d); } void Val::DescribeReST(ODesc* d) const From 77955d76772eea871f238e4c4cb4da9a3896db43 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 3 Sep 2014 09:51:34 -0500 Subject: [PATCH 039/106] Fix possible abort on writing to a full pipe. --- src/Flare.cc | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/Flare.cc b/src/Flare.cc index 8a0418f631..960e66cbf4 100644 --- a/src/Flare.cc +++ b/src/Flare.cc @@ -1,7 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. #include "Flare.h" -#include "util.h" #include #include #include @@ -16,7 +15,21 @@ Flare::Flare() void Flare::Fire() { char tmp; - safe_write(pipe.WriteFD(), &tmp, 1); + + for ( ; ; ) + { + int n = write(pipe.WriteFD(), &tmp, 1); + + if ( n > 0 ) + // Success -- wrote a byte to pipe. + break; + + if ( n < 0 && errno == EAGAIN ) + // Success -- pipe is full and just need at least one byte in it. 
+ break; + + // Loop because either the byte wasn't written or got EINTR error. + } } void Flare::Extinguish() @@ -25,5 +38,6 @@ void Flare::Extinguish() for ( ; ; ) if ( read(pipe.ReadFD(), &tmp, sizeof(tmp)) == -1 && errno == EAGAIN ) + // Pipe is now drained. break; } From 569853444fd20a958958b40663c21af3ad373bce Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 3 Sep 2014 12:45:38 -0700 Subject: [PATCH 040/106] A set of smaller API tweaks, and polishing. --- src/iosource/Component.cc | 21 +++++++++------------ src/iosource/Component.h | 4 ++-- src/iosource/PktSrc.cc | 19 +++++++++++++++++++ src/iosource/pcap/Source.cc | 11 +---------- 4 files changed, 31 insertions(+), 24 deletions(-) diff --git a/src/iosource/Component.cc b/src/iosource/Component.cc index f54c212352..20232161cd 100644 --- a/src/iosource/Component.cc +++ b/src/iosource/Component.cc @@ -64,10 +64,9 @@ PktSrcComponent::factory_callback PktSrcComponent::Factory() const return factory; } - -void PktSrcComponent::Describe(ODesc* d) const +void PktSrcComponent::DoDescribe(ODesc* d) const { - iosource::Component::Describe(d); + iosource::Component::DoDescribe(d); string prefs; @@ -77,16 +76,16 @@ void PktSrcComponent::Describe(ODesc* d) const if ( prefs.size() ) prefs += ", "; - prefs += *i; + prefs += '"' + *i + '"'; } - d->Add(" (interface prefix"); + d->Add("interface prefix"); if ( prefixes.size() > 1 ) d->Add("es"); - d->Add(": "); + d->Add(" "); d->Add(prefs); - d->Add("; "); + d->Add("; supports "); switch ( type ) { case LIVE: @@ -105,7 +104,6 @@ void PktSrcComponent::Describe(ODesc* d) const reporter->InternalError("unknown PkrSrc type"); } - d->Add(")"); } PktDumperComponent::PktDumperComponent(const std::string& name, const std::string& arg_prefix, factory_callback arg_factory) @@ -141,9 +139,9 @@ bool PktDumperComponent::HandlesPrefix(const string& prefix) const return false; } -void PktDumperComponent::Describe(ODesc* d) const +void PktDumperComponent::DoDescribe(ODesc* d) const { - 
plugin::Component::Describe(d); + plugin::Component::DoDescribe(d); string prefs; @@ -156,12 +154,11 @@ void PktDumperComponent::Describe(ODesc* d) const prefs += *i; } - d->Add(" (dumper prefix"); + d->Add("dumper prefix"); if ( prefixes.size() > 1 ) d->Add("es"); d->Add(": "); d->Add(prefs); - d->Add(")"); } diff --git a/src/iosource/Component.h b/src/iosource/Component.h index cef500e52f..35e8f612e6 100644 --- a/src/iosource/Component.h +++ b/src/iosource/Component.h @@ -93,7 +93,7 @@ public: * Generates a human-readable description of the component. This goes * into the output of \c "bro -NN". */ - virtual void Describe(ODesc* d) const; + virtual void DoDescribe(ODesc* d) const; private: std::vector prefixes; @@ -140,7 +140,7 @@ public: * Generates a human-readable description of the component. This goes * into the output of \c "bro -NN". */ - virtual void Describe(ODesc* d) const; + virtual void DoDescribe(ODesc* d) const; private: std::vector prefixes; diff --git a/src/iosource/PktSrc.cc b/src/iosource/PktSrc.cc index acde8d5ff6..902aaa04be 100644 --- a/src/iosource/PktSrc.cc +++ b/src/iosource/PktSrc.cc @@ -89,6 +89,16 @@ double PktSrc::CurrentPacketWallClock() void PktSrc::Opened(const Properties& arg_props) { + if ( arg_props.hdr_size < 0 ) + { + char buf[512]; + safe_snprintf(buf, sizeof(buf), + "unknown data link type 0x%x", props.link_type); + Error(buf); + Close(); + return; + } + props = arg_props; SetClosed(false); @@ -98,6 +108,9 @@ void PktSrc::Opened(const Properties& arg_props) return; } + if ( props.is_live ) + Info(fmt("listening on %s, capture length %d bytes\n", props.path.c_str(), SnapLen())); + DBG_LOG(DBG_PKTIO, "Opened source %s", props.path.c_str()); } @@ -433,6 +446,9 @@ bool PktSrc::ExtractNextPacketInternal() bool PktSrc::PrecompileBPFFilter(int index, const std::string& filter) { + if ( index < 0 ) + return false; + char errbuf[PCAP_ERRBUF_SIZE]; // Compile filter. 
@@ -465,6 +481,9 @@ bool PktSrc::PrecompileBPFFilter(int index, const std::string& filter) BPF_Program* PktSrc::GetBPFFilter(int index) { + if ( index < 0 ) + return 0; + HashKey* hash = new HashKey(HashKey(bro_int_t(index))); BPF_Program* code = filters.Lookup(hash); delete hash; diff --git a/src/iosource/pcap/Source.cc b/src/iosource/pcap/Source.cc index 7cca94122b..96e0bb48e5 100644 --- a/src/iosource/pcap/Source.cc +++ b/src/iosource/pcap/Source.cc @@ -108,9 +108,8 @@ void PcapSource::OpenLive() return; props.is_live = true; - Opened(props); - Info(fmt("listening on %s, capture length %d bytes\n", props.path.c_str(), SnapLen())); + Opened(props); } void PcapSource::OpenOffline() @@ -264,14 +263,6 @@ void PcapSource::SetHdrSize() props.link_type = pcap_datalink(pd); props.hdr_size = GetLinkHeaderSize(props.link_type); - - if ( props.hdr_size < 0 ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "unknown data link type 0x%x", props.link_type); - Error(errbuf); - Close(); - } } iosource::PktSrc* PcapSource::Instantiate(const std::string& path, bool is_live) From 09214652975c5f9f5b92bcee7c12f325a3a7b37b Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 3 Sep 2014 16:23:13 -0500 Subject: [PATCH 041/106] Fix Pipe copy/assignment to make a copy of flags. 
--- src/Pipe.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Pipe.cc b/src/Pipe.cc index 51298d07b6..3f60409fdb 100644 --- a/src/Pipe.cc +++ b/src/Pipe.cc @@ -64,6 +64,8 @@ Pipe::Pipe(const Pipe& other) { fds[0] = dup_or_fail(other.fds[0], other.flags[0]); fds[1] = dup_or_fail(other.fds[1], other.flags[1]); + flags[0] = other.flags[0]; + flags[1] = other.flags[1]; } Pipe& Pipe::operator=(const Pipe& other) @@ -75,5 +77,7 @@ Pipe& Pipe::operator=(const Pipe& other) close(fds[1]); fds[0] = dup_or_fail(other.fds[0], other.flags[0]); fds[1] = dup_or_fail(other.fds[1], other.flags[1]); + flags[0] = other.flags[0]; + flags[1] = other.flags[1]; return *this; } From 43e63daa452f6933622d561c1d496fe4f36f2d3b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 3 Sep 2014 17:37:35 -0700 Subject: [PATCH 042/106] Fixing Bro-level BPF filtering. --- src/iosource/BPF_Program.cc | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/iosource/BPF_Program.cc b/src/iosource/BPF_Program.cc index 064e9a743e..70469c97e7 100644 --- a/src/iosource/BPF_Program.cc +++ b/src/iosource/BPF_Program.cc @@ -58,7 +58,14 @@ int pcap_compile_nopcap(int snaplen_arg, int linktype_arg, } #endif -BPF_Program::BPF_Program() : m_compiled(), m_program() +// Simple heuristic to identify filters that always match, so that we can +// skip the filtering in that case. "ip or not ip" is Bro's default filter. +static bool filter_matches_anything(const char *filter) + { + return (! 
filter) || strlen(filter) == 0 || strcmp(filter, "ip or not ip") == 0; } + +BPF_Program::BPF_Program() : m_compiled(), m_matches_anything(false), m_program() { } @@ -86,7 +93,7 @@ bool BPF_Program::Compile(pcap_t* pcap, const char* filter, uint32 netmask, } m_compiled = true; - m_matches_anything = (strlen(filter) == 0 || strcmp(filter, "ip or not ip") == 0); + m_matches_anything = filter_matches_anything(filter); return true; } @@ -114,7 +121,10 @@ bool BPF_Program::Compile(int snaplen, int linktype, const char* filter, #endif if ( err == 0 ) + { m_compiled = true; + m_matches_anything = filter_matches_anything(filter); + } return err == 0; } From 2d8368fee90bf53398ba572227c74a697033f2e4 Mon Sep 17 00:00:00 2001 From: Johanna Amann Date: Wed, 3 Sep 2014 22:07:21 -0700 Subject: [PATCH 043/106] fix null pointer dereference in ocsp verification code in case no certificate is sent as part of the ocsp reply. Addresses BIT-1212 There is an additional issue here that prevents the correct verification of proofs in quite a few cases; this will be addressed in a separate commit. --- src/file_analysis/analyzer/x509/functions.bif | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/file_analysis/analyzer/x509/functions.bif b/src/file_analysis/analyzer/x509/functions.bif index 9a8a8e78b7..d7903b4921 100644 --- a/src/file_analysis/analyzer/x509/functions.bif +++ b/src/file_analysis/analyzer/x509/functions.bif @@ -250,6 +250,17 @@ function x509_ocsp_verify%(certs: x509_opaque_vector, ocsp_reply: string, root_c // inject the certificates in the certificate list of the OCSP reply, they actually are used during // the lookup. // Yay.
+ + if ( basic->certs == 0 ) + { + basic->certs = sk_X509_new_null(); + if ( !basic->certs ) + { + rval = x509_result_record(-1, "Could not allocate basic x509 stack"); + goto x509_ocsp_cleanup; + } + } + issuer_certificate = 0; for ( int i = 0; i < sk_X509_num(untrusted_certs); i++) { From 5c9a7a92a49d5854a2732c03b72d5517bbf91888 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 4 Sep 2014 13:32:24 -0500 Subject: [PATCH 044/106] Add more script language reference documentation Added new sections on operators, statements, and directives. Also improved the documentation on types and attributes by providing more examples and added a chart on the top of each page with links to each type and attribute for easier access to the information. --- doc/ext/bro.py | 7 + doc/script-reference/attributes.rst | 179 +++++++-- doc/script-reference/directives.rst | 173 ++++++++ doc/script-reference/index.rst | 3 + doc/script-reference/operators.rst | 179 +++++++++ doc/script-reference/statements.rst | 602 ++++++++++++++++++++++++++++ doc/script-reference/types.rst | 523 +++++++++++++----------- 7 files changed, 1385 insertions(+), 281 deletions(-) create mode 100644 doc/script-reference/directives.rst create mode 100644 doc/script-reference/operators.rst create mode 100644 doc/script-reference/statements.rst diff --git a/doc/ext/bro.py b/doc/ext/bro.py index 9295c63312..1df4a518c2 100644 --- a/doc/ext/bro.py +++ b/doc/ext/bro.py @@ -176,6 +176,10 @@ class BroIdentifier(BroGeneric): def get_index_text(self, objectname, name): return name +class BroKeyword(BroGeneric): + def get_index_text(self, objectname, name): + return name + class BroAttribute(BroGeneric): def get_index_text(self, objectname, name): return _('%s (attribute)') % (name) @@ -213,6 +217,7 @@ class BroDomain(Domain): 'type': ObjType(l_('type'), 'type'), 'namespace': ObjType(l_('namespace'), 'namespace'), 'id': ObjType(l_('id'), 'id'), + 'keyword': ObjType(l_('keyword'), 'keyword'), 'enum': ObjType(l_('enum'), 
'enum'), 'attr': ObjType(l_('attr'), 'attr'), } @@ -221,6 +226,7 @@ class BroDomain(Domain): 'type': BroGeneric, 'namespace': BroNamespace, 'id': BroIdentifier, + 'keyword': BroKeyword, 'enum': BroEnum, 'attr': BroAttribute, } @@ -229,6 +235,7 @@ class BroDomain(Domain): 'type': XRefRole(), 'namespace': XRefRole(), 'id': XRefRole(), + 'keyword': XRefRole(), 'enum': XRefRole(), 'attr': XRefRole(), 'see': XRefRole(), diff --git a/doc/script-reference/attributes.rst b/doc/script-reference/attributes.rst index ca66ab2112..0160499fb9 100644 --- a/doc/script-reference/attributes.rst +++ b/doc/script-reference/attributes.rst @@ -1,38 +1,120 @@ Attributes ========== -Attributes occur at the end of type or event declarations and change their -behavior. The syntax is ``&key`` or ``&key=val``, e.g., ``type T: -set[count] &read_expire=5min`` or ``event foo() &priority=-3``. The Bro -scripting language supports the following attributes. +The Bro scripting language supports the following attributes. -.. bro:attr:: &optional ++-----------------------------+-----------------------------------------------+ +| Name | Description | ++=============================+===============================================+ +| :bro:attr:`&redef` |Redefine a global constant or extend a type. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&priority` |Specify priority for event handler or hook. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&log` |Mark a record field to be written to a log. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&optional` |Allow a record field value to be missing. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&default` |Specifies a default value. 
| ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&add_func` |Specify a function to call for each "redef +=".| ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&delete_func` |Same as "&add_func", except for "redef -=". | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&expire_func` |Specify a function to call when container | +| |element expires. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&read_expire` |Specify a read timeout interval. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&write_expire` |Specify a write timeout interval. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&create_expire` |Specify a creation timeout interval. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&synchronized` |Synchronize a variable across nodes. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&persistent` |Make a variable persistent (written to disk). | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&rotate_interval`|Rotate a file after specified interval. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&rotate_size` |Rotate a file after specified file size. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&encrypt` |Encrypt a file when writing to disk. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&raw_output` |Open file in raw mode (chars. 
are not escaped).| ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&mergeable` |Prefer set union for synchronized state. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&group` |Group event handlers to activate/deactivate. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&error_handler` |Used internally for reporter framework events. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&type_column` |Used by input framework for "port" type. | ++-----------------------------+-----------------------------------------------+ - Allows a record field to be missing. For example the type ``record { - a: addr; b: port &optional; }`` could be instantiated both as - singleton ``[$a=127.0.0.1]`` or pair ``[$a=127.0.0.1, $b=80/tcp]``. - -.. bro:attr:: &default - - Uses a default value for a record field, a function/hook/event - parameter, or container elements. For example, ``table[int] of - string &default="foo"`` would create a table that returns the - :bro:type:`string` ``"foo"`` for any non-existing index. +Here is a more detailed explanation of each attribute: .. bro:attr:: &redef - Allows for redefinition of initial object values. This is typically - used with constants, for example, ``const clever = T &redef;`` would - allow the constant to be redefined at some later point during script - execution. + Allows for redefinition of initial values of global objects declared as + constant. -.. bro:attr:: &rotate_interval + In this example, the constant (assuming it is global) can be redefined + with a :bro:keyword:`redef` at some later point:: - Rotates a file after a specified interval. + const clever = T &redef; -.. bro:attr:: &rotate_size +.. bro:attr:: &priority - Rotates a file after it has reached a given size in bytes. 
+ Specifies the execution priority (as a signed integer) of a hook or + event handler. Higher values are executed before lower ones. The + default value is 0. Example:: + + event bro_init() &priority=10 + { + print "high priority"; + } + +.. bro:attr:: &log + + Writes a :bro:type:`record` field to the associated log stream. + +.. bro:attr:: &optional + + Allows a record field value to be missing (i.e., neither initialized nor + ever assigned a value). + + In this example, the record could be instantiated with either + "myrec($a=127.0.0.1)" or "myrec($a=127.0.0.1, $b=80/tcp)":: + + type myrec: record { a: addr; b: port &optional; }; + + The ``?$`` operator can be used to check if a record field has a value or + not (it returns a ``bool`` value of ``T`` if the field has a value, + and ``F`` if not). + +.. bro:attr:: &default + + Specifies a default value for a record field, container element, or a + function/hook/event parameter. + + In this example, the record could be instantiated with either + "myrec($a=5, $c=3.14)" or "myrec($a=5, $b=53/udp, $c=3.14)":: + + type myrec: record { a: count; b: port &default=80/tcp; c: double; }; + + In this example, the table will return the string ``"foo"`` for any + attempted access to a non-existing index:: + + global mytable: table[count] of string &default="foo"; + + When used with function/hook/event parameters, all of the parameters + with the "&default" attribute must come after all other parameters. + For example, the following function could be called either as "myfunc(5)" + or as "myfunc(5, 53/udp)":: + + function myfunc(a: count, b: port &default=80/tcp) + { + print a, b; + } .. bro:attr:: &add_func @@ -46,8 +128,8 @@ scripting language supports the following attributes. .. bro:attr:: &delete_func - Same as &add_func, except for "redef" declarations that use the "-=" - operator. + Same as :bro:attr:`&add_func`, except for :bro:keyword:`redef` declarations + that use the "-=" operator. .. 
bro:attr:: &expire_func @@ -76,23 +158,29 @@ scripting language supports the following attributes. is, the element expires after the given amount of time since it has been inserted into the container, regardless of any reads or writes. -.. bro:attr:: &persistent - - Makes a variable persistent, i.e., its value is written to disk (per - default at shutdown time). - .. bro:attr:: &synchronized Synchronizes variable accesses across nodes. The value of a ``&synchronized`` variable is automatically propagated to all peers when it changes. +.. bro:attr:: &persistent + + Makes a variable persistent, i.e., its value is written to disk (per + default at shutdown time). + +.. bro:attr:: &rotate_interval + + Rotates a file after a specified interval. + +.. bro:attr:: &rotate_size + + Rotates a file after it has reached a given size in bytes. + .. bro:attr:: &encrypt Encrypts files right before writing them to disk. -.. TODO: needs to be documented in more detail. - .. bro:attr:: &raw_output Opens a file in raw mode, i.e., non-ASCII characters are not @@ -108,21 +196,11 @@ scripting language supports the following attributes. inconsistencies and can be avoided by unifying the two sets, rather than merely overwriting the old value. -.. bro:attr:: &priority - - Specifies the execution priority (as a signed integer) of a hook or - event handler. Higher values are executed before lower ones. The - default value is 0. - .. bro:attr:: &group Groups event handlers such that those in the same group can be jointly activated or deactivated. -.. bro:attr:: &log - - Writes a record field to the associated log stream. - .. bro:attr:: &error_handler Internally set on the events that are associated with the reporter @@ -135,5 +213,20 @@ scripting language supports the following attributes. .. bro:attr:: &type_column Used by the input framework. 
It can be used on columns of type - :bro:type:`port` and specifies the name of an additional column in + :bro:type:`port` (such a column only contains the port number) and + specifies the name of an additional column in the input file which specifies the protocol of the port (tcp/udp/icmp). + + In the following example, the input file would contain four columns + named "ip", "srcp", "proto", and "msg":: + + type Idx: record { + ip: addr; + }; + + + type Val: record { + srcp: port &type_column = "proto"; + msg: string; + }; + diff --git a/doc/script-reference/directives.rst b/doc/script-reference/directives.rst new file mode 100644 index 0000000000..e513e93911 --- /dev/null +++ b/doc/script-reference/directives.rst @@ -0,0 +1,173 @@ +Directives +========== + +The Bro scripting language supports a number of directives that can +affect which scripts will be loaded or which lines in a script will be +executed. Directives are evaluated before script execution begins. + +.. bro:keyword:: @DEBUG + + TODO + + +.. bro:keyword:: @DIR + + Expands to the directory pathname where the current script is located. + + Example:: + + print "Directory:", @DIR + + +.. bro:keyword:: @FILENAME + + Expands to the filename of the current script. + + Example:: + + print "File:", @FILENAME + +.. bro:keyword:: @load + + Loads the specified Bro script, specified as the relative pathname + of the file (relative to one of the directories in Bro's file search path). + If the Bro script filename ends with ".bro", then you don't need to + specify the file extension. The filename cannot contain any whitespace. 
+ + In this example, Bro will try to load a script + "policy/misc/capture-loss.bro" by looking in each directory in the file + search path (the file search path can be changed by setting the BROPATH + environment variable):: + + @load policy/misc/capture-loss + + If you specify the name of a directory instead of a filename, then + Bro will try to load a file in that directory called "__load__.bro" + (presumably that file will contain additional "@load" directives). + + In this example, Bro will try to load a file "tuning/defaults/__load__.bro" + by looking in each directory in the file search path:: + + @load tuning/defaults + + The purpose of this directive is to ensure that all script dependencies + are satisfied, and to avoid having to list every needed Bro script + on the command-line. Bro keeps track of which scripts have been + loaded, so it is not an error to load a script more than once (once + a script has been loaded, any subsequent "@load" directives + for that script are ignored). + + +.. bro:keyword:: @load-sigs + + This works similarly to "@load", except that in this case the filename + represents a signature file (not a Bro script). If the signature filename + ends with ".sig", then you don't need to specify the file extension + in the "@load-sigs" directive. The filename cannot contain any + whitespace. + + In this example, Bro will try to load a signature file + "base/protocols/ssl/dpd.sig":: + + @load-sigs base/protocols/ssl/dpd + + The format for a signature file is explained in the documentation for the + `Signature Framework <../frameworks/signatures.html>`_. + + +.. bro:keyword:: @unload + + This specifies a Bro script that we don't want to load (so a subsequent + attempt to load the specified script will be skipped). However, + if the specified script has already been loaded, then this directive + has no effect.
+ + In the following example, if the "policy/misc/capture-loss.bro" script + has not been loaded yet, then Bro will not load it:: + + @unload policy/misc/capture-loss + + +.. bro:keyword:: @prefixes + + Specifies a filename prefix to use when looking for script files + to load automatically. The prefix cannot contain any whitespace. + + In the following example, the prefix "cluster" is used and all prefixes + that were previously specified are not used:: + + @prefixes = cluster + + In the following example, the prefix "cluster-manager" is used in + addition to any previously-specified prefixes:: + + @prefixes += cluster-manager + + The way this works is that after Bro parses all script files, then for each + loaded script Bro will take the absolute path of the script and then + it removes the portion of the directory path that is in Bro's file + search path. Then it replaces each "/" character with a period "." + and then prepends the prefix (specified in the "@prefixes" directive) + followed by a period. The resulting filename is searched for in each + directory in Bro's file search path. If a matching file is found, then + the file is automatically loaded. + + For example, if a script called "local.bro" has been loaded, and a prefix + of "test" was specified, then Bro will look for a file named + "test.local.bro" in each directory of Bro's file search path. + + An alternative way to specify prefixes is to use the "-p" Bro + command-line option. + +.. bro:keyword:: @if + + The specified expression must evaluate to type :bro:type:`bool`. If the + value is true, then the following script lines (up to the next "@else" + or "@endif") are available to be executed. + + Example:: + + @if ( ver == 2 ) + print "version 2 detected"; + @endif + +.. bro:keyword:: @ifdef + + This works like "@if", except that the result is true if the specified + identifier is defined. + + Example:: + + @ifdef ( pi ) + print "pi is defined"; + @endif + +.. 
bro:keyword:: @ifndef + + This works exactly like "@ifdef", except that the result is true if the + specified identifier is not defined. + + Example:: + + @ifndef ( pi ) + print "pi is not defined"; + @endif + +.. bro:keyword:: @else + + This directive is optional after an "@if", "@ifdef", or + "@ifndef". If present, it provides an else clause. + + Example:: + + @ifdef ( pi ) + print "pi is defined"; + @else + print "pi is not defined"; + @endif + +.. bro:keyword:: @endif + + This directive is required to terminate each "@if", "@ifdef", or + "@ifndef". + diff --git a/doc/script-reference/index.rst b/doc/script-reference/index.rst index a2c6f0a24f..dc11447c5f 100644 --- a/doc/script-reference/index.rst +++ b/doc/script-reference/index.rst @@ -5,8 +5,11 @@ Script Reference .. toctree:: :maxdepth: 1 + operators types attributes + statements + directives notices proto-analyzers file-analyzers diff --git a/doc/script-reference/operators.rst b/doc/script-reference/operators.rst new file mode 100644 index 0000000000..7fa52cf4b2 --- /dev/null +++ b/doc/script-reference/operators.rst @@ -0,0 +1,179 @@ +Operators +========= + +The Bro scripting language supports the following operators. Note that +each data type only supports a subset of these operators. For more +details, see the documentation about the `data types `_. + +Relational operators +-------------------- + +The relational operators evaluate to type :bro:type:`bool`. 
+ ++------------------------------+--------------+ +| Name | Syntax | ++==============================+==============+ +| Equality | *a* == *b* | ++------------------------------+--------------+ +| Inequality | *a* != *b* | ++------------------------------+--------------+ +| Less than | *a* < *b* | ++------------------------------+--------------+ +| Less than or equal | *a* <= *b* | ++------------------------------+--------------+ +| Greater than | *a* > *b* | ++------------------------------+--------------+ +| Greater than or equal | *a* >= *b* | ++------------------------------+--------------+ + + +Logical operators +----------------- + +The logical operators require operands of type :bro:type:`bool`, and +evaluate to type :bro:type:`bool`. + ++------------------------------+--------------+ +| Name | Syntax | ++==============================+==============+ +| Logical AND | *a* && *b* | ++------------------------------+--------------+ +| Logical OR | *a* \|\| *b* | ++------------------------------+--------------+ +| Logical NOT | ! *a* | ++------------------------------+--------------+ + + +Arithmetic operators +-------------------- + ++------------------------------+-------------+-------------------------------+ +| Name | Syntax | Notes | ++==============================+=============+===============================+ +| Addition | *a* + *b* | If operands are strings, then | +| | | this performs string | +| | | concatenation. | ++------------------------------+-------------+-------------------------------+ +| Subtraction | *a* - *b* | | ++------------------------------+-------------+-------------------------------+ +| Multiplication | *a* \* *b* | | ++------------------------------+-------------+-------------------------------+ +| Division | *a* / *b* | | ++------------------------------+-------------+-------------------------------+ +| Modulo | *a* % *b* | Operand types cannot be | +| | | double. 
| ++------------------------------+-------------+-------------------------------+ +| Unary plus | \+ *a* | | ++------------------------------+-------------+-------------------------------+ +| Unary minus | \- *a* | | ++------------------------------+-------------+-------------------------------+ +| Pre-increment | ++ *a* | Operand type cannot be | +| | | double. | ++------------------------------+-------------+-------------------------------+ +| Pre-decrement | ``--`` *a* | Operand type cannot be | +| | | double. | ++------------------------------+-------------+-------------------------------+ +| Absolute value | \| *a* \| | If operand is string, set, | +| | | table, or vector, this | +| | | evaluates to number | +| | | of elements. | ++------------------------------+-------------+-------------------------------+ + + +Assignment operators +-------------------- + +The assignment operators evaluate to the result of the assignment. + ++------------------------------+-------------+ +| Name | Syntax | ++==============================+=============+ +| Assignment | *a* = *b* | ++------------------------------+-------------+ +| Addition assignment | *a* += *b* | ++------------------------------+-------------+ +| Subtraction assignment | *a* -= *b* | ++------------------------------+-------------+ + + +Record field operators +---------------------- + +The record field operators take a :bro:type:`record` as the first operand, +and a field name as the second operand. For both operators, the specified +field name must be in the declaration of the record type. + ++------------------------------+-------------+-------------------------------+ +| Name | Syntax | Notes | ++==============================+=============+===============================+ +| Field access | *a* $ *b* | | ++------------------------------+-------------+-------------------------------+ +| Field value existence test | *a* ?$ *b* | Evaluates to type "bool". 
| +| | | True if the specified field | +| | | has been assigned a value, or | +| | | false if not. | ++------------------------------+-------------+-------------------------------+ + + +Other operators +--------------- + ++--------------------------------+-------------------+------------------------+ +| Name | Syntax | Notes | ++================================+===================+========================+ +| Membership test | *a* in *b* |Evaluates to type | +| | |"bool". Do not | +| | |confuse this use of "in"| +| | |with that used in a | +| | |:bro:keyword:`for` | +| | |statement. | ++--------------------------------+-------------------+------------------------+ +| Non-membership test | *a* !in *b* |This is the logical NOT | +| | |of the "in" operator. | +| | |For example: "a !in b" | +| | |is equivalent to | +| | |"!(a in b)". | ++--------------------------------+-------------------+------------------------+ +| Table or vector element access | *a* [ *b* ] |This operator can also | +| | |be used with a set, but | +| | |only with the | +| | |:bro:keyword:`add` or | +| | |:bro:keyword:`delete` | +| | |statement. | ++--------------------------------+-------------------+------------------------+ +| Substring extraction | *a* [ *b* : *c* ] |See the | +| | |:bro:type:`string` type | +| | |for more details. | ++--------------------------------+-------------------+------------------------+ +| Create a deep copy | copy ( *a* ) |This is relevant only | +| | |for data types that are | +| | |assigned by reference, | +| | |such as "vector", "set",| +| | |"table", and "record". | ++--------------------------------+-------------------+------------------------+ +| Module namespace access | *a* \:\: *b* |The first operand is the| +| | |module name, and the | +| | |second operand is an | +| | |identifier that refers | +| | |to a global variable, | +| | |enumeration constant, or| +| | |user-defined type that | +| | |was exported from the | +| | |module. 
| ++--------------------------------+-------------------+------------------------+ +| Conditional | *a* ? *b* : *c* |The first operand must | +| | |evaluate to a "bool" | +| | |type. If true, then the| +| | |second expression is | +| | |evaluated and is the | +| | |result of the entire | +| | |expression. Otherwise, | +| | |the third expression is | +| | |evaluated and is the | +| | |result of the entire | +| | |expression. The types of| +| | |the second and third | +| | |operands must be | +| | |compatible. | ++--------------------------------+-------------------+------------------------+ + diff --git a/doc/script-reference/statements.rst b/doc/script-reference/statements.rst new file mode 100644 index 0000000000..9c8b8de20d --- /dev/null +++ b/doc/script-reference/statements.rst @@ -0,0 +1,602 @@ +Declarations and Statements +=========================== + +The Bro scripting language supports the following declarations and +statements. + + +Declarations +~~~~~~~~~~~~ + ++----------------------------+-----------------------------+ +| Name | Description | ++============================+=============================+ +| :bro:keyword:`module` | Change the current module | ++----------------------------+-----------------------------+ +| :bro:keyword:`export` | Export identifiers from the | +| | current module | ++----------------------------+-----------------------------+ +| :bro:keyword:`global` | Declare a global variable | ++----------------------------+-----------------------------+ +| :bro:keyword:`const` | Declare a constant | ++----------------------------+-----------------------------+ +| :bro:keyword:`type` | Declare a user-defined type | ++----------------------------+-----------------------------+ +| :bro:keyword:`redef` | Redefine a global value or | +| | extend a user-defined type | ++----------------------------+-----------------------------+ +| `function/event/hook`_ | Declare a function, event | +| | handler, or hook | 
++----------------------------+-----------------------------+ + +Statements +~~~~~~~~~~ + ++----------------------------+------------------------+ +| Name | Description | ++============================+========================+ +| :bro:keyword:`local` | Declare a local | +| | variable | ++----------------------------+------------------------+ +| :bro:keyword:`add`, | Add or delete | +| :bro:keyword:`delete` | elements | ++----------------------------+------------------------+ +| :bro:keyword:`print` | Print to stdout or a | +| | file | ++----------------------------+------------------------+ +| :bro:keyword:`for`, | Loop over each | +| :bro:keyword:`next`, | element in a container | +| :bro:keyword:`break` | object | ++----------------------------+------------------------+ +| :bro:keyword:`if` | Evaluate boolean | +| | expression and if true,| +| | execute a statement | ++----------------------------+------------------------+ +| :bro:keyword:`switch`, | Evaluate expression | +| :bro:keyword:`break`, | and execute statement | +| :bro:keyword:`fallthrough` | with a matching value | ++----------------------------+------------------------+ +| :bro:keyword:`when` | Asynchronous execution | ++----------------------------+------------------------+ +| :bro:keyword:`event`, | Invoke or schedule | +| :bro:keyword:`schedule` | an event handler | ++----------------------------+------------------------+ +| :bro:keyword:`return` | Return from function, | +| | hook, or event handler | ++----------------------------+------------------------+ + +Declarations +------------ + +The following global declarations cannot occur within a function, hook, or +event handler. Also, these declarations cannot appear after any statements +that are outside of a function, hook, or event handler. + +.. bro:keyword:: module + + The "module" keyword is used to change the current module. This + affects the scope of any subsequently declared global identifiers. 
+ + Example:: + + module mymodule; + + If a global identifier is declared after a "module" declaration, + then its scope ends at the end of the current Bro script or at the + next "module" declaration, whichever comes first. However, if a + global identifier is declared after a "module" declaration, but inside + an :bro:keyword:`export` block, then its scope ends at the end of the + last loaded Bro script, but it must be referenced using the namespace + operator (``::``) in other modules. + + There can be any number of "module" declarations in a Bro script. + The same "module" declaration can appear in any number of different + Bro scripts. + + +.. bro:keyword:: export + + An "export" block contains one or more declarations + (no statements are allowed in an "export" block) that the current + module is exporting. This enables these global identifiers to be visible + in other modules (but not prior to their declaration) via the namespace + operator (``::``). See the :bro:keyword:`module` keyword for a more + detailed explanation. + + Example:: + + export { + redef enum Log::ID += { LOG }; + + type Info: record { + ts: time &log; + uid: string &log; + }; + + const conntime = 30sec &redef; + } + + Note that the braces in an "export" block are always required + (they do not indicate a compound statement). Also, no semicolon is + needed to terminate an "export" block. + +.. bro:keyword:: global + + Variables declared with the "global" keyword will be global. + If a type is not specified, then an initializer is required so that + the type can be inferred. Likewise, if an initializer is not supplied, + then the type must be specified. Example:: + + global pi = 3.14; + global hosts: set[addr]; + global ciphers: table[string] of string = table(); + + Variable declarations outside of any function, hook, or event handler are + required to use this keyword (unless they are declared with the + :bro:keyword:`const` keyword). 
Definitions of functions, hooks, and + event handlers are not allowed to use the "global" + keyword (they already have global scope), except function declarations + where no function body is supplied use the "global" keyword. + + The scope of a global variable begins where the declaration is located, + and extends through all remaining Bro scripts that are loaded (however, + see the :bro:keyword:`module` keyword for an explanation of how modules + change the visibility of global identifiers). + + +.. bro:keyword:: const + + A variable declared with the "const" keyword will be constant. + Variables declared as constant are required to be initialized at the + time of declaration. Example:: + + const pi = 3.14; + const ssh_port: port = 22/tcp; + + The value of a constant cannot be changed later (the only + exception is if the variable is global and has the :bro:attr:`&redef` + attribute, then its value can be changed only with a :bro:keyword:`redef`). + + The scope of a constant is local if the declaration is in a + function, hook, or event handler, and global otherwise. + Note that the "const" keyword cannot be used with either the "local" + or "global" keywords (i.e., "const" replaces "local" and "global"). + + +.. bro:keyword:: type + + The "type" keyword is used to declare a user-defined type. The name + of this new type has global scope and can be used anywhere a built-in + type name can occur. + + The "type" keyword is most commonly used when defining a + :bro:type:`record` or an :bro:type:`enum`, but is also useful when + dealing with more complex types. + + Example:: + + type mytype: table[count] of table[addr, port] of string; + global myvar: mytype; + +.. bro:keyword:: redef + + There are three ways that "redef" can be used: to change the value of + a global variable, to extend a record type or enum type, or to specify + a new event handler body that replaces all those that were previously + defined. 
+ + If you're using "redef" to change a global variable (defined using either + :bro:keyword:`const` or :bro:keyword:`global`), then the variable that you + want to change must have the :bro:attr:`&redef` attribute. If the variable + you're changing is a table, set, or pattern, you can use ``+=`` to add + new elements, or you can use ``=`` to specify a new value (all previous + contents of the object are removed). If the variable you're changing is a + set or table, then you can use the ``-=`` operator to remove the + specified elements (nothing happens for specified elements that don't + exist). If the variable you are changing is not a table, set, or pattern, + then you must use the ``=`` operator. + + Examples:: + + redef pi = 3.14; + + If you're using "redef" to extend a record or enum, then you must + use the ``+=`` assignment operator. + For an enum, you can add more enumeration constants, and for a record + you can add more record fields (however, each record field in the "redef" + must have either the :bro:attr:`&optional` or :bro:attr:`&default` + attribute). + + Examples:: + + redef enum color += { Blue, Red }; + redef record MyRecord += { n2:int &optional; s2:string &optional; }; + + If you're using "redef" to specify a new event handler body that + replaces all those that were previously defined (i.e., any subsequently + defined event handler body will not be affected by this "redef"), then + the syntax is the same as a regular event handler definition except for + the presence of the "redef" keyword. + + Example:: + + redef event myevent(s:string) { print "Redefined", s; } + + +.. _function/event/hook: + +**function/event/hook** + For details on how to declare a :bro:type:`function`, + :bro:type:`event` handler, or :bro:type:`hook`, + see the documentation for those types. + + +Statements +---------- + +Each statement in a Bro script must be terminated with a semicolon (with a +few exceptions noted below). 
An individual statement can span multiple +lines. + +All statements (except those contained within a function, hook, or event +handler) must appear after all global declarations. + +Here are the statements that the Bro scripting language supports. + +.. bro:keyword:: add + + The "add" statement is used to add an element to a :bro:type:`set`. + Nothing happens if the specified element already exists in the set. + + Example:: + + local myset: set[string]; + add myset["test"]; + +.. bro:keyword:: break + + The "break" statement is used to break out of a :bro:keyword:`switch` or + :bro:keyword:`for` statement. + + +.. bro:keyword:: delete + + The "delete" statement is used to remove an element from a + :bro:type:`set` or :bro:type:`table`. Nothing happens if the + specified element does not exist in the set or table. + + Example:: + + local myset = set("this", "test"); + local mytable = table(["key1"] = 80/tcp, ["key2"] = 53/udp); + delete myset["test"]; + delete mytable["key1"]; + +.. bro:keyword:: event + + The "event" statement immediately queues invocation of an event handler. + + Example:: + + event myevent("test", 5); + +.. bro:keyword:: fallthrough + + The "fallthrough" statement can be used as the last statement in a + "case" block to indicate that execution should continue into the + next "case" or "default" label. + + For an example, see the :bro:keyword:`switch` statement. + +.. bro:keyword:: for + + A "for" loop iterates over each element in a string, set, vector, or + table and executes a statement for each iteration. + + For each iteration of the loop, a loop variable will be assigned to an + element if the expression evaluates to a string or set, or an index if + the expression evaluates to a vector or table. Then the statement + is executed. However, the statement will not be executed if the expression + evaluates to an object with no elements. 
+ + If the expression is a table or a set with more than one index, then the + loop variable must be specified as a comma-separated list of different + loop variables (one for each index), enclosed in brackets. + + A :bro:keyword:`break` statement can be used at any time to immediately + terminate the "for" loop, and a :bro:keyword:`next` statement can be + used to skip to the next loop iteration. + + Note that the loop variable in a "for" statement is not allowed to be + a global variable, and it does not need to be declared prior to the "for" + statement. The type will be inferred from the elements of the + expression. + + Example:: + + local myset = set(80/tcp, 81/tcp); + local mytable = table([10.0.0.1, 80/tcp]="s1", [10.0.0.2, 81/tcp]="s2"); + + for (p in myset) + print p; + + for ([i,j] in mytable) { + if (mytable[i,j] == "done") + break; + if (mytable[i,j] == "skip") + next; + print i,j; + } + + +.. bro:keyword:: if + + Evaluates a given expression, which must yield a :bro:type:`bool` value. + If true, then a specified statement is executed. If false, then + the statement is not executed. Example:: + + if ( x == 2 ) print "x is 2"; + + + However, if the expression evaluates to false and if an "else" is + provided, then the statement following the "else" is executed. Example:: + + if ( x == 2 ) + print "x is 2"; + else + print "x is not 2"; + +.. bro:keyword:: local + + A variable declared with the "local" keyword will be local. If a type + is not specified, then an initializer is required so that the type can + be inferred. Likewise, if an initializer is not supplied, then the + type must be specified. + + Examples:: + + local x1 = 5.7; + local x2: double; + local x3: double = 5.7; + + Variable declarations inside a function, hook, or event handler are + required to use this keyword (the only two exceptions are variables + declared with :bro:keyword:`const`, and variables implicitly declared in a + :bro:keyword:`for` statement). 
+ + The scope of a local variable starts at the location where it is declared + and persists to the end of the function, hook, + or event handler in which it is declared (this is true even if the + local variable was declared within a `compound statement`_ or is the loop + variable in a "for" statement). + + +.. bro:keyword:: next + + The "next" statement can only appear within a :bro:keyword:`for` loop. + It causes execution to skip to the next iteration. + + For an example, see the :bro:keyword:`for` statement. + +.. bro:keyword:: print + + The "print" statement takes a comma-separated list of one or more + expressions. Each expression in the list is evaluated and then converted + to a string. Then each string is printed, with each string separated by + a comma in the output. + + Examples:: + + print 3.14; + print "Results", x, y; + + By default, the "print" statement writes to the standard + output (stdout). However, if the first expression is of type + :bro:type:`file`, then "print" writes to that file. + + If a string contains non-printable characters (i.e., byte values that are + not in the range 32 - 126), then the "print" statement converts each + non-printable character to an escape sequence before it is printed. + + For more control over how the strings are formatted, see the :bro:id:`fmt` + function. + +.. bro:keyword:: return + + The "return" statement immediately exits the current function, hook, or + event handler. For a function, the specified expression (if any) is + evaluated and returned. A "return" statement in a hook or event handler + cannot return a value because event handlers and hooks do not have + return types. + + Examples:: + + function my_func(): string + { + return "done"; + } + + event my_event(n: count) + { + if ( n == 0 ) return; + + print n; + } + + There is a special form of the "return" statement that is only allowed + in functions. 
Syntactically, it looks like a :bro:keyword:`when` statement + immediately preceded by the "return" keyword. This form of the "return" + statement is used to specify a function that delays its result (such a + function can only be called in the expression of a :bro:keyword:`when` + statement). The function returns at the time the "when" + statement's condition becomes true, and the function returns the value + that the "when" statement's body returns (or if the condition does + not become true within the specified timeout interval, then the function + returns the value that the "timeout" block returns). + + Example:: + + global X: table[string] of count; + + function a() : count + { + # This delays until condition becomes true. + return when ( "a" in X ) + { + return X["a"]; + } + timeout 30 sec + { + return 0; + } + } + + event bro_init() + { + # Installs a trigger which fires if a() returns 42. + when ( a() == 42 ) + print "expected result"; + + print "Waiting for a() to return..."; + X["a"] = 42; + } + + +.. bro:keyword:: schedule + + The "schedule" statement is used to raise a specified event with + specified parameters at a later time specified as an :bro:type:`interval`. + + Example:: + + schedule 30sec { myevent(x, y, z) }; + + Note that the braces are always required (they do not indicate a + `compound statement`_). + + Note that "schedule" is actually an expression that returns a value + of type "timer", but in practice the return value is not used. + +.. bro:keyword:: switch + + A "switch" statement evaluates a given expression and jumps to + the first "case" label which contains a matching value (the result of the + expression must be type-compatible with all of the values in all of the + "case" labels). If there is no matching value, then execution jumps to + the "default" label instead, and if there is no "default" label then + execution jumps out of the "switch" block. 
+ + Here is an example (assuming that "get_day_of_week" is a + function that returns a string):: + + switch get_day_of_week() + { + case "Sa", "Su": + print "weekend"; + fallthrough; + case "Mo", "Tu", "We", "Th", "Fr": + print "valid result"; + break; + default: + print "invalid result"; + break; + } + + A "switch" block can have any number of "case" labels, and one + optional "default" label. + + A "case" label can have a comma-separated list of + more than one value. A value in a "case" label can be an expression, + but it must be a constant expression (i.e., the expression can consist + only of constants). + + Each "case" and the "default" block must + end with either a :bro:keyword:`break`, :bro:keyword:`fallthrough`, or + :bro:keyword:`return` statement (although "return" is allowed only + if the "switch" statement is inside a function, hook, or event handler). + If a "case" (or "default") block contains more than one statement, then + there is no need to wrap the statements in braces. + + Note that the braces in a "switch" statement are always required (these + do not indicate the presence of a `compound statement`_), and that no + semicolon is needed at the end of a "switch" statement. + + + .. bro:keyword:: when + + Evaluates a given expression, which must result in a value of type + :bro:type:`bool`. When the value of the expression becomes available + and if the result is true, then a specified statement is executed. 
+ + In the following example, if the expression evaluates to true, then + the "print" statement is executed:: + + when ( (local x = foo()) && x == 42 ) + print x; + + However, if a timeout is specified, and if the expression does not + evaluate to true within the specified timeout interval, then the + statement following the "timeout" keyword is executed:: + + when ( (local x = foo()) && x == 42 ) + print x; + timeout 5sec { + print "timeout"; + } + + Note that when a timeout is specified the braces are + always required (these do not indicate a `compound statement`_). + + The expression in a "when" statement can contain a declaration of a local + variable but only if the declaration is written in the form + "local *var* = *init*" (example: "local x = myfunction()"). This form + of a local declaration is actually an expression, the result of which + is always a boolean true value. + + The expression in a "when" statement can contain an asynchronous function + call such as :bro:id:`lookup_hostname` (in fact, this is the only place + such a function can be called), but it can also contain an ordinary + function call. When an asynchronous function call is in the expression, + then Bro will continue processing statements in the script following + the "when" statement, and when the result of the function call is available + Bro will finish evaluating the expression in the "when" statement. + See the :bro:keyword:`return` statement for an explanation of how to + create an asynchronous function in a Bro script. + + +.. _compound statement: + +**compound statement** + A compound statement is created by wrapping zero or more statements in + braces ``{ }``. Individual statements inside the braces need to be + terminated by a semicolon, but a semicolon is not needed at the end + (outside of the braces) of a compound statement. 
+ + A compound statement is required in order to execute more than one + statement in the body of a :bro:keyword:`for`, :bro:keyword:`if`, or + :bro:keyword:`when` statement. + + Example:: + + if ( x == 2 ) { + print "x is 2"; + ++x; + } + + Note that there are other places in the Bro scripting language that use + braces, but that do not indicate the presence of a compound + statement (these are noted in the documentation). + +.. _null: + +**null statement** + The null statement (executing it has no effect) consists of just a + semicolon. This might be useful during testing or debugging a Bro script + in places where a statement is required, but it is probably not useful + otherwise. + + Example:: + + if ( x == 2 ) + ; + diff --git a/doc/script-reference/types.rst b/doc/script-reference/types.rst index 049b43c04a..75988e0fb5 100644 --- a/doc/script-reference/types.rst +++ b/doc/script-reference/types.rst @@ -1,89 +1,114 @@ Types ===== -Every value in a Bro script has a type (see below for a list of all built-in -types). Although Bro variables have static types (meaning that their type -is fixed), their type is inferred from the value to which they are -initially assigned when the variable is declared without an explicit type -name. +The Bro scripting language supports the following built-in types: -Automatic conversions happen when a binary operator has operands of -different types. Automatic conversions are limited to converting between -numeric types. The numeric types are ``int``, ``count``, and ``double`` -(``bool`` is not a numeric type). -When an automatic conversion occurs, values are promoted to the "highest" -type in the expression. In general, this promotion follows a simple -hierarchy: ``double`` is highest, ``int`` comes next, and ``count`` is -lowest. 
++-----------------------+--------------------+ +| Name | Description | ++=======================+====================+ +| :bro:type:`bool` | Boolean | ++-----------------------+--------------------+ +| :bro:type:`count`, | Numeric types | +| :bro:type:`int`, | | +| :bro:type:`double` | | ++-----------------------+--------------------+ +| :bro:type:`time`, | Time types | +| :bro:type:`interval` | | ++-----------------------+--------------------+ +| :bro:type:`string` | String | ++-----------------------+--------------------+ +| :bro:type:`pattern` | Regular expression | ++-----------------------+--------------------+ +| :bro:type:`port`, | Network types | +| :bro:type:`addr`, | | +| :bro:type:`subnet` | | ++-----------------------+--------------------+ +| :bro:type:`enum` | Enumeration | +| | (user-defined type)| ++-----------------------+--------------------+ +| :bro:type:`table`, | Container types | +| :bro:type:`set`, | | +| :bro:type:`vector`, | | +| :bro:type:`record` | | ++-----------------------+--------------------+ +| :bro:type:`function`, | Executable types | +| :bro:type:`event`, | | +| :bro:type:`hook` | | ++-----------------------+--------------------+ +| :bro:type:`file` | File type (only | +| | for writing) | ++-----------------------+--------------------+ +| :bro:type:`opaque` | Opaque type (for | +| | some built-in | +| | functions) | ++-----------------------+--------------------+ +| :bro:type:`any` | Any type (for | +| | functions or | +| | containers) | ++-----------------------+--------------------+ -The Bro scripting language supports the following built-in types. - -.. bro:type:: void - - An internal Bro type (i.e., "void" is not a reserved keyword in the Bro - scripting language) representing the absence of a return type for a - function. +Here is a more detailed description of each type: .. bro:type:: bool Reflects a value with one of two meanings: true or false. The two - ``bool`` constants are ``T`` and ``F``. 
+ "bool" constants are ``T`` and ``F``. - The ``bool`` type supports the following operators: equality/inequality + The "bool" type supports the following operators: equality/inequality (``==``, ``!=``), logical and/or (``&&``, ``||``), logical - negation (``!``), and absolute value (where ``|T|`` is 1, and ``|F|`` is 0). + negation (``!``), and absolute value (where ``|T|`` is 1, and ``|F|`` is 0, + and in both cases the result type is :bro:type:`count`). .. bro:type:: int - A numeric type representing a 64-bit signed integer. An ``int`` constant - is a string of digits preceded by a ``+`` or ``-`` sign, e.g. + A numeric type representing a 64-bit signed integer. An "int" constant + is a string of digits preceded by a "+" or "-" sign, e.g. ``-42`` or ``+5`` (the "+" sign is optional but see note about type - inferencing below). An ``int`` constant can also be written in + inferencing below). An "int" constant can also be written in hexadecimal notation (in which case "0x" must be between the sign and the hex digits), e.g. ``-0xFF`` or ``+0xabc123``. - The ``int`` type supports the following operators: arithmetic + The "int" type supports the following operators: arithmetic operators (``+``, ``-``, ``*``, ``/``, ``%``), comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), assignment operators (``=``, ``+=``, ``-=``), pre-increment (``++``), pre-decrement - (``--``), and absolute value (e.g., ``|-3|`` is 3). + (``--``), unary plus and minus (``+``, ``-``), and absolute value + (e.g., ``|-3|`` is 3, but the result type is :bro:type:`count`). When using type inferencing use care so that the - intended type is inferred, e.g. ``local size_difference = 0`` will - infer :bro:type:`count`, while ``local size_difference = +0`` - will infer :bro:type:`int`. + intended type is inferred, e.g. "local size_difference = 0" will + infer ":bro:type:`count`", while "local size_difference = +0" + will infer "int". .. 
bro:type:: count - A numeric type representing a 64-bit unsigned integer. A ``count`` - constant is a string of digits, e.g. ``1234`` or ``0``. A ``count`` + A numeric type representing a 64-bit unsigned integer. A "count" + constant is a string of digits, e.g. ``1234`` or ``0``. A "count" can also be written in hexadecimal notation (in which case "0x" must precede the hex digits), e.g. ``0xff`` or ``0xABC123``. - The ``count`` type supports the same operators as the :bro:type:`int` - type. A unary plus or minus applied to a ``count`` results in an ``int``. - -.. bro:type:: counter - - An alias to :bro:type:`count`. + The "count" type supports the same operators as the ":bro:type:`int`" + type, but a unary plus or minus applied to a "count" results in an + "int". .. bro:type:: double A numeric type representing a double-precision floating-point number. Floating-point constants are written as a string of digits with an optional decimal point, optional scale-factor in scientific - notation, and optional ``+`` or ``-`` sign. Examples are ``-1234``, + notation, and optional "+" or "-" sign. Examples are ``-1234``, ``-1234e0``, ``3.14159``, and ``.003E-23``. - The ``double`` type supports the following operators: arithmetic + The "double" type supports the following operators: arithmetic operators (``+``, ``-``, ``*``, ``/``), comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), assignment operators - (``=``, ``+=``, ``-=``), and absolute value (e.g., ``|-3.14|`` is 3.14). + (``=``, ``+=``, ``-=``), unary plus and minus (``+``, ``-``), and + absolute value (e.g., ``|-3.14|`` is 3.14). When using type inferencing use care so that the - intended type is inferred, e.g. ``local size_difference = 5`` will - infer :bro:type:`count`, while ``local size_difference = 5.0`` - will infer :bro:type:`double`. + intended type is inferred, e.g. "local size_difference = 5" will + infer ":bro:type:`count`", while "local size_difference = 5.0" + will infer "double". .. 
bro:type:: time @@ -94,10 +119,10 @@ The Bro scripting language supports the following built-in types. Time values support the comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``). A ``time`` value can be subtracted from - another ``time`` value to produce an ``interval`` value. An ``interval`` - value can be added to, or subtracted from, a ``time`` value to produce a - ``time`` value. The absolute value of a ``time`` value is a ``double`` - with the same numeric value. + another ``time`` value to produce an :bro:type:`interval` value. An + ``interval`` value can be added to, or subtracted from, a ``time`` value + to produce a ``time`` value. The absolute value of a ``time`` value is + a :bro:type:`double` with the same numeric value. .. bro:type:: interval @@ -112,52 +137,58 @@ The Bro scripting language supports the following built-in types. ``3.5mins``. An ``interval`` can also be negated, for example ``-12 hr`` represents "twelve hours in the past". - Intervals support addition and subtraction. Intervals also support - division (in which case the result is a ``double`` value), the - comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), - and the assignment operators (``=``, ``+=``, ``-=``). Also, an - ``interval`` can be multiplied or divided by an arithmetic type - (``count``, ``int``, or ``double``) to produce an ``interval`` value. - The absolute value of an ``interval`` is a ``double`` value equal to the - number of seconds in the ``interval`` (e.g., ``|-1 min|`` is 60). + Intervals support addition and subtraction, the comparison operators + (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), the assignment + operators (``=``, ``+=``, ``-=``), and unary plus and minus (``+``, ``-``). + + Intervals also support division (in which case the result is a + :bro:type:`double` value). An ``interval`` can be multiplied or divided + by an arithmetic type (``count``, ``int``, or ``double``) to produce + an ``interval`` value. 
The absolute value of an ``interval`` is a + ``double`` value equal to the number of seconds in the ``interval`` + (e.g., ``|-1 min|`` is 60.0). .. bro:type:: string - A type used to hold character-string values which represent text. - String constants are created by enclosing text in double quotes (") - and the backslash character (\\) introduces escape sequences (all of - the C-style escape sequences are supported). + A type used to hold character-string values which represent text, although + strings in a Bro script can actually contain any arbitrary binary data. + + String constants are created by enclosing text within a pair of double + quotes ("). A string constant cannot span multiple lines in a Bro script. + The backslash character (\\) introduces escape sequences. The + following escape sequences are recognized: ``\n``, ``\t``, ``\v``, ``\b``, + ``\r``, ``\f``, ``\a``, ``\ooo`` (where each 'o' is an octal digit), + ``\xhh`` (where each 'h' is a hexadecimal digit). For escape sequences + that don't match any of these, Bro will just remove the backslash (so + to represent a literal backslash in a string constant, you just use + two consecutive backslashes). Strings support concatenation (``+``), and assignment (``=``, ``+=``). Strings also support the comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``). The number of characters in a string can be found by enclosing the string within pipe characters (e.g., ``|"abc"|`` - is 3). - - The subscript operator can extract an individual character or a substring - of a string (string indexing is zero-based, but an index of - -1 refers to the last character in the string, and -2 refers to the - second-to-last character, etc.). When extracting a substring, the - starting and ending index values are separated by a colon. 
For example:: - - local orig = "0123456789"; - local third_char = orig[2]; - local last_char = orig[-1]; - local first_three_chars = orig[0:2]; - - Substring searching can be performed using the "in" or "!in" + is 3). Substring searching can be performed using the "in" or "!in" operators (e.g., "bar" in "foobar" yields true). - Note that Bro represents strings internally as a count and vector of - bytes rather than a NUL-terminated byte string (although string - constants are also automatically NUL-terminated). This is because - network traffic can easily introduce NULs into strings either by - nature of an application, inadvertently, or maliciously. And while - NULs are allowed in Bro strings, when present in strings passed as - arguments to many functions, a run-time error can occur as their - presence likely indicates a sort of problem. In that case, the - string will also only be represented to the user as the literal - "" string. + The subscript operator can extract a substring of a string. To do this, + specify the starting index to extract (if the starting index is omitted, + then zero is assumed), followed by a colon and index + one past the last character to extract (if the last index is omitted, + then the extracted substring will go to the end of the original string). + However, if both the colon and last index are omitted, then a string of + length one is extracted. String indexing is zero-based, but an index + of -1 refers to the last character in the string, and -2 refers to the + second-to-last character, etc. Here are a few examples:: + + local orig = "0123456789"; + local second_char = orig[1]; + local last_char = orig[-1]; + local first_two_chars = orig[:2]; + local last_two_chars = orig[8:]; + local no_first_and_last = orig[1:9]; + + Note that the subscript operator cannot be used to modify a string (i.e., + it cannot be on the left side of an assignment operator). .. 
bro:type:: pattern @@ -171,7 +202,7 @@ The Bro scripting language supports the following built-in types. and embedded. In exact matching the ``==`` equality relational operator is used - with one :bro:type:`pattern` operand and one :bro:type:`string` + with one "pattern" operand and one ":bro:type:`string`" operand (order of operands does not matter) to check whether the full string exactly matches the pattern. In exact matching, the ``^`` beginning-of-line and ``$`` end-of-line anchors are redundant since @@ -187,8 +218,8 @@ The Bro scripting language supports the following built-in types. yields false. The ``!=`` operator would yield the negation of ``==``. In embedded matching the ``in`` operator is used with one - :bro:type:`pattern` operand (which must be on the left-hand side) and - one :bro:type:`string` operand, but tests whether the pattern + "pattern" operand (which must be on the left-hand side) and + one ":bro:type:`string`" operand, but tests whether the pattern appears anywhere within the given string. For example:: /foo|bar/ in "foobar" @@ -200,27 +231,12 @@ The Bro scripting language supports the following built-in types. is false since "oob" does not appear at the start of "foobar". The ``!in`` operator would yield the negation of ``in``. -.. bro:type:: enum - - A type allowing the specification of a set of related values that - have no further structure. An example declaration: - - .. code:: bro - - type color: enum { Red, White, Blue, }; - - The last comma after ``Blue`` is optional. - - The only operations allowed on enumerations are equality comparisons - (``==``, ``!=``) and assignment (``=``). - Enumerations do not have associated values or ordering. - .. bro:type:: port - A type representing transport-level port numbers. 
Besides TCP and + A type representing transport-level port numbers (besides TCP and UDP ports, there is a concept of an ICMP "port" where the source port is the ICMP message type and the destination port the ICMP - message code. A ``port`` constant is written as an unsigned integer + message code). A ``port`` constant is written as an unsigned integer followed by one of ``/tcp``, ``/udp``, ``/icmp``, or ``/unknown``. Ports support the comparison operators (``==``, ``!=``, ``<``, ``<=``, @@ -252,14 +268,6 @@ The Bro scripting language supports the following built-in types. address) are treated internally as IPv4 addresses (for example, ``[::ffff:192.168.1.100]`` is equal to ``192.168.1.100``). - Hostname constants can also be used, but since a hostname can - correspond to multiple IP addresses, the type of such a variable is a - :bro:type:`set` of :bro:type:`addr` elements. For example: - - .. code:: bro - - local a = www.google.com; - Addresses can be compared for equality (``==``, ``!=``), and also for ordering (``<``, ``<=``, ``>``, ``>=``). The absolute value of an address gives the size in bits (32 for IPv4, and 128 for IPv6). @@ -282,8 +290,16 @@ The Bro scripting language supports the following built-in types. if ( a in s ) print "true"; - Note that you can check if a given ``addr`` is IPv4 or IPv6 using + You can check if a given ``addr`` is IPv4 or IPv6 using the :bro:id:`is_v4_addr` and :bro:id:`is_v6_addr` built-in functions. + + Note that hostname constants can also be used, but since a hostname can + correspond to multiple IP addresses, the type of such a variable is + "set[addr]". For example: + + .. code:: bro + + local a = www.google.com; .. bro:type:: subnet @@ -293,13 +309,24 @@ The Bro scripting language supports the following built-in types. number. For example, ``192.168.0.0/16`` or ``[fe80::]/64``. Subnets can be compared for equality (``==``, ``!=``). 
An - :bro:type:`addr` can be checked for inclusion in a subnet using - the "in" or "!in" operators. + "addr" can be checked for inclusion in a subnet using + the ``in`` or ``!in`` operators. -.. bro:type:: any +.. bro:type:: enum - Used to bypass strong typing. For example, a function can take an - argument of type ``any`` when it may be of different types. + A type allowing the specification of a set of related values that + have no further structure. An example declaration: + + .. code:: bro + + type color: enum { Red, White, Blue, }; + + The last comma after ``Blue`` is optional. Both the type name ``color`` + and the individual values (``Red``, etc.) have global scope. + + Enumerations do not have associated values or ordering. + The only operations allowed on enumerations are equality comparisons + (``==``, ``!=``) and assignment (``=``). .. bro:type:: table @@ -313,24 +340,25 @@ The Bro scripting language supports the following built-in types. table [ type^+ ] of type - where *type^+* is one or more types, separated by commas. For example: + where *type^+* is one or more types, separated by commas. + For example: .. code:: bro global a: table[count] of string; - declares a table indexed by :bro:type:`count` values and yielding - :bro:type:`string` values. The yield type can also be more complex: + declares a table indexed by "count" values and yielding + "string" values. The yield type can also be more complex: .. code:: bro global a: table[count] of table[addr, port] of string; - which declares a table indexed by :bro:type:`count` and yielding - another :bro:type:`table` which is indexed by an :bro:type:`addr` - and :bro:type:`port` to yield a :bro:type:`string`. + which declares a table indexed by "count" and yielding + another "table" which is indexed by an "addr" + and "port" to yield a "string". 
- Initialization of tables occurs by enclosing a set of initializers within + One way to initialize a table is by enclosing a set of initializers within braces, for example: .. code:: bro @@ -340,18 +368,17 @@ The Bro scripting language supports the following built-in types. [5] = "five", }; - A table constructor (equivalent to above example) can also be used - to create a table: + A table constructor can also be used to create a table: .. code:: bro - global t2: table[count] of string = table( - [11] = "eleven", - [5] = "five" + global t2 = table( + [192.168.0.2, 22/tcp] = "ssh", + [192.168.0.3, 80/tcp] = "http" ); Table constructors can also be explicitly named by a type, which is - useful for when a more complex index type could otherwise be + useful when a more complex index type could otherwise be ambiguous: .. code:: bro @@ -378,17 +405,7 @@ The Bro scripting language supports the following built-in types. if ( 13 in t ) ... - - Iterate over tables with a ``for`` loop: - - .. code:: bro - - local t: table[count] of string; - for ( n in t ) - ... - - local services: table[addr, port] of string; - for ( [a, p] in services ) + if ( [192.168.0.2, 22/tcp] in t2 ) ... Add or overwrite individual table elements by assignment: @@ -397,7 +414,7 @@ The Bro scripting language supports the following built-in types. t[13] = "thirteen"; - Remove individual table elements with ``delete``: + Remove individual table elements with :bro:keyword:`delete`: .. code:: bro @@ -413,6 +430,9 @@ The Bro scripting language supports the following built-in types. |t| + See the :bro:keyword:`for` statement for info on how to iterate over + the elements in a table. + .. bro:type:: set A set is like a :bro:type:`table`, but it is a collection of indices @@ -423,25 +443,22 @@ The Bro scripting language supports the following built-in types. where *type^+* is one or more types separated by commas. 
- Sets are initialized by listing elements enclosed by curly braces: + Sets can be initialized by listing elements enclosed by curly braces: .. code:: bro global s: set[port] = { 21/tcp, 23/tcp, 80/tcp, 443/tcp }; global s2: set[port, string] = { [21/tcp, "ftp"], [23/tcp, "telnet"] }; - The types are explicitly shown in the example above, but they could - have been left to type inference. - A set constructor (equivalent to above example) can also be used to create a set: .. code:: bro - global s3: set[port] = set(21/tcp, 23/tcp, 80/tcp, 443/tcp); + global s3 = set(21/tcp, 23/tcp, 80/tcp, 443/tcp); Set constructors can also be explicitly named by a type, which is - useful for when a more complex index type could otherwise be + useful when a more complex index type could otherwise be ambiguous: .. code:: bro @@ -462,18 +479,10 @@ The Bro scripting language supports the following built-in types. if ( 21/tcp in s ) ... - if ( 21/tcp !in s ) + if ( [21/tcp, "ftp"] !in s2 ) ... - Iterate over a set with a ``for`` loop: - - .. code:: bro - - local s: set[port]; - for ( p in s ) - ... - - Elements are added with ``add``: + Elements are added with :bro:keyword:`add`: .. code:: bro @@ -482,7 +491,7 @@ The Bro scripting language supports the following built-in types. Nothing happens if the element with value ``22/tcp`` was already present in the set. - And removed with ``delete``: + And removed with :bro:keyword:`delete`: .. code:: bro @@ -498,6 +507,9 @@ The Bro scripting language supports the following built-in types. |s| + See the :bro:keyword:`for` statement for info on how to iterate over + the elements in a set. + .. bro:type:: vector A vector is like a :bro:type:`table`, except it's always indexed by a @@ -512,7 +524,7 @@ The Bro scripting language supports the following built-in types. .. 
code:: bro - global v: vector of string = vector("one", "two", "three"); + local v = vector("one", "two", "three"); Vector constructors can also be explicitly named by a type, which is useful for when a more complex yield type could otherwise be @@ -536,14 +548,6 @@ The Bro scripting language supports the following built-in types. print v[2]; - Iterate over a vector with a ``for`` loop: - - .. code:: bro - - local v: vector of string; - for ( n in v ) - ... - An element can be added to a vector by assigning the value (a value that already exists at that index will be overwritten): @@ -574,11 +578,17 @@ The Bro scripting language supports the following built-in types. The resulting vector of bool is the logical "and" (or logical "or") of each element of the operand vectors. + See the :bro:keyword:`for` statement for info on how to iterate over + the elements in a vector. + .. bro:type:: record - A ``record`` is a collection of values. Each value has a field name + A "record" is a collection of values. Each value has a field name and a type. Values do not need to have the same type and the types - have no restrictions. An example record type definition: + have no restrictions. Field names must follow the same syntax as + regular variable names (except that field names are allowed to be the + same as local or global variables). An example record type + definition: .. code:: bro @@ -587,85 +597,44 @@ The Bro scripting language supports the following built-in types. s: string &optional; }; - Access to a record field uses the dollar sign (``$``) operator: - - .. code:: bro - - global r: MyRecordType; - r$c = 13; - - Record assignment can be done field by field or as a whole like: - - .. code:: bro - - r = [$c = 13, $s = "thirteen"]; - + Records can be initialized or assigned as a whole in three different ways. When assigning a whole record value, all fields that are not :bro:attr:`&optional` or have a :bro:attr:`&default` attribute must - be specified. 
- - To test for existence of a field that is :bro:attr:`&optional`, use the - ``?$`` operator: + be specified. First, there's a constructor syntax: .. code:: bro - if ( r?$s ) - ... - - Records can also be created using a constructor syntax: - - .. code:: bro - - global r2: MyRecordType = record($c = 7); + local r: MyRecordType = record($c = 7); And the constructor can be explicitly named by type, too, which - is arguably more readable code: + is arguably more readable: .. code:: bro - global r3 = MyRecordType($c = 42); + local r = MyRecordType($c = 42); -.. bro:type:: opaque - - A data type whose actual representation/implementation is - intentionally hidden, but whose values may be passed to certain - functions that can actually access the internal/hidden resources. - Opaque types are differentiated from each other by qualifying them - like ``opaque of md5`` or ``opaque of sha1``. Any valid identifier - can be used as the type qualifier. - - An example use of this type is the set of built-in functions which - perform hashing: + And the third way is like this: .. code:: bro - local handle: opaque of md5 = md5_hash_init(); - md5_hash_update(handle, "test"); - md5_hash_update(handle, "testing"); - print md5_hash_finish(handle); + local r: MyRecordType = [$c = 13, $s = "thirteen"]; - Here the opaque type is used to provide a handle to a particular - resource which is calculating an MD5 checksum incrementally over - time, but the details of that resource aren't relevant, it's only - necessary to have a handle as a way of identifying it and - distinguishing it from other such resources. - -.. bro:type:: file - - Bro supports writing to files, but not reading from them. Files - can be opened using either the :bro:id:`open` or :bro:id:`open_for_append` - built-in functions, and closed using the :bro:id:`close` built-in - function. 
For example, declare, open, and write to a file - and finally close it like: + Access to a record field uses the dollar sign (``$``) operator, and + record fields can be assigned with this: .. code:: bro - global f: file = open("myfile"); - print f, "hello, world"; - close(f); + local r: MyRecordType; + r$c = 13; - Writing to files like this for logging usually isn't recommended, for better - logging support see :doc:`/frameworks/logging`. + To test if a field that is :bro:attr:`&optional` has been assigned a + value, use the ``?$`` operator (it returns a :bro:type:`bool` value of + ``T`` if the field has been assigned a value, or ``F`` if not): + + .. code:: bro + + if ( r ?$ s ) + ... .. bro:type:: function @@ -697,6 +666,16 @@ The Bro scripting language supports the following built-in types. type, but when it is, the return type and argument list (including the name of each argument) must match exactly. + Here is an example function that takes no parameters and does not + return a value: + + .. code:: bro + + function my_func() + { + print "my_func"; + } + Function types don't need to have a name and can be assigned anonymously: .. code:: bro @@ -739,9 +718,20 @@ The Bro scripting language supports the following built-in types. Event handlers are nearly identical in both syntax and semantics to a :bro:type:`function`, with the two differences being that event handlers have no return type since they never return a value, and - you cannot call an event handler. Instead of directly calling an - event handler from a script, event handler bodies are executed when - they are invoked by one of three different methods: + you cannot call an event handler. + + Example: + + .. 
code:: bro + + event my_event(r: bool, s: string) + { + print "my_event", r, s; + } + + Instead of directly calling an event handler from a script, event + handler bodies are executed when they are invoked by one of three + different methods: - From the event engine @@ -762,7 +752,7 @@ The Bro scripting language supports the following built-in types. This assumes that ``password_exposed`` was previously declared as an event handler type with compatible arguments. - - Via the ``schedule`` expression in a script + - Via the :bro:keyword:`schedule` expression in a script This delays the invocation of event handlers until some time in the future. For example: @@ -786,8 +776,8 @@ The Bro scripting language supports the following built-in types. immediate and they do not get scheduled through an event queue. Also, a unique feature of a hook is that a given hook handler body can short-circuit the execution of remaining hook handlers simply by - exiting from the body as a result of a ``break`` statement (as - opposed to a ``return`` or just reaching the end of the body). + exiting from the body as a result of a :bro:keyword:`break` statement (as + opposed to a :bro:keyword:`return` or just reaching the end of the body). A hook type is declared like:: @@ -856,3 +846,60 @@ The Bro scripting language supports the following built-in types. executed due to one handler body exiting as a result of a ``break`` statement. +.. bro:type:: file + + Bro supports writing to files, but not reading from them (to read from + files see the :doc:`/frameworks/input`). Files + can be opened using either the :bro:id:`open` or :bro:id:`open_for_append` + built-in functions, and closed using the :bro:id:`close` built-in + function. For example, declare, open, and write to a file and finally + close it like: + + .. 
code:: bro + + local f = open("myfile"); + print f, "hello, world"; + close(f); + + Writing to files like this for logging usually isn't recommended, for better + logging support see :doc:`/frameworks/logging`. + +.. bro:type:: opaque + + A data type whose actual representation/implementation is + intentionally hidden, but whose values may be passed to certain + built-in functions that can actually access the internal/hidden resources. + Opaque types are differentiated from each other by qualifying them + like "opaque of md5" or "opaque of sha1". + + An example use of this type is the set of built-in functions which + perform hashing: + + .. code:: bro + + local handle = md5_hash_init(); + md5_hash_update(handle, "test"); + md5_hash_update(handle, "testing"); + print md5_hash_finish(handle); + + Here the opaque type is used to provide a handle to a particular + resource which is calculating an MD5 hash incrementally over + time, but the details of that resource aren't relevant, it's only + necessary to have a handle as a way of identifying it and + distinguishing it from other such resources. + +.. bro:type:: any + + Used to bypass strong typing. For example, a function can take an + argument of type ``any`` when it may be of different types. + The only operation allowed on a variable of type ``any`` is assignment. + + Note that users aren't expected to use this type. It's provided mainly + for use by some built-in functions and scripts included with Bro. + +.. bro:type:: void + + An internal Bro type (i.e., "void" is not a reserved keyword in the Bro + scripting language) representing the absence of a return type for a + function. + From 8f1cbb8b0a9a8099e14aa806ae5120f6e8790d8f Mon Sep 17 00:00:00 2001 From: Johanna Amann Date: Thu, 4 Sep 2014 11:15:16 -0700 Subject: [PATCH 045/106] Fix ocsp reply validation - there were a few things that definitely were wrong. 
Now the right signer certificate for the reply is looked up (and no longer assumed that it is the first one) and a few compares are fixed. Plus - there are more test cases that partially send certificates in the ocsp message and partially do not - and it seems to work fine in all cases. Addresses BIT-1212 --- src/file_analysis/analyzer/x509/functions.bif | 57 ++++++++++++++++-- .../ssl-digicert.log | 10 +++ .../ssl-twimg.log | 10 +++ .../Traces/tls/ocsp-stapling-digicert.trace | Bin 0 -> 6395 bytes .../Traces/tls/ocsp-stapling-twimg.trace | Bin 0 -> 6513 bytes .../policy/protocols/ssl/validate-ocsp.bro | 6 ++ 6 files changed, 79 insertions(+), 4 deletions(-) create mode 100644 testing/btest/Baseline/scripts.policy.protocols.ssl.validate-ocsp/ssl-digicert.log create mode 100644 testing/btest/Baseline/scripts.policy.protocols.ssl.validate-ocsp/ssl-twimg.log create mode 100644 testing/btest/Traces/tls/ocsp-stapling-digicert.trace create mode 100644 testing/btest/Traces/tls/ocsp-stapling-twimg.trace diff --git a/src/file_analysis/analyzer/x509/functions.bif b/src/file_analysis/analyzer/x509/functions.bif index d7903b4921..a3d8258b33 100644 --- a/src/file_analysis/analyzer/x509/functions.bif +++ b/src/file_analysis/analyzer/x509/functions.bif @@ -104,6 +104,39 @@ STACK_OF(X509)* x509_get_untrusted_stack(VectorVal* certs_vec) return untrusted_certs; } +// we need this function to be able to identify the signer certificate of an OCSP request out +// of a list of possible certificates. +X509* x509_get_ocsp_signer(STACK_OF(X509) *certs, OCSP_RESPID *rid) + { + // we support two lookup types - either by response id or by key. + if ( rid->type == V_OCSP_RESPID_NAME ) + return X509_find_by_subject(certs, rid->value.byName); + + // there only should be name and type - but let's be sure... + if ( rid->type != V_OCSP_RESPID_KEY ) + return 0; + + // Just like OpenSSL, we just support SHA-1 lookups and bail out otherwhise. 
+ if ( rid->value.byKey->length != SHA_DIGEST_LENGTH ) + return 0; + + unsigned char* key_hash = rid->value.byKey->data; + for ( int i = 0; i < sk_X509_num(certs); ++i ) + { + unsigned char digest[SHA_DIGEST_LENGTH]; + X509* cert = sk_X509_value(certs, i); + if ( !X509_pubkey_digest(cert, EVP_sha1(), digest, NULL) ) + // digest failed for this certificate, try with next + continue; + + if ( memcmp(digest, key_hash, SHA_DIGEST_LENGTH) == 0 ) + // keys match, return certificate + return cert; + } + + return 0; + } + %%} ## Parses a certificate into an X509::Certificate structure. @@ -221,6 +254,7 @@ function x509_ocsp_verify%(certs: x509_opaque_vector, ocsp_reply: string, root_c int out = -1; int result = -1; X509* issuer_certificate = 0; + X509* signer = 0; OCSP_RESPONSE *resp = d2i_OCSP_RESPONSE(NULL, &start, ocsp_reply->Len()); if ( ! resp ) { @@ -266,14 +300,30 @@ function x509_ocsp_verify%(certs: x509_opaque_vector, ocsp_reply: string, root_c { sk_X509_push(basic->certs, X509_dup(sk_X509_value(untrusted_certs, i))); - if ( X509_NAME_cmp(X509_get_issuer_name(cert), X509_get_subject_name(sk_X509_value(untrusted_certs, i))) ) + if ( X509_NAME_cmp(X509_get_issuer_name(cert), X509_get_subject_name(sk_X509_value(untrusted_certs, i))) == 0 ) issuer_certificate = sk_X509_value(untrusted_certs, i); } // Because we actually want to be able to give nice error messages that show why we were // not able to verify the OCSP response - do our own verification logic first. + signer = x509_get_ocsp_signer(basic->certs, basic->tbsResponseData->responderId); + /* + Do this perhaps - OpenSSL also cannot do it, so I do not really feel bad about it. 
+ Needs a different lookup because the root store is no stack of X509 certs + + if ( !signer ) + // if we did not find it in the certificates that were sent, search in the root store + signer = x509_get_ocsp_signer(basic->certs, basic->tbsResponseData->responderId); + */ + + if ( !signer ) + { + rval = x509_result_record(-1, "Could not find OCSP responder certificate"); + goto x509_ocsp_cleanup; + } + csc = X509_STORE_CTX_new(); - X509_STORE_CTX_init(csc, ctx, sk_X509_value(basic->certs, 0), basic->certs); + X509_STORE_CTX_init(csc, ctx, signer, basic->certs); X509_STORE_CTX_set_time(csc, 0, (time_t) verify_time); X509_STORE_CTX_set_purpose(csc, X509_PURPOSE_OCSP_HELPER); @@ -292,7 +342,6 @@ function x509_ocsp_verify%(certs: x509_opaque_vector, ocsp_reply: string, root_c goto x509_ocsp_cleanup; } - // ok, now we verified the OCSP response. This means that we have a valid chain tying it // to a root that we trust and that the signature also hopefully is valid. This does not yet // mean that the ocsp response actually matches the certificate the server send us or that @@ -333,7 +382,7 @@ function x509_ocsp_verify%(certs: x509_opaque_vector, ocsp_reply: string, root_c goto x509_ocsp_cleanup; } - if ( ! OCSP_id_cmp(certid, single->certId) ) + if ( OCSP_id_cmp(certid, single->certId) != 0 ) return x509_result_record(-1, "OCSP reply is not for host certificate"); // next - check freshness of proof... 
diff --git a/testing/btest/Baseline/scripts.policy.protocols.ssl.validate-ocsp/ssl-digicert.log b/testing/btest/Baseline/scripts.policy.protocols.ssl.validate-ocsp/ssl-digicert.log new file mode 100644 index 0000000000..bb0a25ac0c --- /dev/null +++ b/testing/btest/Baseline/scripts.policy.protocols.ssl.validate-ocsp/ssl-digicert.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ssl +#open 2014-09-04-19-17-18 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher curve server_name session_id last_alert established cert_chain_fuids client_cert_chain_fuids subject issuer client_subject client_issuer ocsp_status +#types time string addr port addr port string string string string string string bool vector[string] vector[string] string string string string string +1404148886.994021 CXWv6p3arKYeMETxOg 192.168.4.149 51293 72.21.91.29 443 TLSv10 TLS_ECDHE_RSA_WITH_RC4_128_SHA secp256r1 - - - T FhwjYM0FkbvVCvMf2,Fajs2d2lipsadwoK1h (empty) CN=www.digicert.com,O=DigiCert\, Inc.,L=Lehi,ST=Utah,C=US,postalCode=84043,street=2600 West Executive Parkway,street=Suite 500,serialNumber=5299537-0142,1.3.6.1.4.1.311.60.2.1.2=#130455746168,1.3.6.1.4.1.311.60.2.1.3=#13025553,businessCategory=Private Organization CN=DigiCert SHA2 Extended Validation Server CA,OU=www.digicert.com,O=DigiCert Inc,C=US - - good +#close 2014-09-04-19-17-18 diff --git a/testing/btest/Baseline/scripts.policy.protocols.ssl.validate-ocsp/ssl-twimg.log b/testing/btest/Baseline/scripts.policy.protocols.ssl.validate-ocsp/ssl-twimg.log new file mode 100644 index 0000000000..4806744a5c --- /dev/null +++ b/testing/btest/Baseline/scripts.policy.protocols.ssl.validate-ocsp/ssl-twimg.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ssl +#open 2014-09-04-19-17-14 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher curve server_name session_id last_alert established cert_chain_fuids 
client_cert_chain_fuids subject issuer client_subject client_issuer ocsp_status +#types time string addr port addr port string string string string string string bool vector[string] vector[string] string string string string string +1409786981.016881 CXWv6p3arKYeMETxOg 192.168.4.149 53106 93.184.216.146 443 TLSv10 TLS_ECDHE_RSA_WITH_RC4_128_SHA secp256r1 - - - T FtaZVlJfywdNmVFr1,FoILekwkdtTuZtlVa (empty) CN=si0.twimg.com,O=Twitter\, Inc.,L=San Francisco,ST=California,C=US CN=DigiCert High Assurance CA-3,OU=www.digicert.com,O=DigiCert Inc,C=US - - good +#close 2014-09-04-19-17-14 diff --git a/testing/btest/Traces/tls/ocsp-stapling-digicert.trace b/testing/btest/Traces/tls/ocsp-stapling-digicert.trace new file mode 100644 index 0000000000000000000000000000000000000000..982249c0f5ac877bb97e32850978e3136b0f5692 GIT binary patch literal 6395 zcmc&&c|4Tc|3A-cwy}({j3R?B3VCL-riI8FiAY*yj|gK|G8mGgQc>Ac)K!$F8`bnJ zMK|1#O3BTwZlNquTI72knr@cd-}kTIyk6(^nsc7>yg%pj{(R2+bDq<3<3cVD5CHEt zItl;+J4a5G+Di|M02}xpYd|^i?|m_abPw}8tNFnS01Rb^41pmD0B#}Su({-Vx!R3L z)nny=F{l#U=%`m705}4%48h|G2*NKYnq$|50TFQH=(Y1i!E)Hg+BojGjWKjBXaMMt zo~*%;F$+jl?Z)oH5Cv#~xLaJG-y!lFIzqrf3nXm4V)I3T4n)J+l=Z5fcbj&-M2M|^t)l0qHby2cO0Y!yL0`ocMoDaeXB18meFhd9xAmJSdBr49qo*{`+v5p{>Y~tgY78)A5zzsffg^w1v`fq1wp-k)%o~ri!4SRDVCVfR1%iYh- z-Hq<(;_L0^!t(a_quaX&hPVgPjg8Pb(!%gfEmR*J`#Hm|4ywgqGMHH38D%3v3^{Z* zhAc{!unqJMabdaBR|R^y_<2W+fib8kg(Rd#LI?<3cQM>w-`D@cAdX6KV7Ykl+#*uR z+Dv_Y?S;A;C_{_M5J$x@3j$OD`(ST~uZ^ONxv1j!*leZ_iqhA*2eF_p?ykWs?+|yo ztxKR!s7p9Q1SMm$C{&^zbYtN}5`|GA>>>X*o4Dv3Q6q>AteQ^z=7 zz#|zym1IbV;*Wtthyc$C2eg7Uy+ur@W?LvsOhn(-Ba*hmxF$i&m$4T0vV^Aee_4jrNIlnM=qHW4_irY z;e7RfR5s9S;V1ug&z!VIN|%8hL-vbf^Y3Lo$8p(q3I`fYExnb`M^;p8>X@(bS|u`& z`r_98x5vtxZ@stO=CfhMW7)mNT~ax0Jqi%5Tkqz?TG>B>-&C|$qBNz~gOyoExM%Cg zFCv(ybJcQlOGh#debs+jJ*VS^7wOJFk0bAVN&C{JR%@}i1@x!Di68Z4R_kTDv7@ICn@I!r3tsG_!%30vWV(nO@sp%T%JJFhEke3Tn!#zYJ zXlyfTcrq5iIaA-7xSpT9wHz}`Cm6tetV!o}W{yu0UnWIKxTx__Mq3 ztKC*Sp?Zr5Jd?Hi{Q7$>)mwlhU~M|{iN 
z?3}$19cnr={&)g@@tGfMsk-U$IU(|MoR>-BQFM-W+mc_;|H?@0?|Sw5vF;I%l^Qw? z@GI|_8`dv{(wx$pY1M<->Dr>+vw+7yun4rKUUabr3E^+ab?X)xRL z%Q1mDfN&XQ6Dm+PKI>bRB8L~5lq6FQ(+|3A?(~t1oy0xrJ5gZ&_bR1^&V#86tCS)y z9q8uXo?djLprGJD7e7~b`U*$7oxeYeSDY3yP$sV$NeeUee(3&x)y#h@0uSs@5>^jq zoRTuP@>Dr<$l?$EH{_BMkkId`%85_j@7h*fYN*%QJk(P_q+MFFv?fEE*+WbI)K5O; zrV#%-pO{vOU80)2*YT#(6!DUaO@AlK%lGPquCd>|JAB!KS!W98Sg8r0t#tiOyK<*Q z^*XDL)s{`Uo#f6`cEYxET@Tz^(;Z%ooS$XuyQZ2lFk(57SrsiZc>n5tsXgKj54iza zZ{I)B=((Bj3uoU$h36&KF+(&wM%_bMm_&nnh9&e2Xj5n3Kp{4h=9`8#sv+v_%4cS&;tvecjUe-u-dS@^98 zAQrGJu0X{xX&T$h{3r=-X?V1pD5?nO-$Yf5Coxc%R8i&sqcB;GOR)*M95u{YnzNX* zh^J10zLP|imy1{`e$T`ej#wU!DUvU`WL#k3*$5u92vBRm|D>(|6uN2a;E#o}bm7C)}n)lKcjq-_Y#906`jDveqlhdaq%euZV`qC>?c+&(aeqcJ*3( z*)GN@JYSvM@pOAPW8iX5lyAR!%d;z?ak}^hW_j(gm|2zwZ46A0#ea>>`~oE^PRD(*P5I40mxX5k8h!58oIR=3}d-1^F&%<8_> zHhe8XINbI?>T#OE0ZjQNq4G!L$J^vHKR%%TCZ_yC_7jyqCTUFhX<%qn1AuiZ{o1P3 zv&y^fe~5sGcBDfJAdlg>= z6J^4)EUL2*MfJ3`u(NXz8h~2BLp@PV1dp2qzq2GP2CpR1u7!}&Yn~cvco_t2d;XF$ zJ=)X{hTUL_g&fMaAzf4#R(^Pfcb+a&?q!P|N%9Vwy#gnZXc*tye#l0m*7L#k5cRhO zH_J}(d8I_~bzCZw+5UMyb=lIoFHf&m>G5~Ze?u8zKb*&HIN&^6pDZ7;*sDQ5y6JWv zEq+d)=ab{ZkB_PD8Wgp=dnB%p+@t^Q-RU-wuxo{1JI_iTiP=(4VK2VuIWElSLJr+9u{LOGC<7mK-!3)lA@X=D8wjxmA?e# zlR3!eDx;`puotB6i7Pj-CDnXL%M<$C(|V$pLr<+*J9GgJKey%hA=%1P$;#Ze&k;#F zP`*})1-dG)zZ1zS*;%?aD5^42$~s-^eww)TjprkMtA<=JIbW_3h;NAgGwodKnG?|g zLl54VT(k3|vx*Put+~RV@FsBH?@IAzzBhAseEz-DBE9<{%_Dl)J2 zy56Dl~Q*XM@2{pFLYCZJn7 z)@f7jx&qTri(WdX7TC|S)}oDQ)w{R&`kRXzr}R8_Tc$=}D&^Oso`=(zAIg zP#>O+U0D!BzI}pQ!R`3-dz87ero90f=q`*&vK3oqgWaQGn|jrZDEYBJLy2pKC^9@0 zam|_5@(bULC|x|1_qqU-K<}_N`A!GqWm0EA!R~b6ofbxrsTjOeFd{sElO=Ja;$Wm> z9nml%!|S!5U)>-nz4_?dnuOMsk20eh4~kiT3|gmp#s2Q1jk5fiWhn-q1q$!sM+0g% z=)E%eRRa5}Xxey&16BO7hUM?HHGf z0sxSJcNAEgERYDPz6KLW;VTmbGOqcta-gp*6v!)O#c0KF%O!_1tPkPbhO;Az7ZW~xi9y<=PhZPVSd=?@`X!&4 z+ilP#nYC!cwSc3J^wi`tccpe`E^WN`pD$}OAN6*+M~?Pp%+G7eN_%%dLgKnyD+{K=Gy?4Dd`Y}R!jsAFiEgBB~#28tQk;Xrg#+rAPoaME1a1eem&=JTZhUm zjo@hWP{7mT96oz}-$pCUNKiWwR6~5MO%5kGSAPo&r-;PFaK@LyaPGyzdEIB+NDOyS 
z`P@z`gOgm7gq_KKl zYzbLQNvF_!>tCjyb|xYNz8xXVz!K#!0jI<)rR6c&j+co;O z@^%+dA)4-h6Gil-Q+0CK-Vd+J8+FqiH(ta~cb)HVEi@Jqq_Dtk*$%s@CafL$zNZHfWa`r0uA>5~lDjTxYT zq3S~T+Sm`n>VQ~3&yfrBr;YC3?m~_(%#jbg9C~^e<%u5eDwA}!stOy$TIc3f& z(;7GhDYLhwj9;o_C{m)N8G@)HhXt|Rcv2AUvAEen(GuD)9>Vc0DmydQoH4BB|Bm%v DA!xCz literal 0 HcmV?d00001 diff --git a/testing/btest/Traces/tls/ocsp-stapling-twimg.trace b/testing/btest/Traces/tls/ocsp-stapling-twimg.trace new file mode 100644 index 0000000000000000000000000000000000000000..f53762f6e0cf1a6c48338a95b48bffafd8e46997 GIT binary patch literal 6513 zcmcgwdpwle*Wb_FTn6KQOPX>yQg~cL?juQ&k}gis7-NjwVvwRVN0c8*bt)=Jlsc7@ z&_!vwoSag+DJ3069i@;&mx&4FDhbj0P}V-=4j;lpgANb&fQc27t??$u7W!0)U{D^!t*N_cz~& z#i~oJVpb#JKM9{d3r_<;AdxFDB9Vk)(y~TQ#ZVdm5@85E-vt0SNTV@?JETK|ZUq$p z8_Em^gp4#`)i+{ulLuU&hBc|mcG8z_52Irw0@Of3_F}F8*g!NizOmQnjn8-OT?0{F zeHC4RuxQAx9*Cj=U4X`D6ilWm?IU>(p>DBp&brw4n|L2NCoomfc#q^wHfD2<=}iuLLrfW5eSNM+Wl4oJ8x~*Uz{?p zVqj_}E>Dp$PWMcICy!xdGQdO4skjV<=ue`k5d8hj)o>L=$g0S>b2(fFJC;YE5ydjs z!Zi?^s-l>fm}nXTr&w^xlocImZh-5dDWZzbmld8|P8i)aE-pTn8O37L8LlRlxUPo0 zxfO0F z9tfQq$BGv7WmV+oC31N@cI;$iw5ga+RFRM4;-*#&wY@88s==GCpJs#|FAnf+?Oo}RY)R_o+P-gXSzN1I zIp$X69^!TGN&|YM!>5lHv^03%DU0xUzk1fO zye)Za??y5P2(TVt`}iaaoKG^v$3dTu)gx)*>Zz)}9dR2TP82F`EO>^|8~#bk^Ze}{ zJ{Acmkkm=4g}WPWyJt)ZE-%Y>kkJY6z(Vg`!}){_NI;3iC!|6mnJ*a)z8ok1YiuB8 zF@lm$FlB{AnI>?f*f4T@S|pDp!JxvRqJX2Y@@XhHM>15aV;fhtt3h75hN|f zB!rzXf}sI-C1Q=^5R8yi*p25K&eQ>u+JnCkw7jS%3T8zsw~ozD6k?*5wR~%1e}S)$B_c? 
zg}8Acs{Dy5Kdt=1qRMYBQTbB{ec~>X6b*n;p<%jy7>^fYZ)V1djj)ucre-Xrr3uHv z))dY_jiY3(ek|*T45gE%;&%8Hipf})+(^6~0;aBm^u|HOqpP?X!`BaA zC6>f6QZyck>x*d|OV=E>IGPm~BVh$T?TZv4-A=qOCTb6H5`t#p_JF#>m*#+a1KcbR zmqQy+L4hIQU}SZMO$t_L40s*58Te*(@JHu9P9XPcffUEjlxaoJVpV~zs=_sbRqh@Q zjTwRQ%l_22i)K|WTT#D=>lC}_n5O4v-KK5(-qu@k6!NZ-SdYDPlc(kA&;1B)z716S z*tjtGQLy5wI5NXeV{t46NI?d}F*mh;q6 zUc)0KTk8M4wNJt)!l=O3K0zEi`l!}EZ9aW&bTkjPc+}u=TO79d;f}5$Z(#vR%i(_i z|MsHs-Kxvt+{^}0~4T6*bYx98-E ziWxSUM(omVb17b=&2}>*k5E#ragyQ6yt_FUBWD)|9y#NfdtuS4K${4G-whyb;iMR{ zP*H_jE7!HlZ4UTAcJI=!UpcNbOX;j~X|LK_ot5TXY|Mdwmk9DBR?8H$&vo}wwAXvO zvx-<&x~t;XJqz%a@Y)E!m{hLDWHu8>(<>T7^ zX1_eRYDwYbYo8n*M0Q^LIFpferQu`=Q>Wae%1F88)cc^pJLOlp9V>KaChN?~vnglJK#!kT?ZLC50-kCqzJijAGng{GW$4 z#dP>T|4m7Wi;FN}#l)G!vSXs-xV-4tBq)krzCymNMriYO`Pf`|PKW|YfDYn89ALxW zXb=j&c_0jMA%_kakQ)nWF646o3ozj`l4C*1C@6tucyKl1B6%j9VZrAZ$YDYW^gI$3 zQSNXJ2M#ed(&7tGE$G`%gb@Wo;4j34m4DCAD?dwA`Ekk0FMUv4`JV~_+zZk=7k)>; zy>g*d97y{@`_RtN?p+}#7S6LlBpgwYO9zP}AF$vz9j@cT8MM=(XC?@S6ygr8M>|Cp zBSfSN=|CQeg?>QaLtaA?$QC4nB9~2iHiDofvJ}Nd0S5bHT24Pfp{O<@O zUu=&#UE~tqpPP;~>@d`pCUmnxCS4ryV*B0qUdfY`s7dLzmUtFKJoK}rb??_UKHN%V zbmpIN%84KIdcbtTqQfczW^4P$7kSq+Wa8%;o%2}QTr+3#=H4;6$>!2Aty4~&|F!h& zJ}MAus|^s3EDZmvRzJ^kx`(;$=I)9hAFJ9;=giN)KbDq$a~F1~C38Nd-u{Kc(e1n? 
zjmz`hctQVUGm`&#;or#xb>zn@nM})DTHYU|y9(|8GO#`|=@PB{QBTPU1^r8gM%hV; zeP*v0oKBW%c=G9b0KQ;#y5jUl=HnVa6otzUN&{ngtFIjCQ!CxwVBIIw8j4%kdfT ztV*7wKqP3x%LLU<&m)iOR)JQ9G~9+W(l+t2q{#ZS=5(mjp()#|hdg?R!(Sj%#`b-BJ5!R=9ZEfL?8U)122v4iBoE zX-~EFc53=fT|s#JR8}sHwX^P^uzGO6cHh%Y0Wa6&K9=uURLUvKPiCA*_H1eUC+cNs zb7y~MM#uVj`_^CgJ7wHDKlhqC@k#c7njR3AfAHLPF;rR7BdzV_Y*s}+Y`d`j!lfM{scZmt^wm(zmS#(Z&bG-8l{~PMh`h9xh_o@ap z@>{0utMqEvf?_;V4q(%XK*N>9NVA`t5PN!J`uiPCvJCB03D|_|1t+DhHoau3zo^_B z+7)hl(b`QXslV&4=7)Lrk1{CI?~3MRPV%^C;aQ*P#mB%O9e-S`>B7PuY9D#p8oa3S zjYUU;`n;9(Cgp`%)V!K-owB?C89Z-)T+!B2qh$62ztVM?)_*MZp!Qal2Kf2!wrcpj z{$Exk)p_IiTK^X(4RYDtbz16|!;5#g7C*r}#wg9x(}_&0F51${3t+y`m{?ie*2-2l zZa%c(`YsMZ&5?F;{JKT5_YPC{m)KXXHP~cgaDUUYj)Wa8nO>Hy_Nej~!O9Qs+C*dY zZ4cNg3=!QNB}!I)g(afOuL=4pya7mwxOT~7|1Q$w+l6;^PHYywxgDCnMwpOr=bF(K zYv!PFziGz3GGTSy-1#HjZ5*W!LdU=2vyOf%56ai{Ex< zIb61VR(&PN_V6uw{%aqyiQAatjxZ{Kq%eFxRSYfu@x@|b&iAeamoLptkv{0F=v1KutMwQ1jD1PEzeKNXL4Jqe@KUi`%5P!Gq!#7 zlGSbt2!By))bkNqSNKGB;y%^Ox4l{Od-y+XX2)zc+jLYMsY_ zqv(&T20#@X8l$6+QG{$!^hu+le+5Rr$rnbS3B!QY9fCL3`{7OXWKif^^Sgpn`NIy% z;GqvG4;HVfifnGpI%^bQ@+h?6>MY)y4MFV#$6uV>bL(nTckeCJz$V`*sg8A9aty2N zjPo)d*7`WsIz>Dl@3P>td-K>geeXVn9y&-DE_3vL_sX60_LY)ztVP8h=cI>jj^T3N zoqo5+*ZneM0qxSR@bY8*AvvLDwGWOSPO2r~%k$fa+7?HaoWG@gyeGpk`Y)Umdo18p z_y`|eF*EM8ByVoi*}1LJhSaBLOSx%mPa0?{ZOdAbzP5AI{^GDW^}ZcE_-f|pga8a-s3;*WOC|*E08EH}ln}Hd z!)6ZZtIP{H&@f)fe{9W+Y3#B2{O$8kEJkKJLj^zfX2pr`n%R$><_oh->`ixwg6_J} b_{K~lAxC89<56Z_5Sgh2`X<&OJoNb=RnO#{ literal 0 HcmV?d00001 diff --git a/testing/btest/scripts/policy/protocols/ssl/validate-ocsp.bro b/testing/btest/scripts/policy/protocols/ssl/validate-ocsp.bro index b0392f9c27..e7e3c3ff8e 100644 --- a/testing/btest/scripts/policy/protocols/ssl/validate-ocsp.bro +++ b/testing/btest/scripts/policy/protocols/ssl/validate-ocsp.bro @@ -1,4 +1,10 @@ # @TEST-EXEC: bro -C -r $TRACES/tls/ocsp-stapling.trace %INPUT # @TEST-EXEC: btest-diff ssl.log +# @TEST-EXEC: bro -C -r $TRACES/tls/ocsp-stapling-twimg.trace %INPUT +# @TEST-EXEC: mv ssl.log 
ssl-twimg.log +# @TEST-EXEC: btest-diff ssl-twimg.log +# @TEST-EXEC: bro -C -r $TRACES/tls/ocsp-stapling-digicert.trace %INPUT +# @TEST-EXEC: mv ssl.log ssl-digicert.log +# @TEST-EXEC: btest-diff ssl-digicert.log @load protocols/ssl/validate-ocsp From 42979b89f766ddf2d7852d286e8033824e368174 Mon Sep 17 00:00:00 2001 From: Johanna Amann Date: Thu, 4 Sep 2014 16:02:10 -0700 Subject: [PATCH 046/106] Also make links in documentation templates protocol relative. In case we do a point release, including this commit would be very helpful - without the bro documentation pages are somewhat broken over https. --- doc/_templates/layout.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html index 2f8ea02aff..3df56a12ff 100644 --- a/doc/_templates/layout.html +++ b/doc/_templates/layout.html @@ -10,7 +10,7 @@ {% endblock %} {% block header %} - {% endblock %} @@ -108,6 +108,6 @@ {% endblock %} {% block footer %} - {% endblock %} From b813b6f83bf0af218a1a84a36e600d2cce8700f2 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 4 Sep 2014 16:08:14 -0700 Subject: [PATCH 047/106] Test updates. 
--- testing/btest/Baseline/plugins.hooks/output | 50 ++++++++++++------- .../btest/Baseline/plugins.pktsrc/conn.log | 4 +- testing/btest/Baseline/plugins.writer/output | 22 ++++---- testing/btest/plugins/api-version-mismatch.sh | 2 +- .../btest/plugins/bifs-and-scripts-install.sh | 3 +- testing/btest/plugins/bifs-and-scripts.sh | 2 +- testing/btest/plugins/file.bro | 2 +- testing/btest/plugins/hooks.bro | 2 +- testing/btest/plugins/init-plugin.bro | 2 +- testing/btest/plugins/pktsrc.bro | 2 +- .../plugins/protocol-plugin/CMakeLists.txt | 1 - testing/btest/plugins/protocol.bro | 2 +- testing/btest/plugins/reader.bro | 2 +- testing/btest/plugins/writer.bro | 6 +-- 14 files changed, 57 insertions(+), 45 deletions(-) diff --git a/testing/btest/Baseline/plugins.hooks/output b/testing/btest/Baseline/plugins.hooks/output index 7b0f9262ae..83341f3075 100644 --- a/testing/btest/Baseline/plugins.hooks/output +++ b/testing/btest/Baseline/plugins.hooks/output @@ -182,7 +182,7 @@ 0.000000 MetaHookPost CallFunction(Log::__create_stream, (Unified2::LOG, [columns=, ev=Unified2::log_unified2])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, (Weird::LOG, [columns=, ev=Weird::log_weird])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, (X509::LOG, [columns=, ev=X509::log_x509])) -> -0.000000 MetaHookPost CallFunction(Log::__write, (PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) -> +0.000000 MetaHookPost CallFunction(Log::__write, (PacketFilter::LOG, [ts=1409853900.737227, node=bro, filter=ip or not ip, init=T, success=T])) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Cluster::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Communication::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Conn::LOG)) -> @@ -273,8 +273,8 @@ 0.000000 MetaHookPost CallFunction(Log::create_stream, (Unified2::LOG, [columns=, ev=Unified2::log_unified2])) -> 0.000000 
MetaHookPost CallFunction(Log::create_stream, (Weird::LOG, [columns=, ev=Weird::log_weird])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, (X509::LOG, [columns=, ev=X509::log_x509])) -> -0.000000 MetaHookPost CallFunction(Log::default_path_func, (PacketFilter::LOG, , [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) -> -0.000000 MetaHookPost CallFunction(Log::write, (PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) -> +0.000000 MetaHookPost CallFunction(Log::default_path_func, (PacketFilter::LOG, , [ts=1409853900.737227, node=bro, filter=ip or not ip, init=T, success=T])) -> +0.000000 MetaHookPost CallFunction(Log::write, (PacketFilter::LOG, [ts=1409853900.737227, node=bro, filter=ip or not ip, init=T, success=T])) -> 0.000000 MetaHookPost CallFunction(Notice::want_pp, ()) -> 0.000000 MetaHookPost CallFunction(PacketFilter::build, ()) -> 0.000000 MetaHookPost CallFunction(PacketFilter::combine_filters, (ip or not ip, and, )) -> @@ -316,7 +316,11 @@ 0.000000 MetaHookPost LoadFile(../main) -> -1 0.000000 MetaHookPost LoadFile(./Bro_ARP.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_AYIYA.events.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_AsciiReader.ascii.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_AsciiWriter.ascii.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_BackDoor.events.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_BenchmarkReader.benchmark.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_BinaryReader.binary.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_BitTorrent.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_ConnSize.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_DCE_RPC.events.bif.bro) -> -1 @@ -347,16 +351,20 @@ 0.000000 MetaHookPost LoadFile(./Bro_NetBIOS.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_NetBIOS.functions.bif.bro) -> -1 0.000000 MetaHookPost 
LoadFile(./Bro_NetFlow.events.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_NoneWriter.none.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_PIA.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_POP3.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_RADIUS.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_RPC.events.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_RawReader.raw.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SMB.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SMTP.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SMTP.functions.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SNMP.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SNMP.types.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SOCKS.events.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_SQLiteReader.sqlite.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_SQLiteWriter.sqlite.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SSH.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SSL.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SteppingStone.events.bif.bro) -> -1 @@ -380,21 +388,20 @@ 0.000000 MetaHookPost LoadFile(./cardinality-counter.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./const.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./consts.bro) -> -1 0.000000 MetaHookPost LoadFile(./contents) -> -1 0.000000 MetaHookPost LoadFile(./dcc-send) -> -1 0.000000 MetaHookPost LoadFile(./entities) -> -1 0.000000 MetaHookPost LoadFile(./event.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./exec) -> -1 0.000000 MetaHookPost LoadFile(./file_analysis.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./files) -> -1 -0.000000 MetaHookPost LoadFile(./functions.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./gridftp) -> -1 0.000000 
MetaHookPost LoadFile(./hll_unique) -> -1 +0.000000 MetaHookPost LoadFile(./hooks.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./inactivity) -> -1 0.000000 MetaHookPost LoadFile(./info) -> -1 +0.000000 MetaHookPost LoadFile(./init.bro) -> -1 0.000000 MetaHookPost LoadFile(./input) -> -1 0.000000 MetaHookPost LoadFile(./input.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./last) -> -1 @@ -408,6 +415,7 @@ 0.000000 MetaHookPost LoadFile(./netstats) -> -1 0.000000 MetaHookPost LoadFile(./non-cluster) -> -1 0.000000 MetaHookPost LoadFile(./patterns) -> -1 +0.000000 MetaHookPost LoadFile(./pcap.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./plugins) -> -1 0.000000 MetaHookPost LoadFile(./polling) -> -1 0.000000 MetaHookPost LoadFile(./postprocessors) -> -1 @@ -432,9 +440,7 @@ 0.000000 MetaHookPost LoadFile(.<...>/ascii) -> -1 0.000000 MetaHookPost LoadFile(.<...>/benchmark) -> -1 0.000000 MetaHookPost LoadFile(.<...>/binary) -> -1 -0.000000 MetaHookPost LoadFile(.<...>/dataseries) -> -1 0.000000 MetaHookPost LoadFile(.<...>/drop) -> -1 -0.000000 MetaHookPost LoadFile(.<...>/elasticsearch) -> -1 0.000000 MetaHookPost LoadFile(.<...>/email_admin) -> -1 0.000000 MetaHookPost LoadFile(.<...>/hostnames) -> -1 0.000000 MetaHookPost LoadFile(.<...>/none) -> -1 @@ -699,7 +705,7 @@ 0.000000 MetaHookPre CallFunction(Log::__create_stream, (Unified2::LOG, [columns=, ev=Unified2::log_unified2])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, (Weird::LOG, [columns=, ev=Weird::log_weird])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, (X509::LOG, [columns=, ev=X509::log_x509])) -0.000000 MetaHookPre CallFunction(Log::__write, (PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) +0.000000 MetaHookPre CallFunction(Log::__write, (PacketFilter::LOG, [ts=1409853900.737227, node=bro, filter=ip or not ip, init=T, success=T])) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Cluster::LOG)) 0.000000 MetaHookPre 
CallFunction(Log::add_default_filter, (Communication::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Conn::LOG)) @@ -790,8 +796,8 @@ 0.000000 MetaHookPre CallFunction(Log::create_stream, (Unified2::LOG, [columns=, ev=Unified2::log_unified2])) 0.000000 MetaHookPre CallFunction(Log::create_stream, (Weird::LOG, [columns=, ev=Weird::log_weird])) 0.000000 MetaHookPre CallFunction(Log::create_stream, (X509::LOG, [columns=, ev=X509::log_x509])) -0.000000 MetaHookPre CallFunction(Log::default_path_func, (PacketFilter::LOG, , [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) -0.000000 MetaHookPre CallFunction(Log::write, (PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) +0.000000 MetaHookPre CallFunction(Log::default_path_func, (PacketFilter::LOG, , [ts=1409853900.737227, node=bro, filter=ip or not ip, init=T, success=T])) +0.000000 MetaHookPre CallFunction(Log::write, (PacketFilter::LOG, [ts=1409853900.737227, node=bro, filter=ip or not ip, init=T, success=T])) 0.000000 MetaHookPre CallFunction(Notice::want_pp, ()) 0.000000 MetaHookPre CallFunction(PacketFilter::build, ()) 0.000000 MetaHookPre CallFunction(PacketFilter::combine_filters, (ip or not ip, and, )) @@ -833,7 +839,11 @@ 0.000000 MetaHookPre LoadFile(../main) 0.000000 MetaHookPre LoadFile(./Bro_ARP.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_AYIYA.events.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_AsciiReader.ascii.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_AsciiWriter.ascii.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_BackDoor.events.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_BenchmarkReader.benchmark.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_BinaryReader.binary.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_BitTorrent.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_ConnSize.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_DCE_RPC.events.bif.bro) @@ -864,16 +874,20 @@ 0.000000 MetaHookPre 
LoadFile(./Bro_NetBIOS.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_NetBIOS.functions.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_NetFlow.events.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_NoneWriter.none.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_PIA.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_POP3.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_RADIUS.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_RPC.events.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_RawReader.raw.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SMB.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SMTP.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SMTP.functions.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SNMP.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SNMP.types.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SOCKS.events.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_SQLiteReader.sqlite.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_SQLiteWriter.sqlite.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SSH.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SSL.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SteppingStone.events.bif.bro) @@ -897,21 +911,20 @@ 0.000000 MetaHookPre LoadFile(./cardinality-counter.bif.bro) 0.000000 MetaHookPre LoadFile(./const.bif.bro) 0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts.bif.bro) 0.000000 MetaHookPre LoadFile(./consts.bro) 0.000000 MetaHookPre LoadFile(./contents) 0.000000 MetaHookPre LoadFile(./dcc-send) 0.000000 MetaHookPre LoadFile(./entities) 0.000000 MetaHookPre LoadFile(./event.bif.bro) -0.000000 MetaHookPre LoadFile(./events.bif.bro) 0.000000 MetaHookPre LoadFile(./exec) 0.000000 MetaHookPre LoadFile(./file_analysis.bif.bro) 0.000000 MetaHookPre LoadFile(./files) -0.000000 MetaHookPre LoadFile(./functions.bif.bro) 0.000000 MetaHookPre LoadFile(./gridftp) 0.000000 MetaHookPre LoadFile(./hll_unique) +0.000000 MetaHookPre LoadFile(./hooks.bif.bro) 0.000000 MetaHookPre 
LoadFile(./inactivity) 0.000000 MetaHookPre LoadFile(./info) +0.000000 MetaHookPre LoadFile(./init.bro) 0.000000 MetaHookPre LoadFile(./input) 0.000000 MetaHookPre LoadFile(./input.bif.bro) 0.000000 MetaHookPre LoadFile(./last) @@ -925,6 +938,7 @@ 0.000000 MetaHookPre LoadFile(./netstats) 0.000000 MetaHookPre LoadFile(./non-cluster) 0.000000 MetaHookPre LoadFile(./patterns) +0.000000 MetaHookPre LoadFile(./pcap.bif.bro) 0.000000 MetaHookPre LoadFile(./plugins) 0.000000 MetaHookPre LoadFile(./polling) 0.000000 MetaHookPre LoadFile(./postprocessors) @@ -949,9 +963,7 @@ 0.000000 MetaHookPre LoadFile(.<...>/ascii) 0.000000 MetaHookPre LoadFile(.<...>/benchmark) 0.000000 MetaHookPre LoadFile(.<...>/binary) -0.000000 MetaHookPre LoadFile(.<...>/dataseries) 0.000000 MetaHookPre LoadFile(.<...>/drop) -0.000000 MetaHookPre LoadFile(.<...>/elasticsearch) 0.000000 MetaHookPre LoadFile(.<...>/email_admin) 0.000000 MetaHookPre LoadFile(.<...>/hostnames) 0.000000 MetaHookPre LoadFile(.<...>/none) @@ -1216,7 +1228,7 @@ 0.000000 | HookCallFunction Log::__create_stream(Unified2::LOG, [columns=, ev=Unified2::log_unified2]) 0.000000 | HookCallFunction Log::__create_stream(Weird::LOG, [columns=, ev=Weird::log_weird]) 0.000000 | HookCallFunction Log::__create_stream(X509::LOG, [columns=, ev=X509::log_x509]) -0.000000 | HookCallFunction Log::__write(PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T]) +0.000000 | HookCallFunction Log::__write(PacketFilter::LOG, [ts=1409853900.737227, node=bro, filter=ip or not ip, init=T, success=T]) 0.000000 | HookCallFunction Log::add_default_filter(Cluster::LOG) 0.000000 | HookCallFunction Log::add_default_filter(Communication::LOG) 0.000000 | HookCallFunction Log::add_default_filter(Conn::LOG) @@ -1307,8 +1319,8 @@ 0.000000 | HookCallFunction Log::create_stream(Unified2::LOG, [columns=, ev=Unified2::log_unified2]) 0.000000 | HookCallFunction Log::create_stream(Weird::LOG, [columns=, ev=Weird::log_weird]) 
0.000000 | HookCallFunction Log::create_stream(X509::LOG, [columns=, ev=X509::log_x509]) -0.000000 | HookCallFunction Log::default_path_func(PacketFilter::LOG, , [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T]) -0.000000 | HookCallFunction Log::write(PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T]) +0.000000 | HookCallFunction Log::default_path_func(PacketFilter::LOG, , [ts=1409853900.737227, node=bro, filter=ip or not ip, init=T, success=T]) +0.000000 | HookCallFunction Log::write(PacketFilter::LOG, [ts=1409853900.737227, node=bro, filter=ip or not ip, init=T, success=T]) 0.000000 | HookCallFunction Notice::want_pp() 0.000000 | HookCallFunction PacketFilter::build() 0.000000 | HookCallFunction PacketFilter::combine_filters(ip or not ip, and, ) diff --git a/testing/btest/Baseline/plugins.pktsrc/conn.log b/testing/btest/Baseline/plugins.pktsrc/conn.log index 550f520352..ab218f18fd 100644 --- a/testing/btest/Baseline/plugins.pktsrc/conn.log +++ b/testing/btest/Baseline/plugins.pktsrc/conn.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path conn -#open 2014-08-28-04-53-05 +#open 2014-09-04-18-06-05 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count set[string] 1409193037.000000 CXWv6p3arKYeMETxOg 1.2.0.2 2527 1.2.0.3 6649 tcp - - - - S0 - 0 S 1 64 0 0 (empty) -#close 2014-08-28-04-53-05 +#close 2014-09-04-18-06-05 diff --git a/testing/btest/Baseline/plugins.writer/output b/testing/btest/Baseline/plugins.writer/output index f737e892a0..49c130d6e2 100644 --- a/testing/btest/Baseline/plugins.writer/output +++ b/testing/btest/Baseline/plugins.writer/output @@ -2,21 +2,21 @@ Demo::Foo - A Foo test logging writer (dynamic, 
version 1.0) [Writer] Foo (Log::WRITER_FOO) === -[packet_filter] 1406831942.605829|bro|ip or not ip|T|T [conn] 1340213005.165293|CXWv6p3arKYeMETxOg|10.0.0.55|53994|60.190.189.214|8124|tcp|-|4.314406|0|0|S0|-|0|S|5|320|0|0| -[tunnel] 1340213015.276495|-|10.0.0.55|0|60.190.189.214|8124|Tunnel::SOCKS|Tunnel::DISCOVER -[socks] 1340213015.276495|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|5|-|succeeded|-|www.osnews.com|80|192.168.0.31|-|2688 -[http] 1340213019.013158|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|1|GET|www.osnews.com|/images/printer2.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- -[http] 1340213019.013426|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|2|GET|www.osnews.com|/img2/shorturl.jpg|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- -[http] 1340213019.580162|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|3|GET|www.osnews.com|/images/icons/9.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- -[http] 1340213020.155861|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|4|GET|www.osnews.com|/images/icons/26.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|1368|200|OK|-|-|-||-|-|-|-|-|FBtZ7y1ppK8iIeY622|image/gif -[files] 1340213020.732581|FBtZ7y1ppK8iIeY622|60.190.189.214|10.0.0.55|CjhGID4nQcgTWjvg4c|HTTP|0||image/gif|-|0.000000|-|F|1368|1368|0|0|F|-|-|-|-|- -[http] 1340213020.732963|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|5|GET|www.osnews.com|/images/icons/17.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- -[http] 
1340213021.300269|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|6|GET|www.osnews.com|/images/left.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- [conn] 1340213010.582723|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|tcp|http,socks|13.839419|3860|2934|SF|-|0|ShADadfF|23|5080|20|3986| -[http] 1340213021.861584|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|7|GET|www.osnews.com|/images/icons/32.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- [conn] 1340213048.780152|CCvvfg3TEfuqmmG4bh|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| [conn] 1340213097.272764|CsRx2w45OKnoww6xl4|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| [conn] 1340213162.160367|CRJuHdVW0XPVINV8a|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| [conn] 1340213226.561757|CPbrpk1qSsw6ESzHV4|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| [conn] 1340213290.981995|C6pKV8GSxOnSLghOa|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| +[files] 1340213020.732581|FBtZ7y1ppK8iIeY622|60.190.189.214|10.0.0.55|CjhGID4nQcgTWjvg4c|HTTP|0||image/gif|-|0.000000|-|F|1368|1368|0|0|F|-|-|-|-|- +[http] 1340213019.013158|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|1|GET|www.osnews.com|/images/printer2.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213019.013426|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|2|GET|www.osnews.com|/img2/shorturl.jpg|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 
1340213019.580162|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|3|GET|www.osnews.com|/images/icons/9.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213020.155861|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|4|GET|www.osnews.com|/images/icons/26.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|1368|200|OK|-|-|-||-|-|-|-|-|FBtZ7y1ppK8iIeY622|image/gif +[http] 1340213020.732963|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|5|GET|www.osnews.com|/images/icons/17.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213021.300269|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|6|GET|www.osnews.com|/images/left.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213021.861584|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|7|GET|www.osnews.com|/images/icons/32.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[packet_filter] 1409859343.786281|bro|ip or not ip|T|T +[socks] 1340213015.276495|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|5|-|succeeded|-|www.osnews.com|80|192.168.0.31|-|2688 +[tunnel] 1340213015.276495|-|10.0.0.55|0|60.190.189.214|8124|Tunnel::SOCKS|Tunnel::DISCOVER diff --git a/testing/btest/plugins/api-version-mismatch.sh b/testing/btest/plugins/api-version-mismatch.sh index a75ff05655..f8d88b4fc4 100644 --- a/testing/btest/plugins/api-version-mismatch.sh +++ b/testing/btest/plugins/api-version-mismatch.sh @@ -1,6 +1,6 @@ # @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo # 
@TEST-EXEC: bash %INPUT -# @TEST-EXEC: make BRO=${DIST} +# @TEST-EXEC: ./configure --bro-dist=${DIST} && make # @TEST-EXEC-FAIL: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output 2>&1 # @TEST-EXEC: btest-diff output diff --git a/testing/btest/plugins/bifs-and-scripts-install.sh b/testing/btest/plugins/bifs-and-scripts-install.sh index 4f0174a69f..58d0987f5e 100644 --- a/testing/btest/plugins/bifs-and-scripts-install.sh +++ b/testing/btest/plugins/bifs-and-scripts-install.sh @@ -1,6 +1,7 @@ # @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo # @TEST-EXEC: bash %INPUT -# @TEST-EXEC: BRO_PLUGIN_INSTALL=`pwd`/test-install make BRO=${DIST} +# @TEST-EXEC: ./configure --bro-dist=${DIST} +# @TEST-EXEC: BRO_PLUGIN_INSTALL=`pwd`/test-install make # @TEST-EXEC: make install # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd`/test-install bro -NN Demo::Foo >>output # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro demo/foo -r $TRACES/empty.trace >>output diff --git a/testing/btest/plugins/bifs-and-scripts.sh b/testing/btest/plugins/bifs-and-scripts.sh index 9975da21c6..7b4c45cd4e 100644 --- a/testing/btest/plugins/bifs-and-scripts.sh +++ b/testing/btest/plugins/bifs-and-scripts.sh @@ -1,6 +1,6 @@ # @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo # @TEST-EXEC: bash %INPUT -# @TEST-EXEC: make BRO=${DIST} +# @TEST-EXEC: ./configure --bro-dist=${DIST} && make # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output # @TEST-EXEC: echo === >>output diff --git a/testing/btest/plugins/file.bro b/testing/btest/plugins/file.bro index fb882d1d7e..7d25cab538 100644 --- a/testing/btest/plugins/file.bro +++ b/testing/btest/plugins/file.bro @@ -1,6 +1,6 @@ # @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo # @TEST-EXEC: cp -r %DIR/file-plugin/* . 
-# @TEST-EXEC: make BRO=${DIST} +# @TEST-EXEC: ./configure --bro-dist=${DIST} && make # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output # @TEST-EXEC: echo === >>output # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/ftp/retr.trace %INPUT >>output diff --git a/testing/btest/plugins/hooks.bro b/testing/btest/plugins/hooks.bro index 1a4f35fc55..786e6ccc88 100644 --- a/testing/btest/plugins/hooks.bro +++ b/testing/btest/plugins/hooks.bro @@ -1,6 +1,6 @@ # @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Hooks # @TEST-EXEC: cp -r %DIR/hooks-plugin/* . -# @TEST-EXEC: make BRO=${DIST} +# @TEST-EXEC: ./configure --bro-dist=${DIST} && make # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/http/get.trace %INPUT 2>&1 | $SCRIPTS/diff-remove-abspath | sort | uniq >output # @TEST-EXEC: btest-diff output diff --git a/testing/btest/plugins/init-plugin.bro b/testing/btest/plugins/init-plugin.bro index 1ad5be6aea..2fffa88f2c 100644 --- a/testing/btest/plugins/init-plugin.bro +++ b/testing/btest/plugins/init-plugin.bro @@ -1,5 +1,5 @@ # @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo -# @TEST-EXEC: make BRO=${DIST} +# @TEST-EXEC: ./configure --bro-dist=${DIST} && make # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output # @TEST-EXEC: echo === >>output # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/port4242.trace >>output diff --git a/testing/btest/plugins/pktsrc.bro b/testing/btest/plugins/pktsrc.bro index 2bd9be7bb7..39d2fa9aff 100644 --- a/testing/btest/plugins/pktsrc.bro +++ b/testing/btest/plugins/pktsrc.bro @@ -4,5 +4,5 @@ # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output # @TEST-EXEC: echo === >>output # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r foo:XXX %INPUT FilteredTraceDetection::enable=F >>output -# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff conn.log +# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/plugins/protocol-plugin/CMakeLists.txt 
b/testing/btest/plugins/protocol-plugin/CMakeLists.txt index a032edbd89..4bc8460c06 100644 --- a/testing/btest/plugins/protocol-plugin/CMakeLists.txt +++ b/testing/btest/plugins/protocol-plugin/CMakeLists.txt @@ -15,6 +15,5 @@ bro_plugin_begin(Demo Foo) bro_plugin_cc(src/Plugin.cc) bro_plugin_cc(src/Foo.cc) bro_plugin_bif(src/events.bif) -bro_plugin_bif(src/functions.bif) bro_plugin_pac(src/foo.pac src/foo-protocol.pac src/foo-analyzer.pac) bro_plugin_end() diff --git a/testing/btest/plugins/protocol.bro b/testing/btest/plugins/protocol.bro index dadbbb7717..671edb6cf1 100644 --- a/testing/btest/plugins/protocol.bro +++ b/testing/btest/plugins/protocol.bro @@ -1,6 +1,6 @@ # @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo # @TEST-EXEC: cp -r %DIR/protocol-plugin/* . -# @TEST-EXEC: make BRO=${DIST} +# @TEST-EXEC: ./configure --bro-dist=${DIST} && make # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output # @TEST-EXEC: echo === >>output # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/port4242.trace %INPUT >>output diff --git a/testing/btest/plugins/reader.bro b/testing/btest/plugins/reader.bro index cecb5306da..5065678c2e 100644 --- a/testing/btest/plugins/reader.bro +++ b/testing/btest/plugins/reader.bro @@ -1,6 +1,6 @@ # @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo # @TEST-EXEC: cp -r %DIR/reader-plugin/* . -# @TEST-EXEC: make BRO=${DIST} +# @TEST-EXEC: ./configure --bro-dist=${DIST} && make # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output # @TEST-EXEC: echo === >>output # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` btest-bg-run bro bro %INPUT diff --git a/testing/btest/plugins/writer.bro b/testing/btest/plugins/writer.bro index 49fbbb9395..f2e74ad667 100644 --- a/testing/btest/plugins/writer.bro +++ b/testing/btest/plugins/writer.bro @@ -1,8 +1,8 @@ # @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo # @TEST-EXEC: cp -r %DIR/writer-plugin/* . 
-# @TEST-EXEC: make BRO=${DIST} +# @TEST-EXEC: ./configure --bro-dist=${DIST} && make # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output # @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/socks.trace Log::default_writer=Log::WRITER_FOO %INPUT >>output -# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output +# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/socks.trace Log::default_writer=Log::WRITER_FOO %INPUT | sort >>output +# @TEST-EXEC: TEST_DIFF_CANONIFIER=diff-remove-timestamps btest-diff output From 6e33c92cf00fe1fb8419ccd802c39390b318ca40 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 4 Sep 2014 20:30:28 -0700 Subject: [PATCH 048/106] Adding test for dynamic packet dumper plugin. --- .../btest/Baseline/plugins.pktdumper/output | 12 ++++++ .../plugins/pktdumper-plugin/.btest-ignore | 0 .../plugins/pktdumper-plugin/CMakeLists.txt | 17 ++++++++ .../btest/plugins/pktdumper-plugin/src/Foo.cc | 40 +++++++++++++++++++ .../btest/plugins/pktdumper-plugin/src/Foo.h | 30 ++++++++++++++ .../plugins/pktdumper-plugin/src/Plugin.cc | 20 ++++++++++ testing/btest/plugins/pktdumper.bro | 8 ++++ 7 files changed, 127 insertions(+) create mode 100644 testing/btest/Baseline/plugins.pktdumper/output create mode 100644 testing/btest/plugins/pktdumper-plugin/.btest-ignore create mode 100644 testing/btest/plugins/pktdumper-plugin/CMakeLists.txt create mode 100644 testing/btest/plugins/pktdumper-plugin/src/Foo.cc create mode 100644 testing/btest/plugins/pktdumper-plugin/src/Foo.h create mode 100644 testing/btest/plugins/pktdumper-plugin/src/Plugin.cc create mode 100644 testing/btest/plugins/pktdumper.bro diff --git a/testing/btest/Baseline/plugins.pktdumper/output b/testing/btest/Baseline/plugins.pktdumper/output new file mode 100644 index 0000000000..05ffec25cb --- /dev/null +++ b/testing/btest/Baseline/plugins.pktdumper/output @@ -0,0 +1,12 @@ +Demo::Foo - A Foo packet dumper (dynamic, version 1.0) + [Packet Dumper] FooPktDumper 
(dumper prefix: foo) + +=== +Dumping to XXX: 1373858797.646968 len 94 +Dumping to XXX: 1373858797.646998 len 94 +Dumping to XXX: 1373858797.647041 len 86 +Dumping to XXX: 1373858797.647147 len 98 +Dumping to XXX: 1373858797.647186 len 86 +Dumping to XXX: 1373858797.647250 len 86 +Dumping to XXX: 1373858797.647317 len 86 +Dumping to XXX: 1373858797.647350 len 86 diff --git a/testing/btest/plugins/pktdumper-plugin/.btest-ignore b/testing/btest/plugins/pktdumper-plugin/.btest-ignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/btest/plugins/pktdumper-plugin/CMakeLists.txt b/testing/btest/plugins/pktdumper-plugin/CMakeLists.txt new file mode 100644 index 0000000000..2234907ad2 --- /dev/null +++ b/testing/btest/plugins/pktdumper-plugin/CMakeLists.txt @@ -0,0 +1,17 @@ + +project(Bro-Plugin-Demo-Foo) + +cmake_minimum_required(VERSION 2.6.3) + +if ( NOT BRO_DIST ) + message(FATAL_ERROR "BRO_DIST not set") +endif () + +set(CMAKE_MODULE_PATH ${BRO_DIST}/cmake) + +include(BroPlugin) + +bro_plugin_begin(Demo Foo) +bro_plugin_cc(src/Plugin.cc) +bro_plugin_cc(src/Foo.cc) +bro_plugin_end() diff --git a/testing/btest/plugins/pktdumper-plugin/src/Foo.cc b/testing/btest/plugins/pktdumper-plugin/src/Foo.cc new file mode 100644 index 0000000000..fdd364b034 --- /dev/null +++ b/testing/btest/plugins/pktdumper-plugin/src/Foo.cc @@ -0,0 +1,40 @@ + +#include +#include + +#include "Foo.h" + +using namespace plugin::Demo_Foo; + +Foo::Foo(const std::string& path, bool is_live) + { + props.path = path; + } + +Foo::~Foo() + { + } + +void Foo::Open() + { + props.open_time = network_time; + props.hdr_size = 0; + Opened(props); + } + +void Foo::Close() + { + Closed(); + } + +bool Foo::Dump(const Packet* pkt) + { + double t = double(pkt->hdr->ts.tv_sec) + double(pkt->hdr->ts.tv_usec) / 1e6; + fprintf(stdout, "Dumping to %s: %.6f len %u\n", props.path.c_str(), t, (unsigned int)pkt->hdr->len); + return true; + } + +iosource::PktDumper* Foo::Instantiate(const std::string& 
path, bool append) + { + return new Foo(path, append); + } diff --git a/testing/btest/plugins/pktdumper-plugin/src/Foo.h b/testing/btest/plugins/pktdumper-plugin/src/Foo.h new file mode 100644 index 0000000000..b8c8291728 --- /dev/null +++ b/testing/btest/plugins/pktdumper-plugin/src/Foo.h @@ -0,0 +1,30 @@ + +#ifndef BRO_PLUGIN_DEMO_FOO_H +#define BRO_PLUGIN_DEMO_FOO_H + +#include +#include + +namespace plugin { +namespace Demo_Foo { + +class Foo : public iosource::PktDumper { +public: + Foo(const std::string& path, bool is_live); + virtual ~Foo(); + + static PktDumper* Instantiate(const std::string& path, bool append); + +protected: + virtual void Open(); + virtual void Close(); + virtual bool Dump(const Packet* pkt); + +private: + Properties props; +}; + +} +} + +#endif diff --git a/testing/btest/plugins/pktdumper-plugin/src/Plugin.cc b/testing/btest/plugins/pktdumper-plugin/src/Plugin.cc new file mode 100644 index 0000000000..81ef8c79f4 --- /dev/null +++ b/testing/btest/plugins/pktdumper-plugin/src/Plugin.cc @@ -0,0 +1,20 @@ + +#include "Plugin.h" + +#include "Foo.h" + +namespace plugin { namespace Demo_Foo { Plugin plugin; } } + +using namespace plugin::Demo_Foo; + +plugin::Configuration Plugin::Configure() + { + AddComponent(new ::iosource::PktDumperComponent("FooPktDumper", "foo", ::plugin::Demo_Foo::Foo::Instantiate)); + + plugin::Configuration config; + config.name = "Demo::Foo"; + config.description = "A Foo packet dumper"; + config.version.major = 1; + config.version.minor = 0; + return config; + } diff --git a/testing/btest/plugins/pktdumper.bro b/testing/btest/plugins/pktdumper.bro new file mode 100644 index 0000000000..61540897d8 --- /dev/null +++ b/testing/btest/plugins/pktdumper.bro @@ -0,0 +1,8 @@ +# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo +# @TEST-EXEC: cp -r %DIR/pktdumper-plugin/* . 
+# @TEST-EXEC: ./configure --bro-dist=${DIST} && make +# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/port4242.trace -w foo:XXX %INPUT FilteredTraceDetection::enable=F >>output +# @TEST-EXEC: btest-diff output + From 042afd2feb530e0cf77cbd76dfd5fbe5c9ba4f88 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 4 Sep 2014 20:55:20 -0700 Subject: [PATCH 049/106] Fixing remaining tests. --- aux/bro-aux | 2 +- cmake | 2 +- src/iosource/Component.cc | 2 +- src/iosource/IOSource.h | 4 ++++ src/iosource/PktSrc.h | 4 ---- .../plugins.bifs-and-scripts-install/output | 2 +- .../Baseline/plugins.bifs-and-scripts/output | 2 +- testing/btest/plugins/bifs-and-scripts-install.sh | 15 +++++++-------- testing/btest/plugins/bifs-and-scripts.sh | 11 +++++------ 9 files changed, 21 insertions(+), 23 deletions(-) diff --git a/aux/bro-aux b/aux/bro-aux index 181f084432..4bb294d4d1 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 181f084432e277f899140647d9b788059b3cccb1 +Subproject commit 4bb294d4d15b14a9e49f875b459fdef8286b5957 diff --git a/cmake b/cmake index aa15263ae3..4f830a0fbf 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit aa15263ae39667e5e9bd73690b05aa4af9147ca3 +Subproject commit 4f830a0fbf078d6b64a9a2264d2d06d89e8fe619 diff --git a/src/iosource/Component.cc b/src/iosource/Component.cc index 20232161cd..a9cfa37d34 100644 --- a/src/iosource/Component.cc +++ b/src/iosource/Component.cc @@ -151,7 +151,7 @@ void PktDumperComponent::DoDescribe(ODesc* d) const if ( prefs.size() ) prefs += ", "; - prefs += *i; + prefs += '"' + *i + '"'; } d->Add("dumper prefix"); diff --git a/src/iosource/IOSource.h b/src/iosource/IOSource.h index 9083e8f4f1..0e7087a2dd 100644 --- a/src/iosource/IOSource.h +++ b/src/iosource/IOSource.h @@ -3,6 +3,10 @@ #ifndef IOSOURCE_IOSOURCE_H #define IOSOURCE_IOSOURCE_H +extern "C" { +#include +} + #include #include "Timer.h" 
diff --git a/src/iosource/PktSrc.h b/src/iosource/PktSrc.h index c616a2d5b4..75fd2633d0 100644 --- a/src/iosource/PktSrc.h +++ b/src/iosource/PktSrc.h @@ -3,10 +3,6 @@ #ifndef IOSOURCE_PKTSRC_PKTSRC_H #define IOSOURCE_PKTSRC_PKTSRC_H -extern "C" { -#include -} - #include "IOSource.h" #include "BPF_Program.h" #include "Dict.h" diff --git a/testing/btest/Baseline/plugins.bifs-and-scripts-install/output b/testing/btest/Baseline/plugins.bifs-and-scripts-install/output index a4187d0f7c..f03cfddc81 100644 --- a/testing/btest/Baseline/plugins.bifs-and-scripts-install/output +++ b/testing/btest/Baseline/plugins.bifs-and-scripts-install/output @@ -1,6 +1,6 @@ Demo::Foo - (dynamic, version 1.0) - [Event] plugin_event [Function] hello_plugin_world + [Event] plugin_event plugin: automatically loaded at startup calling bif, Hello from the plugin! diff --git a/testing/btest/Baseline/plugins.bifs-and-scripts/output b/testing/btest/Baseline/plugins.bifs-and-scripts/output index a082b3d690..47dd6ed430 100644 --- a/testing/btest/Baseline/plugins.bifs-and-scripts/output +++ b/testing/btest/Baseline/plugins.bifs-and-scripts/output @@ -1,6 +1,6 @@ Demo::Foo - (dynamic, version 1.0) - [Event] plugin_event [Function] hello_plugin_world + [Event] plugin_event === plugin: automatically loaded at startup diff --git a/testing/btest/plugins/bifs-and-scripts-install.sh b/testing/btest/plugins/bifs-and-scripts-install.sh index 58d0987f5e..158f5fc01f 100644 --- a/testing/btest/plugins/bifs-and-scripts-install.sh +++ b/testing/btest/plugins/bifs-and-scripts-install.sh @@ -1,12 +1,14 @@ # @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin Demo Foo # @TEST-EXEC: bash %INPUT -# @TEST-EXEC: ./configure --bro-dist=${DIST} -# @TEST-EXEC: BRO_PLUGIN_INSTALL=`pwd`/test-install make +# @TEST-EXEC: ./configure --bro-dist=${DIST} --install-root=`pwd`/test-install +# @TEST-EXEC: make # @TEST-EXEC: make install # @TEST-EXEC: BRO_PLUGIN_PATH=`pwd`/test-install bro -NN Demo::Foo >>output # @TEST-EXEC: 
BRO_PLUGIN_PATH=`pwd` bro demo/foo -r $TRACES/empty.trace >>output # @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output +mkdir -p scripts/demo/foo/base/ + cat >scripts/__load__.bro <scripts/demo/foo/base/at-startup.bro <src/functions.bif <src/foo.bif <activate.bro <src/events.bif <scripts/__load__.bro <scripts/demo/foo/base/at-startup.bro <src/functions.bif <src/foo.bif <activate.bro <src/events.bif < Date: Thu, 4 Sep 2014 22:43:25 -0700 Subject: [PATCH 050/106] fix more http links. This does not break the layout, thus these are not really important enough for the .1. --- NEWS | 2 +- doc/install/install.rst | 2 +- doc/quickstart/index.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index f06115d4ea..6da13833c3 100644 --- a/NEWS +++ b/NEWS @@ -22,7 +22,7 @@ New Functionality plugin can furthermore hook into Bro's processing a number of places to add custom logic. - See http://www.bro.org/sphinx-git/devel/plugins.html for more + See https://www.bro.org/sphinx-git/devel/plugins.html for more information on writing plugins. Changed Functionality diff --git a/doc/install/install.rst b/doc/install/install.rst index 9a258773ce..0052acafb0 100644 --- a/doc/install/install.rst +++ b/doc/install/install.rst @@ -180,7 +180,7 @@ automatically. Finally, use ``make install-aux`` to install some of the other programs that are in the ``aux/bro-aux`` directory. OpenBSD users, please see our `FAQ -`_ if you are having +`_ if you are having problems installing Bro. Finally, if you want to build the Bro documentation (not required, because diff --git a/doc/quickstart/index.rst b/doc/quickstart/index.rst index 173373c769..bb642ee75a 100644 --- a/doc/quickstart/index.rst +++ b/doc/quickstart/index.rst @@ -1,5 +1,5 @@ -.. _FAQ: http://www.bro.org/documentation/faq.html +.. _FAQ: //www.bro.org/documentation/faq.html .. 
_quickstart: From 4a66a8e341e93f3859d48282644c3fd9cdcce8da Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 5 Sep 2014 18:20:23 -0700 Subject: [PATCH 051/106] Bugfixes and test updates. --- TODO.iosources | 4 - src/DNS_Mgr.cc | 2 +- src/DNS_Mgr.h | 2 +- src/PktSrc.cc | 709 ------------------ src/RemoteSerializer.cc | 2 +- src/RemoteSerializer.h | 2 +- src/bro.bif | 2 +- src/iosource/Component.cc | 1 + src/iosource/Component.h | 37 +- src/iosource/IOSource.h | 110 ++- src/iosource/Manager.cc | 3 +- src/iosource/Manager.h | 85 ++- src/iosource/PktDumper.h | 3 + src/iosource/PktSrc.cc | 4 +- src/iosource/PktSrc.h | 3 + src/iosource/pcap/Dumper.h | 1 + src/iosource/pcap/Source.cc | 1 + src/main.cc | 7 +- .../canonified_loaded_scripts.log | 5 +- 19 files changed, 212 insertions(+), 771 deletions(-) delete mode 100644 src/PktSrc.cc diff --git a/TODO.iosources b/TODO.iosources index ee19dea169..e69de29bb2 100644 --- a/TODO.iosources +++ b/TODO.iosources @@ -1,4 +0,0 @@ -- Tests - - pktsrc plugin - - pktdump plugin - diff --git a/src/DNS_Mgr.cc b/src/DNS_Mgr.cc index 69f54b2bbc..7f37ee8c5f 100644 --- a/src/DNS_Mgr.cc +++ b/src/DNS_Mgr.cc @@ -405,7 +405,7 @@ DNS_Mgr::~DNS_Mgr() delete [] dir; } -void DNS_Mgr::Init() +void DNS_Mgr::InitPostScript() { if ( did_init ) return; diff --git a/src/DNS_Mgr.h b/src/DNS_Mgr.h index 5978f3a597..b8b0fc7e35 100644 --- a/src/DNS_Mgr.h +++ b/src/DNS_Mgr.h @@ -45,7 +45,7 @@ public: DNS_Mgr(DNS_MgrMode mode); virtual ~DNS_Mgr(); - void Init(); + void InitPostScript(); void Flush(); // Looks up the address or addresses of the given host, and returns diff --git a/src/PktSrc.cc b/src/PktSrc.cc deleted file mode 100644 index 7a0ed4fa0b..0000000000 --- a/src/PktSrc.cc +++ /dev/null @@ -1,709 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. 
- -#include -#include - -#include "config.h" - -#include "util.h" -#include "PktSrc.h" -#include "Hash.h" -#include "Net.h" -#include "Sessions.h" - - -// ### This needs auto-confing. -#ifdef HAVE_PCAP_INT_H -#include -#endif - -PktSrc::PktSrc() - { - interface = readfile = 0; - data = last_data = 0; - memset(&hdr, 0, sizeof(hdr)); - hdr_size = 0; - datalink = 0; - netmask = 0xffffff00; - pd = 0; - idle = false; - - next_sync_point = 0; - first_timestamp = current_timestamp = next_timestamp = 0.0; - first_wallclock = current_wallclock = 0; - - stats.received = stats.dropped = stats.link = 0; - } - -PktSrc::~PktSrc() - { - Close(); - - loop_over_list(program_list, i) - delete program_list[i]; - - BPF_Program* code; - IterCookie* cookie = filters.InitForIteration(); - while ( (code = filters.NextEntry(cookie)) ) - delete code; - - delete [] interface; - delete [] readfile; - } - -void PktSrc::GetFds(int* read, int* write, int* except) - { - if ( pseudo_realtime ) - { - // Select would give erroneous results. But we simulate it - // by setting idle accordingly. - idle = CheckPseudoTime() == 0; - return; - } - - if ( selectable_fd >= 0 ) - *read = selectable_fd; - } - -int PktSrc::ExtractNextPacket() - { - // Don't return any packets if processing is suspended (except for the - // very first packet which we need to set up times). - if ( net_is_processing_suspended() && first_timestamp ) - { - idle = true; - return 0; - } - - data = last_data = pcap_next(pd, &hdr); - - if ( data && (hdr.len == 0 || hdr.caplen == 0) ) - { - sessions->Weird("empty_pcap_header", &hdr, data); - return 0; - } - - if ( data ) - next_timestamp = hdr.ts.tv_sec + double(hdr.ts.tv_usec) / 1e6; - - if ( pseudo_realtime ) - current_wallclock = current_time(true); - - if ( ! first_timestamp ) - first_timestamp = next_timestamp; - - idle = (data == 0); - - if ( data ) - ++stats.received; - - // Source has gone dry. If it's a network interface, this just means - // it's timed out. 
If it's a file, though, then the file has been - // exhausted. - if ( ! data && ! IsLive() ) - { - closed = true; - - if ( pseudo_realtime && using_communication ) - { - if ( remote_trace_sync_interval ) - remote_serializer->SendFinalSyncPoint(); - else - remote_serializer->Terminate(); - } - } - - return data != 0; - } - -double PktSrc::NextTimestamp(double* local_network_time) - { - if ( ! data && ! ExtractNextPacket() ) - return -1.0; - - if ( pseudo_realtime ) - { - // Delay packet if necessary. - double packet_time = CheckPseudoTime(); - if ( packet_time ) - return packet_time; - - idle = true; - return -1.0; - } - - return next_timestamp; - } - -void PktSrc::ContinueAfterSuspend() - { - current_wallclock = current_time(true); - } - -double PktSrc::CurrentPacketWallClock() - { - // We stop time when we are suspended. - if ( net_is_processing_suspended() ) - current_wallclock = current_time(true); - - return current_wallclock; - } - -double PktSrc::CheckPseudoTime() - { - if ( ! data && ! ExtractNextPacket() ) - return 0; - - if ( ! current_timestamp ) - return bro_start_time; - - if ( remote_trace_sync_interval ) - { - if ( next_sync_point == 0 || next_timestamp >= next_sync_point ) - { - int n = remote_serializer->SendSyncPoint(); - next_sync_point = first_timestamp + - n * remote_trace_sync_interval; - remote_serializer->Log(RemoteSerializer::LogInfo, - fmt("stopping at packet %.6f, next sync-point at %.6f", - current_timestamp, next_sync_point)); - - return 0; - } - } - - double pseudo_time = next_timestamp - first_timestamp; - double ct = (current_time(true) - first_wallclock) * pseudo_realtime; - - return pseudo_time <= ct ? bro_start_time + pseudo_time : 0; - } - -void PktSrc::Process() - { - if ( ! data && ! ExtractNextPacket() ) - return; - - current_timestamp = next_timestamp; - - int pkt_hdr_size = hdr_size; - - // Unfortunately some packets on the link might have MPLS labels - // while others don't. 
That means we need to ask the link-layer if - // labels are in place. - bool have_mpls = false; - - int protocol = 0; - - switch ( datalink ) { - case DLT_NULL: - { - protocol = (data[3] << 24) + (data[2] << 16) + (data[1] << 8) + data[0]; - - // From the Wireshark Wiki: "AF_INET6, unfortunately, has - // different values in {NetBSD,OpenBSD,BSD/OS}, - // {FreeBSD,DragonFlyBSD}, and {Darwin/Mac OS X}, so an IPv6 - // packet might have a link-layer header with 24, 28, or 30 - // as the AF_ value." As we may be reading traces captured on - // platforms other than what we're running on, we accept them - // all here. - if ( protocol != AF_INET - && protocol != AF_INET6 - && protocol != 24 - && protocol != 28 - && protocol != 30 ) - { - sessions->Weird("non_ip_packet_in_null_transport", &hdr, data); - data = 0; - return; - } - - break; - } - - case DLT_EN10MB: - { - // Get protocol being carried from the ethernet frame. - protocol = (data[12] << 8) + data[13]; - - switch ( protocol ) - { - // MPLS carried over the ethernet frame. - case 0x8847: - // Remove the data link layer and denote a - // header size of zero before the IP header. - have_mpls = true; - data += get_link_header_size(datalink); - pkt_hdr_size = 0; - break; - - // VLAN carried over the ethernet frame. - case 0x8100: - data += get_link_header_size(datalink); - - // Check for MPLS in VLAN. - if ( ((data[2] << 8) + data[3]) == 0x8847 ) - have_mpls = true; - - data += 4; // Skip the vlan header - pkt_hdr_size = 0; - - // Check for 802.1ah (Q-in-Q) containing IP. - // Only do a second layer of vlan tag - // stripping because there is no - // specification that allows for deeper - // nesting. - if ( ((data[2] << 8) + data[3]) == 0x0800 ) - data += 4; - - break; - - // PPPoE carried over the ethernet frame. 
- case 0x8864: - data += get_link_header_size(datalink); - protocol = (data[6] << 8) + data[7]; - data += 8; // Skip the PPPoE session and PPP header - pkt_hdr_size = 0; - - if ( protocol != 0x0021 && protocol != 0x0057 ) - { - // Neither IPv4 nor IPv6. - sessions->Weird("non_ip_packet_in_pppoe_encapsulation", &hdr, data); - data = 0; - return; - } - break; - } - - break; - } - - case DLT_PPP_SERIAL: - { - // Get PPP protocol. - protocol = (data[2] << 8) + data[3]; - - if ( protocol == 0x0281 ) - { - // MPLS Unicast. Remove the data link layer and - // denote a header size of zero before the IP header. - have_mpls = true; - data += get_link_header_size(datalink); - pkt_hdr_size = 0; - } - - else if ( protocol != 0x0021 && protocol != 0x0057 ) - { - // Neither IPv4 nor IPv6. - sessions->Weird("non_ip_packet_in_ppp_encapsulation", &hdr, data); - data = 0; - return; - } - break; - } - } - - if ( have_mpls ) - { - // Skip the MPLS label stack. - bool end_of_stack = false; - - while ( ! end_of_stack ) - { - end_of_stack = *(data + 2) & 0x01; - data += 4; - } - } - - if ( pseudo_realtime ) - { - current_pseudo = CheckPseudoTime(); - net_packet_dispatch(current_pseudo, &hdr, data, pkt_hdr_size, this); - if ( ! first_wallclock ) - first_wallclock = current_time(true); - } - - else - net_packet_dispatch(current_timestamp, &hdr, data, pkt_hdr_size, this); - - data = 0; - } - -bool PktSrc::GetCurrentPacket(const struct pcap_pkthdr** arg_hdr, - const u_char** arg_pkt) - { - if ( ! last_data ) - return false; - - *arg_hdr = &hdr; - *arg_pkt = last_data; - return true; - } - -int PktSrc::PrecompileFilter(int index, const char* filter) - { - // Compile filter. - BPF_Program* code = new BPF_Program(); - - if ( ! code->Compile(pd, filter, netmask, errbuf, sizeof(errbuf)) ) - { - delete code; - return 0; - } - - // Store it in hash. 
- HashKey* hash = new HashKey(HashKey(bro_int_t(index))); - BPF_Program* oldcode = filters.Lookup(hash); - if ( oldcode ) - delete oldcode; - - filters.Insert(hash, code); - delete hash; - - return 1; - } - -int PktSrc::SetFilter(int index) - { - HashKey* hash = new HashKey(HashKey(bro_int_t(index))); - BPF_Program* code = filters.Lookup(hash); - delete hash; - - if ( ! code ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "No precompiled pcap filter for index %d", - index); - return 0; - } - - if ( pcap_setfilter(pd, code->GetProgram()) < 0 ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_setfilter(%d): %s", - index, pcap_geterr(pd)); - return 0; - } - -#ifndef HAVE_LINUX - // Linux doesn't clear counters when resetting filter. - stats.received = stats.dropped = stats.link = 0; -#endif - - return 1; - } - -void PktSrc::SetHdrSize() - { - int dl = pcap_datalink(pd); - hdr_size = get_link_header_size(dl); - - if ( hdr_size < 0 ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "unknown data link type 0x%x", dl); - Close(); - } - - datalink = dl; - } - -void PktSrc::Close() - { - if ( pd ) - { - pcap_close(pd); - pd = 0; - closed = true; - } - } - -void PktSrc::Statistics(Stats* s) - { - if ( reading_traces ) - s->received = s->dropped = s->link = 0; - - else - { - struct pcap_stat pstat; - if ( pcap_stats(pd, &pstat) < 0 ) - { - reporter->Error("problem getting packet filter statistics: %s", - ErrorMsg()); - s->received = s->dropped = s->link = 0; - } - - else - { - s->dropped = pstat.ps_drop; - s->link = pstat.ps_recv; - } - } - - s->received = stats.received; - - if ( pseudo_realtime ) - s->dropped = 0; - - stats.dropped = s->dropped; - } - -PktInterfaceSrc::PktInterfaceSrc(const char* arg_interface, const char* filter, - PktSrc_Filter_Type ft) -: PktSrc() - { - char tmp_errbuf[PCAP_ERRBUF_SIZE]; - filter_type = ft; - - // Determine interface if not specified. - if ( ! arg_interface && ! 
(arg_interface = pcap_lookupdev(tmp_errbuf)) ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_lookupdev: %s", tmp_errbuf); - return; - } - - interface = copy_string(arg_interface); - - // Determine network and netmask. - uint32 net; - if ( pcap_lookupnet(interface, &net, &netmask, tmp_errbuf) < 0 ) - { - // ### The lookup can fail if no address is assigned to - // the interface; and libpcap doesn't have any useful notion - // of error codes, just error strings - how bogus - so we - // just kludge around the error :-(. - // sprintf(errbuf, "pcap_lookupnet %s", tmp_errbuf); - // return; - net = 0; - netmask = 0xffffff00; - } - - // We use the smallest time-out possible to return almost immediately if - // no packets are available. (We can't use set_nonblocking() as it's - // broken on FreeBSD: even when select() indicates that we can read - // something, we may get nothing if the store buffer hasn't filled up - // yet.) - pd = pcap_open_live(interface, snaplen, 1, 1, tmp_errbuf); - - if ( ! pd ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_open_live: %s", tmp_errbuf); - closed = true; - return; - } - - // ### This needs autoconf'ing. -#ifdef HAVE_PCAP_INT_H - reporter->Info("pcap bufsize = %d\n", ((struct pcap *) pd)->bufsize); -#endif - -#ifdef HAVE_LINUX - if ( pcap_setnonblock(pd, 1, tmp_errbuf) < 0 ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_setnonblock: %s", tmp_errbuf); - pcap_close(pd); - closed = true; - return; - } -#endif - selectable_fd = pcap_fileno(pd); - - if ( PrecompileFilter(0, filter) && SetFilter(0) ) - { - SetHdrSize(); - - if ( closed ) - // Couldn't get header size. 
- return; - - reporter->Info("listening on %s, capture length %d bytes\n", interface, snaplen); - } - else - closed = true; - } - - -PktFileSrc::PktFileSrc(const char* arg_readfile, const char* filter, - PktSrc_Filter_Type ft) -: PktSrc() - { - readfile = copy_string(arg_readfile); - - filter_type = ft; - - pd = pcap_open_offline((char*) readfile, errbuf); - - if ( pd && PrecompileFilter(0, filter) && SetFilter(0) ) - { - SetHdrSize(); - - if ( closed ) - // Unknown link layer type. - return; - - // We don't put file sources into non-blocking mode as - // otherwise we would not be able to identify the EOF. - - selectable_fd = fileno(pcap_file(pd)); - - if ( selectable_fd < 0 ) - reporter->InternalError("OS does not support selectable pcap fd"); - } - else - closed = true; - } - -PktDumper::PktDumper(const char* arg_filename, bool arg_append) - { - filename[0] = '\0'; - is_error = false; - append = arg_append; - dumper = 0; - open_time = 0.0; - - // We need a pcap_t with a reasonable link-layer type. We try to get it - // from the packet sources. If not available, we fall back to Ethernet. - // FIXME: Perhaps we should make this configurable? - int linktype = -1; - - if ( pkt_srcs.length() ) - linktype = pkt_srcs[0]->LinkType(); - - if ( linktype < 0 ) - linktype = DLT_EN10MB; - - pd = pcap_open_dead(linktype, snaplen); - if ( ! pd ) - { - Error("error for pcap_open_dead"); - return; - } - - if ( arg_filename ) - Open(arg_filename); - } - -bool PktDumper::Open(const char* arg_filename) - { - if ( ! arg_filename && ! *filename ) - { - Error("no filename given"); - return false; - } - - if ( arg_filename ) - { - if ( dumper && streq(arg_filename, filename) ) - // Already open. - return true; - - safe_strncpy(filename, arg_filename, FNBUF_LEN); - } - - if ( dumper ) - Close(); - - struct stat s; - int exists = 0; - - if ( append ) - { - // See if output file already exists (and is non-empty). 
- exists = stat(filename, &s); ; - - if ( exists < 0 && errno != ENOENT ) - { - Error(fmt("can't stat file %s: %s", filename, strerror(errno))); - return false; - } - } - - if ( ! append || exists < 0 || s.st_size == 0 ) - { - // Open new file. - dumper = pcap_dump_open(pd, filename); - if ( ! dumper ) - { - Error(pcap_geterr(pd)); - return false; - } - } - - else - { - // Old file and we need to append, which, unfortunately, - // is not supported by libpcap. So, we have to hack a - // little bit, knowing that pcap_dumpter_t is, in fact, - // a FILE ... :-( - dumper = (pcap_dumper_t*) fopen(filename, "a"); - if ( ! dumper ) - { - Error(fmt("can't open dump %s: %s", filename, strerror(errno))); - return false; - } - } - - open_time = network_time; - is_error = false; - return true; - } - -bool PktDumper::Close() - { - if ( dumper ) - { - pcap_dump_close(dumper); - dumper = 0; - is_error = false; - } - - return true; - } - -bool PktDumper::Dump(const struct pcap_pkthdr* hdr, const u_char* pkt) - { - if ( ! dumper ) - return false; - - if ( ! 
open_time ) - open_time = network_time; - - pcap_dump((u_char*) dumper, hdr, pkt); - - return true; - } - -void PktDumper::Error(const char* errstr) - { - safe_strncpy(errbuf, errstr, sizeof(errbuf)); - is_error = true; - } - -int get_link_header_size(int dl) - { - switch ( dl ) { - case DLT_NULL: - return 4; - - case DLT_EN10MB: - return 14; - - case DLT_FDDI: - return 13 + 8; // fddi_header + LLC - -#ifdef DLT_LINUX_SLL - case DLT_LINUX_SLL: - return 16; -#endif - - case DLT_PPP_SERIAL: // PPP_SERIAL - return 4; - - case DLT_RAW: - return 0; - } - - return -1; - } diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index e08ccb1c6d..6b103974fb 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -558,7 +558,7 @@ RemoteSerializer::~RemoteSerializer() delete io; } -void RemoteSerializer::Init() +void RemoteSerializer::Enable() { if ( initialized ) return; diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index 749c09bc5b..f297342cc6 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -28,7 +28,7 @@ public: virtual ~RemoteSerializer(); // Initialize the remote serializer (calling this will fork). - void Init(); + void Enable(); // FIXME: Use SourceID directly (or rename everything to Peer*). typedef SourceID PeerID; diff --git a/src/bro.bif b/src/bro.bif index 1255f05f50..1757a9d12e 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -4459,7 +4459,7 @@ function enable_communication%(%): any return 0; using_communication = 1; - remote_serializer->Init(); + remote_serializer->Enable(); return 0; %} diff --git a/src/iosource/Component.cc b/src/iosource/Component.cc index a9cfa37d34..a285cd8552 100644 --- a/src/iosource/Component.cc +++ b/src/iosource/Component.cc @@ -1,3 +1,4 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
#include "Component.h" diff --git a/src/iosource/Component.h b/src/iosource/Component.h index 35e8f612e6..4a38a9cd22 100644 --- a/src/iosource/Component.h +++ b/src/iosource/Component.h @@ -22,7 +22,10 @@ public: typedef IOSource* (*factory_callback)(); /** - * XXX + * Constructor. + * + * @param name A descriptive name for the component. This name must + * be unique across all components of this type. */ Component(const std::string& name); @@ -37,9 +40,14 @@ public: ~Component(); protected: - /** - * XXXX - */ + /** + * Constructor to use by derived classes. + * + * @param type The type of the component. + * + * @param name A descriptive name for the component. This name must + * be unique across all components of this type. + */ Component(plugin::component::Type type, const std::string& name); }; @@ -48,12 +56,29 @@ protected: */ class PktSrcComponent : public iosource::Component { public: - enum InputType { LIVE, TRACE, BOTH }; + /** + * Type of input a packet source supports. + */ + enum InputType { + LIVE, ///< Live input. + TRACE, ///< Offline input from trace file. + BOTH ///< Live input as well as offline. + }; typedef PktSrc* (*factory_callback)(const std::string& path, bool is_live); /** - * XXX + * Constructor. + * + * @param name A descriptive name for the component. This name must + * be unique across all components of this type. + * + * @param prefixes The list of interface/file prefixes associated + * with this component. + * + * @param type Type of input the component supports. + * + * @param factory Factory function to instantiate component. 
*/ PktSrcComponent(const std::string& name, const std::string& prefixes, InputType type, factory_callback factory); diff --git a/src/iosource/IOSource.h b/src/iosource/IOSource.h index 0e7087a2dd..b00065e02c 100644 --- a/src/iosource/IOSource.h +++ b/src/iosource/IOSource.h @@ -14,56 +14,120 @@ extern "C" { namespace iosource { /** - * Interface class for components providing/consuming data inside Bro's main loop. + * Interface class for components providing/consuming data inside Bro's main + * loop. */ class IOSource { public: + /** + * Constructor. + */ IOSource() { idle = false; closed = false; } + + /** + * Destructor. + */ virtual ~IOSource() {} - // Returns true if source has nothing ready to process. + /** + * Returns true if source has nothing ready to process. + */ bool IsIdle() const { return idle; } - // Returns true if more data is to be expected in the future. - // Otherwise, source may be removed. + /** + * Returns true if more data is to be expected in the future. + * Otherwise, source may be removed. + */ bool IsOpen() const { return ! closed; } - // XXX + /** + * Initializes the source. Can be overwritten by derived classes. + */ virtual void Init() { } - // XXX + /** + * Finalizes the source when it's being closed. Can be overwritten by + * derived classes. + */ virtual void Done() { } - // Returns select'able fds (leaves args untouched if we don't have - // selectable fds). + /** + * Returns select'able file descriptors for this source. Leaves the + * passed values untouched if not available. + * + * @param read Pointer to where to store a read descriptor. + * + * @param write Pointer to where to store a write descriptor. + * + * @param except Pointer to where to store a except descriptor. + */ virtual void GetFds(int* read, int* write, int* except) = 0; - // The following two methods are only called when either IsIdle() - // returns false or select() on one of the fds indicates that there's - // data to process. 
- - // Returns timestamp (in global network time) associated with next - // data item. If the source wants the data item to be processed - // with a local network time, it sets the argument accordingly. + /** + * Returns the timestamp (in \a global network time) associated with + * next data item from this source. If the source wants the data + * item to be processed with a local network time, it sets the + * argument accordingly. + * + * This method will be called only when either IsIdle() returns + * false, or select() on one of the fds returned by GetFds() + * indicates that there's data to process. + * + * Must be overridden by derived classes. + * + * @param network_time A pointer to store the \a local network time + * associated with the next item (as opposed to global network time). + * + * @return The global network time of the next entry, or a value + * smaller than zero if none is available currently. + */ virtual double NextTimestamp(double* network_time) = 0; - // Processes and consumes next data item. + /** + * Processes and consumes next data item. + * + * This method will be called only when either IsIdle() returns + * false, or select() on one of the fds returned by GetFds() + * indicates that there's data to process. + * + * Must be overridden by derived classes. + */ virtual void Process() = 0; - // Returns tag of timer manager associated with last processed - // data item, nil for global timer manager. + /** + * Returns the tag of the timer manager associated with the last + * processed data item. + * + * Can be overridden by derived classes. + * + * @return The tag, or null for the global timer manager. + * + */ virtual TimerMgr::Tag* GetCurrentTag() { return 0; } - // Returns a descriptual tag for debugging. + /** + * Returns a descriptive tag representing the source for debugging. + * + * Can be overridden by derived classes. + * + * @return The debugging name. 
+ */ virtual const char* Tag() = 0; protected: - // Derived classed are to set this to true if they have gone dry - // temporarily. + /* + * Callback for derived classes to call when they have gone dry + * temporarily. + * + * @param is_idle True if the source is idle currently. + */ void SetIdle(bool is_idle) { idle = is_idle; } - // Derived classed are to set this to true if they have gone dry - // temporarily. + /* + * Callback for derived class to call when they have shutdown. + * + * @param is_closed True if the source is now closed. + */ void SetClosed(bool is_closed) { closed = is_closed; } private: diff --git a/src/iosource/Manager.cc b/src/iosource/Manager.cc index ebd92e9527..2983cb1377 100644 --- a/src/iosource/Manager.cc +++ b/src/iosource/Manager.cc @@ -1,3 +1,4 @@ +// See the file "COPYING" in the main distribution directory for copyright. #include #include @@ -22,7 +23,7 @@ Manager::~Manager() { for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i ) { - // ??? (*i)->src->Done(); + (*i)->src->Done(); delete *i; } diff --git a/src/iosource/Manager.h b/src/iosource/Manager.h index bebed61de7..f16461aecb 100644 --- a/src/iosource/Manager.h +++ b/src/iosource/Manager.h @@ -12,43 +12,100 @@ class IOSource; class PktSrc; class PktDumper; +/** + * Singleton class managing all IOSources. + */ class Manager { public: + /** + * Constructor. + */ Manager() { call_count = 0; dont_counts = 0; } + + /** + * Destructor. + */ ~Manager(); - // If dont_count is true, this source does not contribute to the - // number of IOSources returned by Size(). The effect is that - // if all sources but the non-counting ones have gone dry, - // processing will shut down. + /** + * Registers an IOSource with the manager. + * + * @param src The source. The manager takes ownership. + * + * @param dont_count If true, this source does not contribute to the + * number of IOSources returned by Size(). 
The effect is that if all + * sources except for the non-counting ones have gone dry, processing + * will shut down. + */ void Register(IOSource* src, bool dont_count = false); - // This may block for some time. + /** + * Returns the packet source with the soonest available input. This + * may block for a little while if all are dry. + * + * @param ts A pointer where to store the timestamp of the input that + * the soonest source has available next. + * + * @return The source, or null if no source has input. + */ IOSource* FindSoonest(double* ts); + /** + * Returns the number of registered and still active sources, + * excluding those that are registered as \a dont_count. + */ int Size() const { return sources.size() - dont_counts; } typedef std::list PktSrcList; + + /** + * Returns a list of all registered PktSrc instances. This is a + * subset of all registered IOSource instances. + */ const PktSrcList& GetPktSrcs() const { return pkt_srcs; } - // Terminate IOSource processing immediately by removing all - // sources (and therefore returning a Size() of zero). + /** + * Terminate all processing immediately by removing all sources (and + * therefore now returning a Size() of zero). + */ void Terminate() { RemoveAll(); } + /** + * Opens a new packet source. + * + * @param path The interface or file name, as one would give to Bro \c -i. + * + * @param is_live True if \a path represents a live interface, false + * for a file. + * + * @return The new packet source, or null if an error occurred. + */ PktSrc* OpenPktSrc(const std::string& path, bool is_live); + + /** + * Opens a new packet dumper. + * + * @param path The file name to dump into. + * + * @param append True to append if \a path already exists. + * + * @return The new packet dumper, or null if an error occurred. 
+ */ PktDumper* OpenPktDumper(const std::string& path, bool append); -protected: - void Register(PktSrc* src); - - // When looking for a source with something to process, - // every SELECT_FREQUENCY calls we will go ahead and - // block on a select(). +private: + /** + * When looking for a source with something to process, every + * SELECT_FREQUENCY calls we will go ahead and block on a select(). + */ static const int SELECT_FREQUENCY = 25; - // Microseconds to wait in an empty select if no source is ready. + /** + * Microseconds to wait in an empty select if no source is ready. + */ static const int SELECT_TIMEOUT = 50; + void Register(PktSrc* src); void RemoveAll(); unsigned int call_count; diff --git a/src/iosource/PktDumper.h b/src/iosource/PktDumper.h index 5e35bf1ca7..56555c247a 100644 --- a/src/iosource/PktDumper.h +++ b/src/iosource/PktDumper.h @@ -7,6 +7,9 @@ namespace iosource { +/** + * Base class for packet dumpers. + */ class PktDumper { public: /** diff --git a/src/iosource/PktSrc.cc b/src/iosource/PktSrc.cc index 902aaa04be..4bfcd230b5 100644 --- a/src/iosource/PktSrc.cc +++ b/src/iosource/PktSrc.cc @@ -387,13 +387,13 @@ void PktSrc::Process() if ( pseudo_realtime ) { current_pseudo = CheckPseudoTime(); - net_packet_dispatch(current_pseudo, current_packet.hdr, current_packet.data, pkt_hdr_size, this); + net_packet_dispatch(current_pseudo, current_packet.hdr, data, pkt_hdr_size, this); if ( ! first_wallclock ) first_wallclock = current_time(true); } else - net_packet_dispatch(current_packet.ts, current_packet.hdr, current_packet.data, pkt_hdr_size, this); + net_packet_dispatch(current_packet.ts, current_packet.hdr, data, pkt_hdr_size, this); have_packet = 0; DoneWithPacket(); diff --git a/src/iosource/PktSrc.h b/src/iosource/PktSrc.h index 75fd2633d0..c126d19c34 100644 --- a/src/iosource/PktSrc.h +++ b/src/iosource/PktSrc.h @@ -11,6 +11,9 @@ declare(PDict,BPF_Program); namespace iosource { +/** + * Base class for packet sources. 
+ */ class PktSrc : public IOSource { public: /** diff --git a/src/iosource/pcap/Dumper.h b/src/iosource/pcap/Dumper.h index 8013afcb8e..7950912d56 100644 --- a/src/iosource/pcap/Dumper.h +++ b/src/iosource/pcap/Dumper.h @@ -1,3 +1,4 @@ +// See the file in the main distribution directory for copyright. #ifndef IOSOURCE_PKTSRC_PCAP_DUMPER_H #define IOSOURCE_PKTSRC_PCAP_DUMPER_H diff --git a/src/iosource/pcap/Source.cc b/src/iosource/pcap/Source.cc index 96e0bb48e5..1e1281dfa6 100644 --- a/src/iosource/pcap/Source.cc +++ b/src/iosource/pcap/Source.cc @@ -1,3 +1,4 @@ +// See the file in the main distribution directory for copyright. #include diff --git a/src/main.cc b/src/main.cc index bdd3d7072b..92a783e44d 100644 --- a/src/main.cc +++ b/src/main.cc @@ -377,18 +377,14 @@ void terminate_bro() delete broxygen_mgr; delete timer_mgr; - delete dns_mgr; delete persistence_serializer; - delete event_player; delete event_serializer; delete state_serializer; delete event_registry; - delete remote_serializer; delete analyzer_mgr; delete file_mgr; delete log_mgr; delete plugin_mgr; - delete thread_mgr; delete reporter; delete iosource_mgr; @@ -841,6 +837,7 @@ int main(int argc, char** argv) // policy, but we can't parse policy without DNS resolution. 
dns_mgr->SetDir(".state"); + iosource_mgr = new iosource::Manager(); persistence_serializer = new PersistenceSerializer(); remote_serializer = new RemoteSerializer(); event_registry = new EventRegistry(); @@ -848,7 +845,6 @@ int main(int argc, char** argv) log_mgr = new logging::Manager(); input_mgr = new input::Manager(); file_mgr = new file_analysis::Manager(); - iosource_mgr = new iosource::Manager(); plugin_mgr->InitPreScript(); analyzer_mgr->InitPreScript(); @@ -908,6 +904,7 @@ int main(int argc, char** argv) analyzer_mgr->InitPostScript(); file_mgr->InitPostScript(); + dns_mgr->InitPostScript(); if ( parse_only ) { diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index bcd32fa94c..1a8685c86a 100644 --- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2014-08-14-04-31-10 +#open 2014-09-06-01-19-42 #fields name #types string scripts/base/init-bare.bro @@ -43,6 +43,7 @@ scripts/base/init-bare.bro scripts/base/frameworks/files/magic/__load__.bro build/scripts/base/bif/__load__.bro build/scripts/base/bif/broxygen.bif.bro + build/scripts/base/bif/pcap.bif.bro build/scripts/base/bif/bloom-filter.bif.bro build/scripts/base/bif/cardinality-counter.bif.bro build/scripts/base/bif/top-k.bif.bro @@ -113,4 +114,4 @@ scripts/base/init-bare.bro build/scripts/base/bif/plugins/Bro_SQLiteWriter.sqlite.bif.bro scripts/policy/misc/loaded-scripts.bro scripts/base/utils/paths.bro -#close 2014-08-14-04-31-10 +#close 2014-09-06-01-19-42 From 5c6dfb240814f61f032fab9fbd05497fe3c6040b Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sat, 6 Sep 2014 10:34:24 -0700 Subject: [PATCH 052/106] Fixing link-layer handling. Something had gotten mixed up here. 
--- src/iosource/PktSrc.cc | 32 +++++++++++-------- .../canonified_loaded_scripts.log | 5 +-- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/src/iosource/PktSrc.cc b/src/iosource/PktSrc.cc index 4bfcd230b5..1d982fc124 100644 --- a/src/iosource/PktSrc.cc +++ b/src/iosource/PktSrc.cc @@ -291,8 +291,7 @@ void PktSrc::Process() && protocol != 30 ) { Weird("non_ip_packet_in_null_transport", ¤t_packet); - data = 0; - return; + goto done; } break; @@ -307,12 +306,21 @@ void PktSrc::Process() { // MPLS carried over the ethernet frame. case 0x8847: + // Remove the data link layer and denote a + // header size of zero before the IP header. have_mpls = true; + data += GetLinkHeaderSize(props.link_type); + pkt_hdr_size = 0; break; // VLAN carried over the ethernet frame. case 0x8100: data += GetLinkHeaderSize(props.link_type); + + // Check for MPLS in VLAN. + if ( ((data[2] << 8) + data[3]) == 0x8847 ) + have_mpls = true; + data += 4; // Skip the vlan header pkt_hdr_size = 0; @@ -337,8 +345,7 @@ void PktSrc::Process() { // Neither IPv4 nor IPv6. Weird("non_ip_packet_in_pppoe_encapsulation", ¤t_packet); - data = 0; - return; + goto done; } break; } @@ -352,15 +359,19 @@ void PktSrc::Process() protocol = (data[2] << 8) + data[3]; if ( protocol == 0x0281 ) - // MPLS Unicast + { + // MPLS Unicast. Remove the data link layer and + // denote a header size of zero before the IP header. have_mpls = true; + data += GetLinkHeaderSize(props.link_type); + pkt_hdr_size = 0; + } else if ( protocol != 0x0021 && protocol != 0x0057 ) { // Neither IPv4 nor IPv6. Weird("non_ip_packet_in_ppp_encapsulation", ¤t_packet); - data = 0; - return; + goto done; } break; } @@ -368,12 +379,6 @@ void PktSrc::Process() if ( have_mpls ) { - // Remove the data link layer - data += GetLinkHeaderSize(props.link_type); - - // Denote a header size of zero before the IP header - pkt_hdr_size = 0; - // Skip the MPLS label stack. 
bool end_of_stack = false; @@ -395,6 +400,7 @@ void PktSrc::Process() else net_packet_dispatch(current_packet.ts, current_packet.hdr, data, pkt_hdr_size, this); +done: have_packet = 0; DoneWithPacket(); } diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index b100d86ecb..ebcb980eec 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2014-08-14-04-31-11 +#open 2014-09-06-01-20-32 #fields name #types string scripts/base/init-bare.bro @@ -43,6 +43,7 @@ scripts/base/init-bare.bro scripts/base/frameworks/files/magic/__load__.bro build/scripts/base/bif/__load__.bro build/scripts/base/bif/broxygen.bif.bro + build/scripts/base/bif/pcap.bif.bro build/scripts/base/bif/bloom-filter.bif.bro build/scripts/base/bif/cardinality-counter.bif.bro build/scripts/base/bif/top-k.bif.bro @@ -242,4 +243,4 @@ scripts/base/init-default.bro scripts/base/misc/find-checksum-offloading.bro scripts/base/misc/find-filtered-trace.bro scripts/policy/misc/loaded-scripts.bro -#close 2014-08-14-04-31-11 +#close 2014-09-06-01-20-32 From 7a46a70b77f2e70dc2cc9a658cb2326f0e19f607 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 8 Sep 2014 18:04:03 -0500 Subject: [PATCH 053/106] BIT-1240: Fix MIME entity file data/gap ordering. MIME entities buffered data and passed it along to protocol analyzers in discrete amounts, but a gap is always passed along right away, so the ordering of these "events" can cause incorrect file analysis. The change here is to never leave any MIME data buffered -- it should now be passed along line by line as it is seen, but may still temporarily make use of a buffer allocated by the analyzer as it works on decoding content. 
--- src/analyzer/protocol/mime/MIME.cc | 16 +- src/analyzer/protocol/mime/MIME.h | 1 + src/file_analysis/File.cc | 2 +- .../out | 18 +- .../files.log | 6 +- .../files.log | 6 +- .../all-events-no-args.log | 399 +++- .../all-events.log | 2001 ++++++++++++++++- 8 files changed, 2422 insertions(+), 27 deletions(-) diff --git a/src/analyzer/protocol/mime/MIME.cc b/src/analyzer/protocol/mime/MIME.cc index 6f992c9256..a8f0598b87 100644 --- a/src/analyzer/protocol/mime/MIME.cc +++ b/src/analyzer/protocol/mime/MIME.cc @@ -643,11 +643,7 @@ void MIME_Entity::EndOfData() if ( content_encoding == CONTENT_ENCODING_BASE64 ) FinishDecodeBase64(); - if ( data_buf_offset > 0 ) - { - SubmitData(data_buf_offset, data_buf_data); - data_buf_offset = -1; - } + FlushData(); } message->EndEntity (this); @@ -1001,6 +997,7 @@ void MIME_Entity::DecodeDataLine(int len, const char* data, int trailing_CRLF) DecodeBinary(len, data, trailing_CRLF); break; } + FlushData(); } void MIME_Entity::DecodeBinary(int len, const char* data, int trailing_CRLF) @@ -1179,6 +1176,15 @@ void MIME_Entity::DataOctets(int len, const char* data) } } +void MIME_Entity::FlushData() + { + if ( data_buf_offset > 0 ) + { + SubmitData(data_buf_offset, data_buf_data); + data_buf_offset = -1; + } + } + void MIME_Entity::SubmitHeader(MIME_Header* h) { message->SubmitHeader(h); diff --git a/src/analyzer/protocol/mime/MIME.h b/src/analyzer/protocol/mime/MIME.h index 2b2f88105d..b113c40c66 100644 --- a/src/analyzer/protocol/mime/MIME.h +++ b/src/analyzer/protocol/mime/MIME.h @@ -133,6 +133,7 @@ protected: int GetDataBuffer(); void DataOctet(char ch); void DataOctets(int len, const char* data); + void FlushData(); virtual void SubmitData(int len, const char* buf); virtual void SubmitHeader(MIME_Header* h); diff --git a/src/file_analysis/File.cc b/src/file_analysis/File.cc index 50d7d48336..4509fc7d42 100644 --- a/src/file_analysis/File.cc +++ b/src/file_analysis/File.cc @@ -437,7 +437,7 @@ void File::EndOfFile() void 
File::Gap(uint64 offset, uint64 len) { DBG_LOG(DBG_FILE_ANALYSIS, "[%s] Gap of size %" PRIu64 " at offset %" PRIu64, - id.c_str(), offset, len); + id.c_str(), len, offset); analyzers.DrainModifications(); diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.actions.data_event/out b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.actions.data_event/out index cbd60840bf..bc0ccff221 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.actions.data_event/out +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.actions.data_event/out @@ -5,16 +5,22 @@ FILE_BOF_BUFFER MIME_TYPE text/plain FILE_OVER_NEW_CONNECTION -file_stream, file #0, 1500, ^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. 
(Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea -file_chunk, file #0, 1500, 0, ^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea +file_stream, file #0, 1146, ^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. 
(Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J +file_chunk, file #0, 1146, 0, ^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. 
Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J +file_stream, file #0, 354, rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea +file_chunk, file #0, 354, 1146, rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea file_stream, file #0, 1024, se script, which could pick out the wrong^J tag. (Robin Sommer)^J^J0.21 | 2011-10-27 17:40:45 -0700^J^J * Fixing bro-cut's usage message and argument error handling. (Robin Sommer)^J^J * Bugfix in update-changes script. (Robin Sommer)^J^J * update-changes now ignores commits it did itself. (Robin Sommer)^J^J * Fix a bug in the update-changes script. (Robin Sommer)^J^J * bro-cut now always installs to $prefix/bin by `make install`. (Jon Siwek)^J^J * Options to adjust time format for bro-cut. (Robin Sommer)^J^J The default with -d is now ISO format. The new option "-D "^J specifies a custom strftime()-style format string. Alternatively,^J the environment variable BRO_CUT_TIMEFMT can set the format as^J well.^J^J * bro-cut now understands the field separator header. 
(Robin Sommer)^J^J * Renaming options -h/-H -> -c/-C, and doing some general cleanup.^J^J0.2 | 2011-10-25 19:53:57 -0700^J^J * Adding support for replacing version string in a setup.py. (Robin^J Sommer)^J^J * Change generated root cert DN indices f file_chunk, file #0, 1024, 1500, se script, which could pick out the wrong^J tag. (Robin Sommer)^J^J0.21 | 2011-10-27 17:40:45 -0700^J^J * Fixing bro-cut's usage message and argument error handling. (Robin Sommer)^J^J * Bugfix in update-changes script. (Robin Sommer)^J^J * update-changes now ignores commits it did itself. (Robin Sommer)^J^J * Fix a bug in the update-changes script. (Robin Sommer)^J^J * bro-cut now always installs to $prefix/bin by `make install`. (Jon Siwek)^J^J * Options to adjust time format for bro-cut. (Robin Sommer)^J^J The default with -d is now ISO format. The new option "-D "^J specifies a custom strftime()-style format string. Alternatively,^J the environment variable BRO_CUT_TIMEFMT can set the format as^J well.^J^J * bro-cut now understands the field separator header. (Robin Sommer)^J^J * Renaming options -h/-H -> -c/-C, and doing some general cleanup.^J^J0.2 | 2011-10-25 19:53:57 -0700^J^J * Adding support for replacing version string in a setup.py. (Robin^J Sommer)^J^J * Change generated root cert DN indices f -file_stream, file #0, 476, ormat for RFC2253^J compliance. (Jon Siwek)^J^J * New tool devel-tools/check-release to run before making releases.^J (Robin Sommer)^J^J * devel-tools/update-changes gets a new option -a to amend to^J previous commit if possible. Default is now not to (used to be the^J opposite). (Robin Sommer)^J^J * Change Mozilla trust root generation to index certs by subject DN. (Jon Siwek)^J^J * Change distclean to only remove build dir. (Jon Siwek)^J^J * Make dist now cleans the -file_chunk, file #0, 476, 2524, ormat for RFC2253^J compliance. 
(Jon Siwek)^J^J * New tool devel-tools/check-release to run before making releases.^J (Robin Sommer)^J^J * devel-tools/update-changes gets a new option -a to amend to^J previous commit if possible. Default is now not to (used to be the^J opposite). (Robin Sommer)^J^J * Change Mozilla trust root generation to index certs by subject DN. (Jon Siwek)^J^J * Change distclean to only remove build dir. (Jon Siwek)^J^J * Make dist now cleans the +file_stream, file #0, 70, ormat for RFC2253^J compliance. (Jon Siwek)^J^J * New tool devel-tool +file_chunk, file #0, 70, 2524, ormat for RFC2253^J compliance. (Jon Siwek)^J^J * New tool devel-tool +file_stream, file #0, 406, s/check-release to run before making releases.^J (Robin Sommer)^J^J * devel-tools/update-changes gets a new option -a to amend to^J previous commit if possible. Default is now not to (used to be the^J opposite). (Robin Sommer)^J^J * Change Mozilla trust root generation to index certs by subject DN. (Jon Siwek)^J^J * Change distclean to only remove build dir. (Jon Siwek)^J^J * Make dist now cleans the +file_chunk, file #0, 406, 2594, s/check-release to run before making releases.^J (Robin Sommer)^J^J * devel-tools/update-changes gets a new option -a to amend to^J previous commit if possible. Default is now not to (used to be the^J opposite). (Robin Sommer)^J^J * Change Mozilla trust root generation to index certs by subject DN. (Jon Siwek)^J^J * Change distclean to only remove build dir. (Jon Siwek)^J^J * Make dist now cleans the file_stream, file #0, 1024, copied source (Jon Siwek)^J^J * Small tweak to make-release for forced git-clean. (Jon Siwek)^J^J * Fix to not let updates scripts loose their executable permissions.^J (Robin Sommer)^J^J * devel-tools/update-changes now looks for a 'release' tag to^J idenfify the stable version, and 'beta' for the beta versions.^J (Robin Sommer).^J^J * Distribution cleanup. 
(Robin Sommer)^J^J * New script devel-tools/make-release to create source tar balls.^J (Robin Sommer)^J^J * Removing bdcat. With the new log format, this isn't very useful^J anymore. (Robin Sommer)^J^J * Adding script that shows all pending git fastpath commits. (Robin^J Sommer)^J^J * Script to measure CPU time by loading an increasing set of^J scripts. (Robin Sommer)^J^J * extract-conn script now deals wit *.gz files. (Robin Sommer)^J^J * Tiny update to output a valid CA list file for SSL cert^J validation. (Seth Hall)^J^J * Adding "install-aux" target. Addresses #622. (Jon Siwek)^J^J * Distribution cleanup. (Jon Siwek and Robin Sommer)^J^J * FindPCAP file_chunk, file #0, 1024, 3000, copied source (Jon Siwek)^J^J * Small tweak to make-release for forced git-clean. (Jon Siwek)^J^J * Fix to not let updates scripts loose their executable permissions.^J (Robin Sommer)^J^J * devel-tools/update-changes now looks for a 'release' tag to^J idenfify the stable version, and 'beta' for the beta versions.^J (Robin Sommer).^J^J * Distribution cleanup. (Robin Sommer)^J^J * New script devel-tools/make-release to create source tar balls.^J (Robin Sommer)^J^J * Removing bdcat. With the new log format, this isn't very useful^J anymore. (Robin Sommer)^J^J * Adding script that shows all pending git fastpath commits. (Robin^J Sommer)^J^J * Script to measure CPU time by loading an increasing set of^J scripts. (Robin Sommer)^J^J * extract-conn script now deals wit *.gz files. (Robin Sommer)^J^J * Tiny update to output a valid CA list file for SSL cert^J validation. (Seth Hall)^J^J * Adding "install-aux" target. Addresses #622. (Jon Siwek)^J^J * Distribution cleanup. (Jon Siwek and Robin Sommer)^J^J * FindPCAP -file_stream, file #0, 476, now links against thread library when necessary (e.g.^J PF_RING's libpcap) (Jon Siwek)^J^J * Install binaries with an RPATH (Jon Siwek)^J^J * Workaround for FreeBSD CMake port missing debug flags (Jon Siwek)^J^J * Rewrite of the update-changes script. 
(Robin Sommer)^J^J0.1-1 | 2011-06-14 21:12:41 -0700^J^J * Add a script for generating Mozilla's CA list for the SSL analyzer.^J (Seth Hall)^J^J0.1 | 2011-04-01 16:28:22 -0700^J^J * Converting build process to CMake. (Jon Siwek)^J -file_chunk, file #0, 476, 4024, now links against thread library when necessary (e.g.^J PF_RING's libpcap) (Jon Siwek)^J^J * Install binaries with an RPATH (Jon Siwek)^J^J * Workaround for FreeBSD CMake port missing debug flags (Jon Siwek)^J^J * Rewrite of the update-changes script. (Robin Sommer)^J^J0.1-1 | 2011-06-14 21:12:41 -0700^J^J * Add a script for generating Mozilla's CA list for the SSL analyzer.^J (Seth Hall)^J^J0.1 | 2011-04-01 16:28:22 -0700^J^J * Converting build process to CMake. (Jon Siwek)^J +file_stream, file #0, 18, now links against +file_chunk, file #0, 18, 4024, now links against +file_stream, file #0, 458, thread library when necessary (e.g.^J PF_RING's libpcap) (Jon Siwek)^J^J * Install binaries with an RPATH (Jon Siwek)^J^J * Workaround for FreeBSD CMake port missing debug flags (Jon Siwek)^J^J * Rewrite of the update-changes script. (Robin Sommer)^J^J0.1-1 | 2011-06-14 21:12:41 -0700^J^J * Add a script for generating Mozilla's CA list for the SSL analyzer.^J (Seth Hall)^J^J0.1 | 2011-04-01 16:28:22 -0700^J^J * Converting build process to CMake. (Jon Siwek)^J +file_chunk, file #0, 458, 4042, thread library when necessary (e.g.^J PF_RING's libpcap) (Jon Siwek)^J^J * Install binaries with an RPATH (Jon Siwek)^J^J * Workaround for FreeBSD CMake port missing debug flags (Jon Siwek)^J^J * Rewrite of the update-changes script. (Robin Sommer)^J^J0.1-1 | 2011-06-14 21:12:41 -0700^J^J * Add a script for generating Mozilla's CA list for the SSL analyzer.^J (Seth Hall)^J^J0.1 | 2011-04-01 16:28:22 -0700^J^J * Converting build process to CMake. (Jon Siwek)^J file_stream, file #0, 205, ^J * Removing cf/hf/ca-* from distribution. The README has a note where^J to find them now. (Robin Sommer)^J^J * General cleanup. 
(Robin Sommer)^J^J * Initial import of bro/aux from SVN r7088. (Jon Siwek)^J file_chunk, file #0, 205, 4500, ^J * Removing cf/hf/ca-* from distribution. The README has a note where^J to find them now. (Robin Sommer)^J^J * General cleanup. (Robin Sommer)^J^J * Initial import of bro/aux from SVN r7088. (Jon Siwek)^J FILE_STATE_REMOVE diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.register_mime_type/files.log b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.register_mime_type/files.log index 44a90b9ee6..b836d14e47 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.register_mime_type/files.log +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.register_mime_type/files.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path files -#open 2014-07-21-14-26-07 +#open 2014-09-08-21-50-32 #fields ts fuid tx_hosts rx_hosts conn_uids source depth analyzers mime_type filename duration local_orig is_orig seen_bytes total_bytes missing_bytes overflow_bytes timedout parent_fuid md5 sha1 sha256 extracted #types time string set[addr] set[addr] set[string] string count set[string] string string interval bool bool count count count count bool string string string string string -1362692527.009721 FakNcS1Jfe01uljb3 192.150.187.43 141.142.228.5 CXWv6p3arKYeMETxOg HTTP 0 MD5 text/plain - 0.000054 - F 4705 4705 0 0 F - 397168fd09991a0e712254df7bc639ac - - - -#close 2014-07-21-14-26-07 +1362692527.009512 FakNcS1Jfe01uljb3 192.150.187.43 141.142.228.5 CXWv6p3arKYeMETxOg HTTP 0 MD5 text/plain - 0.000263 - F 4705 4705 0 0 F - 397168fd09991a0e712254df7bc639ac - - - +#close 2014-09-08-21-50-32 diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.logging/files.log b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.logging/files.log index cc185a4f1b..daf862e3b9 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.logging/files.log +++ 
b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.logging/files.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path files -#open 2014-04-01-23-13-35 +#open 2014-09-08-21-55-01 #fields ts fuid tx_hosts rx_hosts conn_uids source depth analyzers mime_type filename duration local_orig is_orig seen_bytes total_bytes missing_bytes overflow_bytes timedout parent_fuid md5 sha1 sha256 extracted #types time string set[addr] set[addr] set[string] string count set[string] string string interval bool bool count count count count bool string string string string string -1362692527.009721 FakNcS1Jfe01uljb3 192.150.187.43 141.142.228.5 CXWv6p3arKYeMETxOg HTTP 0 SHA256,DATA_EVENT,MD5,EXTRACT,SHA1 text/plain - 0.000054 - F 4705 4705 0 0 F - 397168fd09991a0e712254df7bc639ac 1dd7ac0398df6cbc0696445a91ec681facf4dc47 4e7c7ef0984119447e743e3ec77e1de52713e345cde03fe7df753a35849bed18 FakNcS1Jfe01uljb3-file -#close 2014-04-01-23-13-35 +1362692527.009512 FakNcS1Jfe01uljb3 192.150.187.43 141.142.228.5 CXWv6p3arKYeMETxOg HTTP 0 SHA256,DATA_EVENT,MD5,EXTRACT,SHA1 text/plain - 0.000263 - F 4705 4705 0 0 F - 397168fd09991a0e712254df7bc639ac 1dd7ac0398df6cbc0696445a91ec681facf4dc47 4e7c7ef0984119447e743e3ec77e1de52713e345cde03fe7df753a35849bed18 FakNcS1Jfe01uljb3-file +#close 2014-09-08-21-55-01 diff --git a/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events-no-args.log b/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events-no-args.log index 6de44b1fbf..3ef7de657c 100644 --- a/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events-no-args.log +++ b/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events-no-args.log @@ -55,6 +55,18 @@ 1254722770.692743 mime_one_header 1254722770.692743 mime_one_header 1254722770.692743 get_file_handle +1254722770.692743 get_file_handle +1254722770.692743 get_file_handle +1254722770.692743 get_file_handle +1254722770.692743 get_file_handle +1254722770.692743 get_file_handle +1254722770.692743 
get_file_handle +1254722770.692743 get_file_handle +1254722770.692743 get_file_handle +1254722770.692743 get_file_handle +1254722770.692743 get_file_handle +1254722770.692743 get_file_handle +1254722770.692743 get_file_handle 1254722770.692743 mime_end_entity 1254722770.692743 get_file_handle 1254722770.692743 file_new @@ -64,9 +76,83 @@ 1254722770.692743 mime_begin_entity 1254722770.692743 mime_one_header 1254722770.692743 mime_one_header +1254722770.692743 get_file_handle +1254722770.692743 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle 1254722770.692786 get_file_handle 1254722770.692786 file_new 1254722770.692786 file_over_new_connection +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle 
+1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692786 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle 1254722770.692804 get_file_handle 1254722770.692804 mime_end_entity 1254722770.692804 get_file_handle @@ -79,20 +165,331 @@ 1254722770.692804 mime_one_header 1254722770.692804 mime_one_header 1254722770.692804 mime_one_header +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 
get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692804 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle 1254722770.692823 get_file_handle 1254722770.692823 file_new 1254722770.692823 file_over_new_connection 1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle +1254722770.692823 get_file_handle 1254722770.695115 new_connection 1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 
get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.469814 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 
get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle +1254722771.494181 get_file_handle 1254722771.494181 get_file_handle 1254722771.494181 get_file_handle 1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 
get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.494199 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 get_file_handle +1254722771.834628 
get_file_handle 1254722771.834628 get_file_handle 1254722771.834655 get_file_handle 1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.834655 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 
get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle +1254722771.858316 get_file_handle 1254722771.858316 get_file_handle -1254722771.858334 get_file_handle 1254722771.858334 mime_end_entity 1254722771.858334 get_file_handle 1254722771.858334 file_state_remove diff --git a/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log b/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log index b8f576e497..c1ca12f400 100644 --- a/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log +++ b/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log @@ -296,6 +296,66 @@ [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, 
rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] [2] is_orig: bool = F +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [2] is_orig: bool = F + 1254722770.692743 mime_end_entity [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] @@ -331,19 +391,389 @@ [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TRANSFER-ENCODING, value=quoted-printable] +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692743 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + +1254722770.692786 get_file_handle + [0] tag: enum = Analyzer::ANALYZER_SMTP + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [2] is_orig: bool = F + 1254722770.692786 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [2] is_orig: bool = F 1254722770.692786 file_new - [0] f: fa_file = [id=Ft4M3f2yMvLlmwtbq9, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=]^J}, last_active=1254722770.692786, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^M^J^M^J^M^J^M^J^M^J