diff --git a/CHANGES b/CHANGES
index ec9e145188..286e9abe76 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,52 +1,96 @@
+1.6-dev.39 Mon Jan 31 16:42:23 PST 2011
+
+- Login's confused messages now go through weird.bro. (Robin Sommer)
+
+1.6-dev.36 Mon Jan 31 08:45:35 PST 2011
+
+- Adding more configure options for finding dependencies. (Jon Siwek)
+
+    --with-flex=PATH        path to flex executable
+    --with-bison=PATH       path to bison executable
+    --with-perl=PATH        path to perl executable
+    --with-python=PATH      path to Python interpreter
+    --with-python-lib=PATH  path to libpython
+    --with-python-inc=PATH  path to Python headers
+    --with-swig=PATH        path to SWIG executable
+
+- Fixing typo in PCAPTests.cmake. (Jon Siwek)
+
+
+1.6-dev.33 Mon Jan 24 15:29:04 PST 2011
+
+- Fixing bug in SMB analyzer. (Robin Sommer)
+
+- Configure wrapper now deletes previous CMake cache. (Jon Siwek)
+
+- Fix for the --with-binpac configure option. (Jon Siwek)
+
+1.6-dev.30 Thu Jan 20 16:32:43 PST 2011
+
+- Changed configure wrapper to create config.status. (Jon Siwek)
+
+1.6-dev.29 Thu Jan 20 16:29:56 PST 2011
+
+- Fixing little problem with initialization of Bro-to-Bro event
+  communication. (Christian Kreibich)
+
+
+1.6-dev.27 Thu Jan 20 13:52:25 PST 2011
+
+- Fine-tuning of the HTTP analyzer in terms of raising protocol
+  violations and interrupted transfers. (Gregor Maier)
+
+
 1.6-dev.21 Wed Jan 19 17:36:02 PST 2011

- * Added 4 new BiFs and a new record type for testing the entropy
-   of strings. (Seth Hall)
+- Added 4 new BiFs and a new record type for testing the entropy of
+  strings. (Seth Hall)

-      find_entropy(data: string): entropy_test_result
-          This is a one shot function that accepts a string
-          and returns the result of the entropy calculations.
+      find_entropy(data: string): entropy_test_result
+          This is a one-shot function that accepts a string and
+          returns the result of the entropy calculations.

-      entropy_test_init(index: any): bool
-          This and the next two functions are for calculating
-          entropy piece-wise. It only needs an index which can
-          be any type of variable. It needs to be something
-          that uniquely identifies the data stream that is
-          currently having it's entropy calculated.
+      entropy_test_init(index: any): bool
+          This and the next two functions are for calculating entropy
+          piece-wise. It only needs an index which can be any type of
+          variable. It needs to be something that uniquely identifies
+          the data stream that is currently having its entropy
+          calculated.

-      entropy_test_add(index: any, data: string): bool
-          This function is used to add data into the entropy
-          calculation. It takes the index used in the function
-          above and the data that you are adding and returns
-          true if everything seemed to work, false otherwise.
+      entropy_test_add(index: any, data: string): bool
+          This function is used to add data into the entropy
+          calculation. It takes the index used in the function above
+          and the data that you are adding and returns true if
+          everything seemed to work, false otherwise.

-      entropy_test_finish(index: any): entropy_test_result
-          Calling this function indicates that all of the
-          desired data has been inserted into the
-          entropy_test_add function and the entropy should be
-          calculated. This function *must* be called in order
-          to clean up an internal state tracking variable. If
-          this is never called on an index, it will result in
-          a memory leak.
+      entropy_test_finish(index: any): entropy_test_result
+          Calling this function indicates that all of the desired data
+          has been inserted into the entropy_test_add function and the
+          entropy should be calculated. This function *must* be called
+          in order to clean up an internal state tracking variable.
+          If this is never called on an index, it will result in a
+          memory leak.

-  The entropy_test_result values have several measures of the
-  entropy, but a good one to work with is the "entropy" attribute.
-  It's a double and as the value approaches 8.0 it can be
-  considered more and more random. For example, a value of 7.832
-  would be quite random but a value of 4.671 is not very random.
+  The entropy_test_result values have several measures of the
+  entropy, but a good one to work with is the "entropy" attribute.
+  It's a double and as the value approaches 8.0 it can be considered
+  more and more random. For example, a value of 7.832 would be
+  quite random but a value of 4.671 is not very random.

 1.6-dev.20 Wed Jan 19 17:30:11 PST 2011

- * BRO_DNS_FAKE is now listed in the --help output. (Seth Hall)
+- BRO_DNS_FAKE is now listed in the --help output. (Seth Hall)
+

 1.6-dev.18 Wed Jan 19 16:37:13 PST 2011

- * Removing unnecessary expire timer from http_sessions. (Gregor
-   Maier)
+- Removing unnecessary expire timer from http_sessions. (Gregor
+  Maier)
+

 1.6-dev.16 Sat Jan 15 14:14:21 PST 2011

-- Updates to the build system (Jonathan Siwek)
+- Updates to the build system. (Jonathan Siwek)

   * ``make dist`` is now available to be used with the top-level
     Makefile for creating source packages according to #344.
@@ -67,6 +111,7 @@
   configured/built now works (although, a harmless error message
   about not being able to write the install manifest may occur).
+

 1.6-dev.3 Wed Dec 8 04:09:38 PST 2010

 - Merge with Subversion repository as of r7137.
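For illustration, a minimal Bro script sketch of how the entropy BiFs added in 1.6-dev.21 might be wired up. The events hooked, the 7.0 threshold, and the names below are assumptions made for the example; they are not part of the change itself.

    event new_connection(c: connection)
        {
        # Begin a piece-wise calculation; c$id uniquely identifies the stream.
        entropy_test_init(c$id);
        }

    # entropy_test_add(c$id, chunk) would be called from whatever event
    # delivers payload chunks for the connection in a given policy.

    event connection_state_remove(c: connection)
        {
        # Finishing is mandatory even if no data was added; otherwise the
        # internal tracking state for this index is never cleaned up.
        local res = entropy_test_finish(c$id);

        if ( res$entropy > 7.0 )
            print fmt("%s payload looks nearly random (entropy: %f)",
                      id_string(c$id), res$entropy);
        }

    # One-shot form for a single string:
    #     local r = find_entropy(payload);
    #     print r$entropy;

Keying the piece-wise calls on c$id follows the requirement above that the index uniquely identify the data stream being measured.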
Incorporated change: diff --git a/CMakeLists.txt b/CMakeLists.txt index 005c5aec15..e048305c8d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -77,7 +77,8 @@ FindRequiredPackage(PCAP) FindRequiredPackage(OpenSSL) FindRequiredPackage(BIND) -if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/aux/binpac/CMakeLists.txt) +if (NOT BinPAC_ROOT_DIR AND + EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/aux/binpac/CMakeLists.txt) add_subdirectory(aux/binpac) endif () FindRequiredPackage(BinPAC) diff --git a/VERSION b/VERSION index 1d53112687..80eb02f177 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.6-dev.21 +1.6-dev.39 diff --git a/aux/binpac b/aux/binpac index 898cfd5ddc..26d02716f9 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 898cfd5ddc8cd356e4052c0bd699e51812a91e98 +Subproject commit 26d02716f9090651f319a4bfdf8ede49b3a7b53a diff --git a/aux/bro-aux b/aux/bro-aux index d741ee2ebd..afa0a0d8b3 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit d741ee2ebd6576d9329218bfb53941b4de5375b6 +Subproject commit afa0a0d8b3fdfa5306507948f08ac9f07696eb21 diff --git a/aux/broccoli b/aux/broccoli index c745d747ec..2b8a1c9c32 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit c745d747ec65f608bead605fc26f84ca44be21c9 +Subproject commit 2b8a1c9c32dab2da9ebb54238c1b60e40bb8688f diff --git a/aux/broctl b/aux/broctl index 4133635936..572efd43cf 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 41336359365238036fd63f8bf8d2624da71c200b +Subproject commit 572efd43cf52e4c41b32a9c5a4a015f783370b41 diff --git a/cmake/PCAPTests.cmake b/cmake/PCAPTests.cmake index 83f79dec53..1b62d3ab57 100644 --- a/cmake/PCAPTests.cmake +++ b/cmake/PCAPTests.cmake @@ -2,7 +2,7 @@ include(CheckFunctionExists) include(CheckCSourceCompiles) include(CheckIncludeFiles) -set(CMAKE_REQUIRED_INCLUDES ${LIBPCAP_INCLUDE_DIR}) +set(CMAKE_REQUIRED_INCLUDES ${PCAP_INCLUDE_DIR}) set(CMAKE_REQUIRED_LIBRARIES ${PCAP_LIBRARY}) check_include_files(pcap-int.h HAVE_PCAP_INT_H) diff --git a/configure b/configure index dce70e6534..f1d864640d 100755 --- a/configure +++ b/configure @@ -2,6 +2,8 @@ # Convenience wrapper for easily viewing/setting options that # the project's CMake scripts will recognize +command="$0 $*" + # check for `cmake` command type cmake > /dev/null 2>&1 || { echo "\ @@ -38,11 +40,18 @@ Usage: $0 [OPTION]... [VAR=VALUE]... 
--with-bind=PATH path to BIND install root --with-pcap=PATH path to libpcap install root --with-binpac=PATH path to BinPAC install root + --with-flex=PATH path to flex executable + --with-bison=PATH path to bison executable + --with-perl=PATH path to perl executable Optional Packages in Non-Standard Locations: --with-libmagic=PATH path to libmagic install root --with-geoip=PATH path to the libGeoIP install root --with-perftools=PATH path to Google Perftools install root + --with-python=PATH path to Python interpreter + --with-python-lib=PATH path to libpython + --with-python-inc=PATH path to Python headers + --with-swig=PATH path to SWIG executable Packaging Options (for developers): --ignore-dirs=PATHS paths to ignore when creating source package @@ -155,6 +164,15 @@ while [ $# -ne 0 ]; do --with-binpac=*) append_cache_entry BinPAC_ROOT_DIR PATH $optarg ;; + --with-flex=*) + append_cache_entry FLEX_EXECUTABLE PATH $optarg + ;; + --with-bison=*) + append_cache_entry BISON_EXECUTABLE PATH $optarg + ;; + --with-perl=*) + append_cache_entry PERL_EXECUTABLE PATH $optarg + ;; --with-libmagic=*) append_cache_entry LibMagic_ROOT_DIR PATH $optarg ;; @@ -164,6 +182,19 @@ while [ $# -ne 0 ]; do --with-perftools=*) append_cache_entry GooglePerftools_ROOT_DIR PATH $optarg ;; + --with-python=*) + append_cache_entry PYTHON_EXECUTABLE PATH $optarg + ;; + --with-python-lib=*) + append_cache_entry PYTHON_LIBRARY PATH $optarg + ;; + --with-python-inc=*) + append_cache_entry PYTHON_INCLUDE_DIR PATH $optarg + append_cache_entry PYTHON_INCLUDE_PATH PATH $optarg + ;; + --with-swig=*) + append_cache_entry SWIG_EXECUTABLE PATH $optarg + ;; --ignore-dirs=*) append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING $optarg ;; @@ -187,21 +218,9 @@ done if [ -d $builddir ]; then # If build directory exists, check if it has a CMake cache if [ -f $builddir/CMakeCache.txt ]; then - # If the Cmake cache exists, then check that it thinks - # the source tree exists where it's currently located - cmakehomedir=`grep CMAKE_HOME_DIRECTORY $builddir/CMakeCache.txt | \ - sed 's/CMAKE_HOME_DIRECTORY:INTERNAL=//g'` - if [ "$cmakehomedir" != "$sourcedir" ]; then - # The source tree moved since the build was last configured - echo "\ -The source tree has been moved from: - $cmakehomedir -to: - $sourcedir -To reconfigure in the new source directory, please delete: - $builddir/CMakeCache.txt" >&2 - exit 1 - fi + # If the CMake cache exists, delete it so that this configuration + # is not tainted by a previous one + rm -f $builddir/CMakeCache.txt fi else # Create build directory @@ -217,3 +236,7 @@ if [ -n "$CMakeGenerator" ]; then else cmake $CMakeCacheEntries $sourcedir fi + +echo "# This is the command used to configure this build" > config.status +echo $command >> config.status +chmod u+x config.status diff --git a/policy/http-header.bro b/policy/http-header.bro index 3d676488ff..259031b024 100644 --- a/policy/http-header.bro +++ b/policy/http-header.bro @@ -2,6 +2,8 @@ # Prints out detailed HTTP headers. 
+@load http + module HTTP; export { diff --git a/policy/login.bro b/policy/login.bro index 26d32ca08c..9d45249bb1 100644 --- a/policy/login.bro +++ b/policy/login.bro @@ -544,10 +544,7 @@ event login_confused(c: connection, msg: string, line: string) append_addl(c, ""); - if ( line == "" ) - print Weird::weird_file, fmt("%.6f %s %s", network_time(), id_string(c$id), msg); - else - print Weird::weird_file, fmt("%.6f %s %s (%s)", network_time(), id_string(c$id), msg, line); + event conn_weird_addl(msg, c, line); set_record_packets(c$id, T); } diff --git a/src/Attr.cc b/src/Attr.cc index 5a83d0501b..e03367c41e 100644 --- a/src/Attr.cc +++ b/src/Attr.cc @@ -327,6 +327,41 @@ void Attributes::CheckAttr(Attr* a) } } +bool Attributes::operator==(const Attributes& other) const + { + if ( ! attrs ) + return other.attrs; + + if ( ! other.attrs ) + return false; + + loop_over_list(*attrs, i) + { + Attr* a = (*attrs)[i]; + Attr* o = other.FindAttr(a->Tag()); + + if ( ! o ) + return false; + + if ( ! (*a == *o) ) + return false; + } + + loop_over_list(*other.attrs, j) + { + Attr* o = (*other.attrs)[j]; + Attr* a = FindAttr(o->Tag()); + + if ( ! a ) + return false; + + if ( ! (*a == *o) ) + return false; + } + + return true; + } + bool Attributes::Serialize(SerialInfo* info) const { return SerialObj::Serialize(info); diff --git a/src/Attr.h b/src/Attr.h index 73fb101841..26231baeb4 100644 --- a/src/Attr.h +++ b/src/Attr.h @@ -52,6 +52,20 @@ public: void Describe(ODesc* d) const; + bool operator==(const Attr& other) const + { + if ( tag != other.tag ) + return false; + + if ( expr || other.expr ) + // If any has an expression and they aren't the same object, we + // declare them unequal, as we can't really find out if the two + // expressions are equivalent. + return (expr == other.expr); + + return true; + } + protected: void AddTag(ODesc* d) const; @@ -79,6 +93,8 @@ public: bool Serialize(SerialInfo* info) const; static Attributes* Unserialize(UnserialInfo* info); + bool operator==(const Attributes& other) const; + protected: Attributes() { type = 0; attrs = 0; } void CheckAttr(Attr* attr); diff --git a/src/CompHash.cc b/src/CompHash.cc index 2e0870303c..cc4f440e06 100644 --- a/src/CompHash.cc +++ b/src/CompHash.cc @@ -65,11 +65,22 @@ CompositeHash::~CompositeHash() // Computes the piece of the hash for Val*, returning the new kp. char* CompositeHash::SingleValHash(int type_check, char* kp0, - BroType* bt, Val* v) const + BroType* bt, Val* v, bool optional) const { char* kp1 = 0; InternalTypeTag t = bt->InternalType(); + if ( optional ) + { + // Add a marker saying whether the optional field is set. + char* kp = AlignAndPadType(kp0); + *kp = ( v ? 1 : 0); + kp0 = reinterpret_cast(kp+1); + + if ( ! v ) + return kp0; + } + if ( type_check ) { InternalTypeTag vt = v->Type()->InternalType(); @@ -163,12 +174,16 @@ char* CompositeHash::SingleValHash(int type_check, char* kp0, for ( int i = 0; i < num_fields; ++i ) { Val* rv_i = rv->Lookup(i); - if ( ! rv_i ) + + Attributes* a = rt->FieldDecl(i)->attrs; + bool optional = (a && a->FindAttr(ATTR_OPTIONAL)); + + if ( ! (rv_i || optional) ) return 0; if ( ! (kp = SingleValHash(type_check, kp, rt->FieldType(i), - rv_i)) ) + rv_i, optional)) ) return 0; } @@ -248,7 +263,7 @@ HashKey* CompositeHash::ComputeHash(const Val* v, int type_check) const char* kp = k; loop_over_list(*tl, i) { - kp = SingleValHash(type_check, kp, (*tl)[i], (*vl)[i]); + kp = SingleValHash(type_check, kp, (*tl)[i], (*vl)[i], false); if ( ! 
kp ) return 0; } @@ -315,10 +330,13 @@ HashKey* CompositeHash::ComputeSingletonHash(const Val* v, int type_check) const } int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v, - int type_check, int sz) const + int type_check, int sz, bool optional) const { InternalTypeTag t = bt->InternalType(); + if ( optional ) + sz = SizeAlign(sz, sizeof(char)); + if ( type_check && v ) { InternalTypeTag vt = v->Type()->InternalType(); @@ -369,9 +387,12 @@ int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v, for ( int i = 0; i < num_fields; ++i ) { + Attributes* a = rt->FieldDecl(i)->attrs; + bool optional = (a && a->FindAttr(ATTR_OPTIONAL)); + sz = SingleTypeKeySize(rt->FieldType(i), rv ? rv->Lookup(i) : 0, - type_check, sz); + type_check, sz, optional); if ( ! sz ) return 0; } @@ -418,7 +439,7 @@ int CompositeHash::ComputeKeySize(const Val* v, int type_check) const loop_over_list(*tl, i) { sz = SingleTypeKeySize((*tl)[i], v ? v->AsListVal()->Index(i) : 0, - type_check, sz); + type_check, sz, false); if ( ! sz ) return 0; } @@ -495,20 +516,20 @@ ListVal* CompositeHash::RecoverVals(const HashKey* k) const loop_over_list(*tl, i) { Val* v; - kp = RecoverOneVal(k, kp, k_end, (*tl)[i], v); + kp = RecoverOneVal(k, kp, k_end, (*tl)[i], v, false); ASSERT(v); l->Append(v); } if ( kp != k_end ) - internal_error("under-ran key in CompositeHash::DescribeKey"); + internal_error("under-ran key in CompositeHash::DescribeKey %ld", k_end - kp); return l; } const char* CompositeHash::RecoverOneVal(const HashKey* k, const char* kp0, const char* const k_end, BroType* t, - Val*& pval) const + Val*& pval, bool optional) const { // k->Size() == 0 for a single empty string. if ( kp0 >= k_end && k->Size() > 0 ) @@ -516,9 +537,20 @@ const char* CompositeHash::RecoverOneVal(const HashKey* k, const char* kp0, TypeTag tag = t->Tag(); InternalTypeTag it = t->InternalType(); - const char* kp1 = 0; + if ( optional ) + { + const char* kp = AlignType(kp0); + kp0 = kp1 = reinterpret_cast(kp+1); + + if ( ! *kp ) + { + pval = 0; + return kp0; + } + } + switch ( it ) { case TYPE_INTERNAL_INT: { @@ -647,9 +679,13 @@ const char* CompositeHash::RecoverOneVal(const HashKey* k, const char* kp0, for ( i = 0; i < num_fields; ++i ) { Val* v; + + Attributes* a = rt->FieldDecl(i)->attrs; + bool optional = (a && a->FindAttr(ATTR_OPTIONAL)); + kp = RecoverOneVal(k, kp, k_end, - rt->FieldType(i), v); - if ( ! v ) + rt->FieldType(i), v, optional); + if ( ! (v || optional) ) { internal_error("didn't recover expected number of fields from HashKey"); pval = 0; diff --git a/src/CompHash.h b/src/CompHash.h index a0632e1bfe..12ab9f7422 100644 --- a/src/CompHash.h +++ b/src/CompHash.h @@ -30,7 +30,7 @@ protected: // Computes the piece of the hash for Val*, returning the new kp. // Used as a helper for ComputeHash in the non-singleton case. char* SingleValHash(int type_check, char* kp, - BroType* bt, Val* v) const; + BroType* bt, Val* v, bool optional) const; // Recovers just one Val of possibly many; called from RecoverVals. // Upon return, pval will point to the recovered Val of type t. @@ -38,7 +38,7 @@ protected: // upon errors, so there is no return value for invalid input. const char* RecoverOneVal(const HashKey* k, const char* kp, const char* const k_end, - BroType* t, Val*& pval) const; + BroType* t, Val*& pval, bool optional) const; // Rounds the given pointer up to the nearest multiple of the // given size, if not already a multiple. 
@@ -77,7 +77,7 @@ protected: int ComputeKeySize(const Val* v = 0, int type_check = 1) const; int SingleTypeKeySize(BroType*, const Val*, - int type_check, int sz) const; + int type_check, int sz, bool optional) const; TypeList* type; char* key; // space for composite key diff --git a/src/Expr.cc b/src/Expr.cc index dbfca7c9cb..5788cdeb7b 100644 --- a/src/Expr.cc +++ b/src/Expr.cc @@ -2531,16 +2531,35 @@ bool AssignExpr::TypeCheck() return true; } + if ( op1->Type()->Tag() == TYPE_RECORD && + op2->Type()->Tag() == TYPE_RECORD ) + { + if ( same_type(op1->Type(), op2->Type()) ) + { + RecordType* rt1 = op1->Type()->AsRecordType(); + RecordType* rt2 = op2->Type()->AsRecordType(); + + // Make sure the attributes match as well. + for ( int i = 0; i < rt1->NumFields(); ++i ) + { + const TypeDecl* td1 = rt1->FieldDecl(i); + const TypeDecl* td2 = rt2->FieldDecl(i); + + if ( same_attrs(td1->attrs, td2->attrs) ) + // Everything matches. + return true; + } + } + + // Need to coerce. + op2 = new RecordCoerceExpr(op2, op1->Type()->AsRecordType()); + return true; + } + if ( ! same_type(op1->Type(), op2->Type()) ) { - if ( op1->Type()->Tag() == TYPE_RECORD && - op2->Type()->Tag() == TYPE_RECORD ) - op2 = new RecordCoerceExpr(op2, op1->Type()->AsRecordType()); - else - { - ExprError("type clash in assignment"); - return false; - } + ExprError("type clash in assignment"); + return false; } return true; @@ -5308,21 +5327,39 @@ int check_and_promote_expr(Expr*& e, BroType* t) return 1; } - else if ( ! same_type(t, et) ) + if ( t->Tag() == TYPE_RECORD && et->Tag() == TYPE_RECORD ) { - if ( t->Tag() == TYPE_RECORD && et->Tag() == TYPE_RECORD ) - { - RecordType* t_r = t->AsRecordType(); - RecordType* et_r = et->AsRecordType(); + RecordType* t_r = t->AsRecordType(); + RecordType* et_r = et->AsRecordType(); - if ( record_promotion_compatible(t_r, et_r) ) + if ( same_type(t, et) ) + { + // Make sure the attributes match as well. + for ( int i = 0; i < t_r->NumFields(); ++i ) { - e = new RecordCoerceExpr(e, t_r); - return 1; + const TypeDecl* td1 = t_r->FieldDecl(i); + const TypeDecl* td2 = et_r->FieldDecl(i); + + if ( same_attrs(td1->attrs, td2->attrs) ) + // Everything matches perfectly. + return 1; } } - else if ( t->Tag() == TYPE_TABLE && et->Tag() == TYPE_TABLE && + if ( record_promotion_compatible(t_r, et_r) ) // Note: This is always true currently. + { + e = new RecordCoerceExpr(e, t_r); + return 1; + } + + t->Error("incompatible record types", e); + return 0; + } + + + if ( ! same_type(t, et) ) + { + if ( t->Tag() == TYPE_TABLE && et->Tag() == TYPE_TABLE && et->AsTableType()->IsUnspecifiedTable() ) { e = new TableCoerceExpr(e, t->AsTableType()); diff --git a/src/HTTP.cc b/src/HTTP.cc index 0cccf75103..a8f4481216 100644 --- a/src/HTTP.cc +++ b/src/HTTP.cc @@ -16,16 +16,20 @@ const bool DEBUG_http = false; +// The EXPECT_*_NOTHING states are used to prevent further parsing. Used if a +// message was interrupted. 
enum { EXPECT_REQUEST_LINE, EXPECT_REQUEST_MESSAGE, EXPECT_REQUEST_TRAILER, + EXPECT_REQUEST_NOTHING, }; enum { EXPECT_REPLY_LINE, EXPECT_REPLY_MESSAGE, EXPECT_REPLY_TRAILER, + EXPECT_REPLY_NOTHING, }; HTTP_Entity::HTTP_Entity(HTTP_Message *arg_message, MIME_Entity* parent_entity, int arg_expect_body) @@ -851,7 +855,23 @@ void HTTP_Analyzer::DeliverStream(int len, const u_char* data, bool is_orig) HTTP_Event("crud_trailing_HTTP_request", new_string_val(line, end_of_line)); else - ProtocolViolation("not a http request line"); + { + // We do see HTTP requests with a + // trailing EOL that's not accounted + // for by the content-length. This + // will lead to a call to this method + // with len==0 while we are expecting + // a new request. Since HTTP servers + // handle such requests gracefully, + // we should do so as well. + if ( len == 0 ) + Weird("empty_http_request"); + else + { + ProtocolViolation("not a http request line"); + request_state = EXPECT_REQUEST_NOTHING; + } + } } break; @@ -861,6 +881,9 @@ void HTTP_Analyzer::DeliverStream(int len, const u_char* data, bool is_orig) case EXPECT_REQUEST_TRAILER: break; + + case EXPECT_REQUEST_NOTHING: + break; } } else @@ -873,6 +896,8 @@ void HTTP_Analyzer::DeliverStream(int len, const u_char* data, bool is_orig) if ( unanswered_requests.empty() ) Weird("unmatched_HTTP_reply"); + else + ProtocolConfirmation(); reply_state = EXPECT_REPLY_MESSAGE; reply_ongoing = 1; @@ -884,8 +909,11 @@ void HTTP_Analyzer::DeliverStream(int len, const u_char* data, bool is_orig) ExpectReplyMessageBody(), len); } - else + else + { ProtocolViolation("not a http reply line"); + reply_state = EXPECT_REPLY_NOTHING; + } break; @@ -895,6 +923,9 @@ void HTTP_Analyzer::DeliverStream(int len, const u_char* data, bool is_orig) case EXPECT_REPLY_TRAILER: break; + + case EXPECT_REPLY_NOTHING: + break; } } } @@ -1042,6 +1073,7 @@ int HTTP_Analyzer::HTTP_RequestLine(const char* line, const char* end_of_line) // HTTP methods for distributed authoring. "PROPFIND", "PROPPATCH", "MKCOL", "DELETE", "PUT", "COPY", "MOVE", "LOCK", "UNLOCK", + "POLL", "REPORT", "SUBSCRIBE", "BMOVE", "SEARCH", @@ -1256,7 +1288,10 @@ void HTTP_Analyzer::RequestMade(const int interrupted, const char* msg) num_request_lines = 0; - request_state = EXPECT_REQUEST_LINE; + if ( interrupted ) + request_state = EXPECT_REQUEST_NOTHING; + else + request_state = EXPECT_REQUEST_LINE; } void HTTP_Analyzer::ReplyMade(const int interrupted, const char* msg) @@ -1285,7 +1320,10 @@ void HTTP_Analyzer::ReplyMade(const int interrupted, const char* msg) reply_reason_phrase = 0; } - reply_state = EXPECT_REPLY_LINE; + if ( interrupted ) + reply_state = EXPECT_REPLY_NOTHING; + else + reply_state = EXPECT_REPLY_LINE; } void HTTP_Analyzer::RequestClash(Val* /* clash_val */) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index a9329cc9cb..51add7c3df 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -823,14 +823,9 @@ bool RemoteSerializer::SendCall(SerialInfo* info, PeerID id, if ( ! peer ) return false; - // Do not send events back to originating peer. 
- if ( current_peer == peer ) - return true; - return SendCall(info, peer, name, vl); } - bool RemoteSerializer::SendCall(SerialInfo* info, Peer* peer, const char* name, val_list* vl) { @@ -1841,10 +1836,9 @@ bool RemoteSerializer::EnterPhaseRunning(Peer* peer) if ( in_sync == peer ) in_sync = 0; - current_peer->phase = Peer::RUNNING; + peer->phase = Peer::RUNNING; Log(LogInfo, "phase: running", peer); - - RaiseEvent(remote_connection_handshake_done, current_peer); + RaiseEvent(remote_connection_handshake_done, peer); if ( remote_trace_sync_interval ) { @@ -2008,12 +2002,11 @@ bool RemoteSerializer::HandshakeDone(Peer* peer) return false; #endif - if ( ! (current_peer->caps & Peer::PID_64BIT) ) - Log(LogInfo, "peer does not support 64bit PIDs; using compatibility mode", current_peer); + if ( ! (peer->caps & Peer::PID_64BIT) ) + Log(LogInfo, "peer does not support 64bit PIDs; using compatibility mode", peer); - if ( (current_peer->caps & Peer::NEW_CACHE_STRATEGY) ) - Log(LogInfo, "peer supports keep-in-cache; using that", - current_peer); + if ( (peer->caps & Peer::NEW_CACHE_STRATEGY) ) + Log(LogInfo, "peer supports keep-in-cache; using that", peer); if ( peer->sync_requested != Peer::NONE ) { @@ -2030,7 +2023,7 @@ bool RemoteSerializer::HandshakeDone(Peer* peer) { Log(LogError, "misconfiguration: authoritative state on both sides", current_peer); - CloseConnection(current_peer); + CloseConnection(peer); return false; } diff --git a/src/SMB.cc b/src/SMB.cc index 7ee6986d3d..db4d4608b2 100644 --- a/src/SMB.cc +++ b/src/SMB.cc @@ -480,8 +480,8 @@ int SMB_Session::ParseTreeConnectAndx(binpac::SMB::SMB_header const& hdr, r->Assign(0, new Val(req.flags(), TYPE_COUNT)); r->Assign(1, new StringVal(req.password_length(), (const char*) req.password())); - r->Assign(3, new StringVal(path)); - r->Assign(4, new StringVal(service)); + r->Assign(2, new StringVal(path)); + r->Assign(3, new StringVal(service)); if ( strstr_n(norm_path->Len(), norm_path->Bytes(), 5, (const u_char*) "\\IPC$") != -1 ) diff --git a/src/Type.cc b/src/Type.cc index ec7c8e510b..db74781072 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -1432,6 +1432,17 @@ int same_type(const BroType* t1, const BroType* t2, int is_init) return 0; } +int same_attrs(const Attributes* a1, const Attributes* a2) + { + if ( ! a1 ) + return (a2 != 0); + + if ( ! a2 ) + return 0; + + return (*a1 == *a2); + } + int record_promotion_compatible(const RecordType* /* super_rec */, const RecordType* /* sub_rec */) { diff --git a/src/Type.h b/src/Type.h index ff4d3df9e6..7e890d9e07 100644 --- a/src/Type.h +++ b/src/Type.h @@ -509,6 +509,9 @@ inline BroType* error_type() { return base_type(TYPE_ERROR); } // test is done in the context of an initialization. extern int same_type(const BroType* t1, const BroType* t2, int is_init=0); +// True if the two attribute lists are equivalent. +extern int same_attrs(const Attributes* a1, const Attributes* a2); + // Returns true if the record sub_rec can be promoted to the record // super_rec. 
 extern int record_promotion_compatible(const RecordType* super_rec,
diff --git a/src/bro.bif b/src/bro.bif
index ec14775a69..a573e6a716 100644
--- a/src/bro.bif
+++ b/src/bro.bif
@@ -1459,12 +1459,17 @@ function skip_http_entity_data%(c: connection, is_orig: bool%): any
 		{
 		Analyzer* ha = c->FindAnalyzer(id);

-		if ( ha->GetTag() == AnalyzerTag::HTTP )
-			static_cast<HTTP_Analyzer*>(ha)->SkipEntityData(is_orig);
+		if ( ha )
+			{
+			if ( ha->GetTag() == AnalyzerTag::HTTP )
+				static_cast<HTTP_Analyzer*>(ha)->SkipEntityData(is_orig);
+			else
+				run_time("non-HTTP analyzer associated with connection record");
+			}
 		else
-			run_time("non-HTTP analyzer associated with connection record");
-		}
+			run_time("could not find analyzer for skip_http_entity_data");
+		}
 	else
 		run_time("no analyzer associated with connection record");
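The CompHash and Expr changes above teach composite hashing and assignment type-checking about record fields marked &optional: an unset optional field in an index value is encoded with a presence-marker byte instead of aborting the key computation, and a record assignment now checks that field attributes match as well, inserting a coercion when they do not. A rough Bro-level illustration of the intended effect; the Endpoint type and the values are invented for this sketch and are not part of the patch.

    type Endpoint: record {
        host: addr;
        label: string &optional;    # may be left unset
    };

    global seen: set[Endpoint];

    event bro_init()
        {
        local a: Endpoint = [$host = 10.0.0.1];                   # $label unset
        local b: Endpoint = [$host = 10.0.0.1, $label = "web"];

        # Both values can now be used as index values, whether or not
        # the optional field is set.
        add seen[a];
        add seen[b];

        if ( a in seen && b in seen )
            print "records with unset &optional fields hash correctly";
        }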