From f20d5050075d0e3f55fa661d17db9c92fd645356 Mon Sep 17 00:00:00 2001 From: Smoot Date: Tue, 4 Jun 2024 11:52:07 +0200 Subject: [PATCH 01/89] Update reporter.bif to describe special case of errors in init Originally proposed in zeek/zeek-docs#257, but reverted via 9f9ebde62380a3012a1471d9ff1c1c91c7aa69da. --- src/reporter.bif | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/reporter.bif b/src/reporter.bif index e1799bd163..3905d14936 100644 --- a/src/reporter.bif +++ b/src/reporter.bif @@ -43,8 +43,9 @@ function Reporter::warning%(msg: string%): bool return zeek::val_mgr->True(); %} -## Generates a non-fatal error indicative of a definite problem that should -## be addressed. Program execution does not terminate. +## Generates a usually non-fatal error indicative of a definite problem that +## should be addressed. Program execution does not terminate unless the error +## is reported during initialization (e.g., :zeek:see:`zeek_init`). ## ## msg: The error message to report. ## From b0d9a841f5c15bdb335200b4986f06e0b246ea30 Mon Sep 17 00:00:00 2001 From: Vern Paxson Date: Sat, 1 Jun 2024 12:19:52 -0700 Subject: [PATCH 02/89] improved error cascade for invalid attributes --- src/Attr.cc | 198 ++++++++---------- src/Attr.h | 6 +- .../out | 3 +- 3 files changed, 90 insertions(+), 117 deletions(-) diff --git a/src/Attr.cc b/src/Attr.cc index e080bfcfd6..b8c87b7ff0 100644 --- a/src/Attr.cc +++ b/src/Attr.cc @@ -221,8 +221,13 @@ void Attributes::AddAttr(AttrPtr attr, bool is_redef) { // instantiator of the object specified a null type, however, then // that's a signal to skip the checking. If the type is error, // there's no point checking attributes either. - if ( type && ! IsErrorType(type->Tag()) ) - CheckAttr(attr.get()); + if ( type && ! IsErrorType(type->Tag()) ) { + if ( ! CheckAttr(attr.get()) ) { + // Get rid of it, so we don't get error cascades down the line. 
+ RemoveAttr(attr->Tag()); + return; + } + } // For ADD_FUNC or DEL_FUNC, add in an implicit REDEF, since // those attributes only have meaning for a redefinable value. @@ -285,7 +290,7 @@ void Attributes::DescribeReST(ODesc* d, bool shorten) const { } } -void Attributes::CheckAttr(Attr* a) { +bool Attributes::CheckAttr(Attr* a) { switch ( a->Tag() ) { case ATTR_DEPRECATED: case ATTR_REDEF: @@ -294,7 +299,7 @@ void Attributes::CheckAttr(Attr* a) { case ATTR_OPTIONAL: if ( global_var ) - Error("&optional is not valid for global variables"); + return AttrError("&optional is not valid for global variables"); break; case ATTR_ADD_FUNC: @@ -304,61 +309,53 @@ void Attributes::CheckAttr(Attr* a) { const auto& at = a->GetExpr()->GetType(); if ( at->Tag() != TYPE_FUNC ) { a->GetExpr()->Error(is_add ? "&add_func must be a function" : "&delete_func must be a function"); - break; + return false; } FuncType* aft = at->AsFuncType(); if ( ! same_type(aft->Yield(), type) ) { a->GetExpr()->Error(is_add ? "&add_func function must yield same type as variable" : "&delete_func function must yield same type as variable"); - break; + return false; } } break; case ATTR_DEFAULT_INSERT: { - if ( ! type->IsTable() ) { - Error("&default_insert only applicable to tables"); - break; - } + if ( ! type->IsTable() ) + return AttrError("&default_insert only applicable to tables"); - if ( Find(ATTR_DEFAULT) ) { - Error("&default and &default_insert cannot be used together"); - break; - } + if ( Find(ATTR_DEFAULT) ) + return AttrError("&default and &default_insert cannot be used together"); std::string err_msg; if ( ! check_default_attr(a, type, global_var, in_record, err_msg) && ! 
err_msg.empty() ) - Error(err_msg.c_str()); + return AttrError(err_msg.c_str()); break; } case ATTR_DEFAULT: { - if ( Find(ATTR_DEFAULT_INSERT) ) { - Error("&default and &default_insert cannot be used together"); - break; - } + if ( Find(ATTR_DEFAULT_INSERT) ) + return AttrError("&default and &default_insert cannot be used together"); std::string err_msg; if ( ! check_default_attr(a, type, global_var, in_record, err_msg) && ! err_msg.empty() ) - Error(err_msg.c_str()); + return AttrError(err_msg.c_str()); break; } case ATTR_EXPIRE_READ: { if ( Find(ATTR_BROKER_STORE) ) - Error("&broker_store and &read_expire cannot be used simultaneously"); + return AttrError("&broker_store and &read_expire cannot be used simultaneously"); if ( Find(ATTR_BACKEND) ) - Error("&backend and &read_expire cannot be used simultaneously"); + return AttrError("&backend and &read_expire cannot be used simultaneously"); } // fallthrough case ATTR_EXPIRE_WRITE: case ATTR_EXPIRE_CREATE: { - if ( type->Tag() != TYPE_TABLE ) { - Error("expiration only applicable to sets/tables"); - break; - } + if ( type->Tag() != TYPE_TABLE ) + return AttrError("expiration only applicable to sets/tables"); int num_expires = 0; @@ -368,57 +365,49 @@ void Attributes::CheckAttr(Attr* a) { num_expires++; } - if ( num_expires > 1 ) { - Error( + if ( num_expires > 1 ) + return AttrError( "set/table can only have one of &read_expire, &write_expire, " "&create_expire"); - break; - } } #if 0 //### not easy to test this w/o knowing the ID. if ( ! 
global_var ) - Error("expiration not supported for local variables"); + return AttrError("expiration not supported for local variables"); #endif break; case ATTR_EXPIRE_FUNC: { - if ( type->Tag() != TYPE_TABLE ) { - Error("expiration only applicable to tables"); - break; - } + if ( type->Tag() != TYPE_TABLE ) + return AttrError("expiration only applicable to tables"); type->AsTableType()->CheckExpireFuncCompatibility({NewRef{}, a}); if ( Find(ATTR_BROKER_STORE) ) - Error("&broker_store and &expire_func cannot be used simultaneously"); + return AttrError("&broker_store and &expire_func cannot be used simultaneously"); if ( Find(ATTR_BACKEND) ) - Error("&backend and &expire_func cannot be used simultaneously"); + return AttrError("&backend and &expire_func cannot be used simultaneously"); break; } case ATTR_ON_CHANGE: { - if ( type->Tag() != TYPE_TABLE ) { - Error("&on_change only applicable to sets/tables"); - break; - } + if ( type->Tag() != TYPE_TABLE ) + return AttrError("&on_change only applicable to sets/tables"); const auto& change_func = a->GetExpr(); if ( change_func->GetType()->Tag() != TYPE_FUNC || change_func->GetType()->AsFuncType()->Flavor() != FUNC_FLAVOR_FUNCTION ) - Error("&on_change attribute is not a function"); + return AttrError("&on_change attribute is not a function"); const FuncType* c_ft = change_func->GetType()->AsFuncType(); - if ( c_ft->Yield()->Tag() != TYPE_VOID ) { - Error("&on_change must not return a value"); - break; - } + if ( c_ft->Yield()->Tag() != TYPE_VOID ) + return AttrError("&on_change must not return a value"); const TableType* the_table = type->AsTableType(); @@ -427,107 +416,85 @@ void Attributes::CheckAttr(Attr* a) { const auto& args = c_ft->ParamList()->GetTypes(); const auto& t_indexes = the_table->GetIndexTypes(); - if ( args.size() != (type->IsSet() ? 2 : 3) + t_indexes.size() ) { - Error("&on_change function has incorrect number of arguments"); - break; - } + if ( args.size() != (type->IsSet() ? 
2 : 3) + t_indexes.size() ) + return AttrError("&on_change function has incorrect number of arguments"); - if ( ! same_type(args[0], the_table->AsTableType()) ) { - Error("&on_change: first argument must be of same type as table"); - break; - } + if ( ! same_type(args[0], the_table->AsTableType()) ) + return AttrError("&on_change: first argument must be of same type as table"); // can't check exact type here yet - the data structures don't exist yet. - if ( args[1]->Tag() != TYPE_ENUM ) { - Error("&on_change: second argument must be a TableChange enum"); - break; - } + if ( args[1]->Tag() != TYPE_ENUM ) + return AttrError("&on_change: second argument must be a TableChange enum"); for ( size_t i = 0; i < t_indexes.size(); i++ ) { - if ( ! same_type(args[2 + i], t_indexes[i]) ) { - Error("&on_change: index types do not match table"); - break; - } + if ( ! same_type(args[2 + i], t_indexes[i]) ) + return AttrError("&on_change: index types do not match table"); } if ( ! type->IsSet() ) - if ( ! same_type(args[2 + t_indexes.size()], the_table->Yield()) ) { - Error("&on_change: value type does not match table"); - break; - } + if ( ! same_type(args[2 + t_indexes.size()], the_table->Yield()) ) + return AttrError("&on_change: value type does not match table"); } break; case ATTR_BACKEND: { - if ( ! global_var || type->Tag() != TYPE_TABLE ) { - Error("&backend only applicable to global sets/tables"); - break; - } + if ( ! global_var || type->Tag() != TYPE_TABLE ) + return AttrError("&backend only applicable to global sets/tables"); // cannot do better equality check - the Broker types are not // actually existing yet when we are here. 
We will do that // later - before actually attaching to a broker store - if ( a->GetExpr()->GetType()->Tag() != TYPE_ENUM ) { - Error("&backend must take an enum argument"); - break; - } + if ( a->GetExpr()->GetType()->Tag() != TYPE_ENUM ) + return AttrError("&backend must take an enum argument"); // Only support atomic types for the moment, unless // explicitly overridden if ( ! type->AsTableType()->IsSet() && ! input::Manager::IsCompatibleType(type->AsTableType()->Yield().get(), true) && - ! Find(ATTR_BROKER_STORE_ALLOW_COMPLEX) ) { - Error("&backend only supports atomic types as table value"); - } + ! Find(ATTR_BROKER_STORE_ALLOW_COMPLEX) ) + return AttrError("&backend only supports atomic types as table value"); if ( Find(ATTR_EXPIRE_FUNC) ) - Error("&backend and &expire_func cannot be used simultaneously"); + return AttrError("&backend and &expire_func cannot be used simultaneously"); if ( Find(ATTR_EXPIRE_READ) ) - Error("&backend and &read_expire cannot be used simultaneously"); + return AttrError("&backend and &read_expire cannot be used simultaneously"); if ( Find(ATTR_BROKER_STORE) ) - Error("&backend and &broker_store cannot be used simultaneously"); + return AttrError("&backend and &broker_store cannot be used simultaneously"); break; } case ATTR_BROKER_STORE: { - if ( type->Tag() != TYPE_TABLE ) { - Error("&broker_store only applicable to sets/tables"); - break; - } + if ( type->Tag() != TYPE_TABLE ) + return AttrError("&broker_store only applicable to sets/tables"); - if ( a->GetExpr()->GetType()->Tag() != TYPE_STRING ) { - Error("&broker_store must take a string argument"); - break; - } + if ( a->GetExpr()->GetType()->Tag() != TYPE_STRING ) + return AttrError("&broker_store must take a string argument"); // Only support atomic types for the moment, unless // explicitly overridden if ( ! type->AsTableType()->IsSet() && ! input::Manager::IsCompatibleType(type->AsTableType()->Yield().get(), true) && - ! 
Find(ATTR_BROKER_STORE_ALLOW_COMPLEX) ) { - Error("&broker_store only supports atomic types as table value"); - } + ! Find(ATTR_BROKER_STORE_ALLOW_COMPLEX) ) + return AttrError("&broker_store only supports atomic types as table value"); if ( Find(ATTR_EXPIRE_FUNC) ) - Error("&broker_store and &expire_func cannot be used simultaneously"); + return AttrError("&broker_store and &expire_func cannot be used simultaneously"); if ( Find(ATTR_EXPIRE_READ) ) - Error("&broker_store and &read_expire cannot be used simultaneously"); + return AttrError("&broker_store and &read_expire cannot be used simultaneously"); if ( Find(ATTR_BACKEND) ) - Error("&backend and &broker_store cannot be used simultaneously"); + return AttrError("&backend and &broker_store cannot be used simultaneously"); break; } - case ATTR_BROKER_STORE_ALLOW_COMPLEX: { - if ( type->Tag() != TYPE_TABLE ) { - Error("&broker_allow_complex_type only applicable to sets/tables"); - break; - } - } + case ATTR_BROKER_STORE_ALLOW_COMPLEX: + if ( type->Tag() != TYPE_TABLE ) + return AttrError("&broker_allow_complex_type only applicable to sets/tables"); + break; case ATTR_TRACKED: // FIXME: Check here for global ID? 
@@ -535,49 +502,52 @@ void Attributes::CheckAttr(Attr* a) { case ATTR_RAW_OUTPUT: if ( type->Tag() != TYPE_FILE ) - Error("&raw_output only applicable to files"); + return AttrError("&raw_output only applicable to files"); break; - case ATTR_PRIORITY: Error("&priority only applicable to event bodies"); break; + case ATTR_PRIORITY: return AttrError("&priority only applicable to event bodies"); case ATTR_GROUP: if ( type->Tag() != TYPE_FUNC || type->AsFuncType()->Flavor() != FUNC_FLAVOR_EVENT ) - Error("&group only applicable to events"); + return AttrError("&group only applicable to events"); break; case ATTR_ERROR_HANDLER: if ( type->Tag() != TYPE_FUNC || type->AsFuncType()->Flavor() != FUNC_FLAVOR_EVENT ) - Error("&error_handler only applicable to events"); + return AttrError("&error_handler only applicable to events"); break; case ATTR_LOG: if ( ! threading::Value::IsCompatibleType(type.get()) ) - Error("&log applied to a type that cannot be logged"); + return AttrError("&log applied to a type that cannot be logged"); break; case ATTR_TYPE_COLUMN: { - if ( type->Tag() != TYPE_PORT ) { - Error("type_column tag only applicable to ports"); - break; - } + if ( type->Tag() != TYPE_PORT ) + return AttrError("type_column tag only applicable to ports"); const auto& atype = a->GetExpr()->GetType(); - if ( atype->Tag() != TYPE_STRING ) { - Error("type column needs to have a string argument"); - break; - } + if ( atype->Tag() != TYPE_STRING ) + return AttrError("type column needs to have a string argument"); break; } case ATTR_ORDERED: if ( type->Tag() != TYPE_TABLE ) - Error("&ordered only applicable to tables"); + return AttrError("&ordered only applicable to tables"); break; default: BadTag("Attributes::CheckAttr", attr_name(a->Tag())); } + + return true; +} + +bool Attributes::AttrError(const char* msg) { + Error(msg); + return false; } bool Attributes::operator==(const Attributes& other) const { diff --git a/src/Attr.h b/src/Attr.h index 94c04a380c..06da50eb85 100644 
--- a/src/Attr.h +++ b/src/Attr.h @@ -131,7 +131,11 @@ public: detail::TraversalCode Traverse(detail::TraversalCallback* cb) const; protected: - void CheckAttr(Attr* attr); + // Returns true if the attribute is okay, false if not. + bool CheckAttr(Attr* attr); + + // Reports an attribute error and returns false (handy for CheckAttr()). + bool AttrError(const char* msg); TypePtr type; std::vector attrs; diff --git a/testing/btest/Baseline/language.attr-default-global-set-error/out b/testing/btest/Baseline/language.attr-default-global-set-error/out index 29a41aa656..431b2d445b 100644 --- a/testing/btest/Baseline/language.attr-default-global-set-error/out +++ b/testing/btest/Baseline/language.attr-default-global-set-error/out @@ -1,7 +1,6 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. error in <...>/attr-default-global-set-error.zeek, line 4: &default is not valid for global variables except for tables (&default=0) error in <...>/attr-default-global-set-error.zeek, line 9: &default is not valid for global variables except for tables (&default=10) -error in <...>/attr-default-global-set-error.zeek, line 9: Duplicate &default attribute is ambiguous error in <...>/attr-default-global-set-error.zeek, line 9: &default is not valid for global variables except for tables (&default=9) -error in <...>/attr-default-global-set-error.zeek, line 9: &optional is not valid for global variables (&default=9, &optional) +error in <...>/attr-default-global-set-error.zeek, line 9: &optional is not valid for global variables (&optional) error in <...>/attr-default-global-set-error.zeek, line 10: &default is not valid for global variables except for tables (&default=set()) From aab5324e20c9f806ed1728b7f3107481122fb459 Mon Sep 17 00:00:00 2001 From: Vern Paxson Date: Sat, 1 Jun 2024 12:21:54 -0700 Subject: [PATCH 03/89] addressed some Coverity nits --- src/Expr.cc | 4 ++-- src/script_opt/Reduce.cc | 2 +- 
src/script_opt/ZAM/Compile.h | 2 +- src/script_opt/ZAM/Expr.cc | 4 ++-- src/script_opt/ZAM/Low-Level.cc | 4 +++- src/script_opt/ZAM/OPs/ZAM.op | 2 +- src/script_opt/ZAM/Stmt.cc | 2 +- 7 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/Expr.cc b/src/Expr.cc index d715c66227..7231e888dc 100644 --- a/src/Expr.cc +++ b/src/Expr.cc @@ -2793,7 +2793,7 @@ void FieldExpr::Assign(Frame* f, ValPtr v) { if ( IsError() ) return; - Assign(op->Eval(f), v); + Assign(op->Eval(f), std::move(v)); } void FieldExpr::Assign(ValPtr lhs, ValPtr rhs) { @@ -2807,7 +2807,7 @@ ValPtr FieldExpr::Delete(Frame* f) { return nullptr; auto former = op_v->AsRecordVal()->GetField(field); - Assign(op_v, nullptr); + Assign(std::move(op_v), nullptr); // In the future we could return a value, such as former, here. return nullptr; } diff --git a/src/script_opt/Reduce.cc b/src/script_opt/Reduce.cc index fbf5bc7eb1..801353efce 100644 --- a/src/script_opt/Reduce.cc +++ b/src/script_opt/Reduce.cc @@ -384,7 +384,7 @@ NameExprPtr Reducer::GetRetVar(TypePtr type) { return nullptr; IDPtr ret_id = install_ID("@retvar", "", false, false); - ret_id->SetType(type); + ret_id->SetType(std::move(type)); ret_id->GetOptInfo()->SetTemp(); ret_vars.insert(ret_id.get()); diff --git a/src/script_opt/ZAM/Compile.h b/src/script_opt/ZAM/Compile.h index a952f2b8f2..84e455418c 100644 --- a/src/script_opt/ZAM/Compile.h +++ b/src/script_opt/ZAM/Compile.h @@ -321,7 +321,7 @@ private: // Returns a handle to state associated with building // up a list of values. - OpaqueVals* BuildVals(const ListExprPtr&); + std::unique_ptr BuildVals(const ListExprPtr&); // "stride" is how many slots each element of l will consume. 
ZInstAux* InternalBuildVals(const ListExpr* l, int stride = 1); diff --git a/src/script_opt/ZAM/Expr.cc b/src/script_opt/ZAM/Expr.cc index 6c18c02318..f1e660efb7 100644 --- a/src/script_opt/ZAM/Expr.cc +++ b/src/script_opt/ZAM/Expr.cc @@ -101,7 +101,7 @@ const ZAMStmt ZAMCompiler::CompileAdd(const AggrAddExpr* e) { return AddStmt1VC(aggr, e1->AsConstExpr()); } - return AddStmtVO(aggr, BuildVals(indices)); + return AddStmtVO(aggr, BuildVals(indices).get()); } const ZAMStmt ZAMCompiler::CompileDel(const AggrDelExpr* e) { @@ -128,7 +128,7 @@ const ZAMStmt ZAMCompiler::CompileDel(const AggrDelExpr* e) { if ( index_list->Tag() != EXPR_LIST ) reporter->InternalError("non-list in \"delete\""); - auto internal_ind = std::unique_ptr(BuildVals(index_list->AsListExprPtr())); + auto internal_ind = BuildVals(index_list->AsListExprPtr()); return DelTableVO(aggr, internal_ind.get()); } diff --git a/src/script_opt/ZAM/Low-Level.cc b/src/script_opt/ZAM/Low-Level.cc index fa1048431d..52981cb5ee 100644 --- a/src/script_opt/ZAM/Low-Level.cc +++ b/src/script_opt/ZAM/Low-Level.cc @@ -24,7 +24,9 @@ const ZAMStmt ZAMCompiler::LastInst() { return ZAMStmt(insts1.size() - 1); } const ZAMStmt ZAMCompiler::ErrorStmt() { return ZAMStmt(0); } -OpaqueVals* ZAMCompiler::BuildVals(const ListExprPtr& l) { return new OpaqueVals(InternalBuildVals(l.get())); } +std::unique_ptr ZAMCompiler::BuildVals(const ListExprPtr& l) { + return std::make_unique(InternalBuildVals(l.get())); +} ZInstAux* ZAMCompiler::InternalBuildVals(const ListExpr* l, int stride) { auto exprs = l->Exprs(); diff --git a/src/script_opt/ZAM/OPs/ZAM.op b/src/script_opt/ZAM/OPs/ZAM.op index 2904afb4a5..a44927af6c 100644 --- a/src/script_opt/ZAM/OPs/ZAM.op +++ b/src/script_opt/ZAM/OPs/ZAM.op @@ -2414,7 +2414,7 @@ internal-op Subnet-To-Addr type VV eval auto addr_v = make_intrusive(frame[z.v2].subnet_val->Prefix()); Unref(frame[z.v1].addr_val); - frame[z.v1] = ZVal(addr_v); + frame[z.v1] = ZVal(std::move(addr_v)); internal-op Sub-Bytes 
type VVVV diff --git a/src/script_opt/ZAM/Stmt.cc b/src/script_opt/ZAM/Stmt.cc index 2749729b1c..5e71bdc05e 100644 --- a/src/script_opt/ZAM/Stmt.cc +++ b/src/script_opt/ZAM/Stmt.cc @@ -78,7 +78,7 @@ const ZAMStmt ZAMCompiler::CompilePrint(const PrintStmt* ps) { return Print1C(e0->AsConstExpr()); } - return PrintO(BuildVals(l)); + return PrintO(BuildVals(l).get()); } const ZAMStmt ZAMCompiler::CompileExpr(const ExprStmt* es) { From 50b1f6e013f3f8d25346f1fd8c8ee46e944548d5 Mon Sep 17 00:00:00 2001 From: Vern Paxson Date: Sat, 1 Jun 2024 12:22:44 -0700 Subject: [PATCH 04/89] updated list of BiFs for script optimization --- src/script_opt/FuncInfo.cc | 1 + .../Baseline.zam/opt.ZAM-bif-tracking/output | 2 +- testing/btest/opt/ZAM-bif-tracking.zeek | 1081 ++++++++--------- 3 files changed, 534 insertions(+), 550 deletions(-) diff --git a/src/script_opt/FuncInfo.cc b/src/script_opt/FuncInfo.cc index fd39c4779b..ed2ec6466f 100644 --- a/src/script_opt/FuncInfo.cc +++ b/src/script_opt/FuncInfo.cc @@ -344,6 +344,7 @@ static std::unordered_map func_attrs = { {"lookup_addr", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"lookup_autonomous_system", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"lookup_connection", ATTR_NO_ZEEK_SIDE_EFFECTS}, + {"lookup_connection_analyzer_id", ATTR_NO_ZEEK_SIDE_EFFECTS}, {"lookup_hostname", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"lookup_hostname_txt", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"lookup_location", ATTR_NO_SCRIPT_SIDE_EFFECTS}, diff --git a/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output b/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output index 4fab29b90a..542c7fe5c1 100644 --- a/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output +++ b/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-551 seen BiFs, 0 unseen BiFs (), 0 new BiFs () +534 seen BiFs, 0 unseen BiFs (), 0 new BiFs () diff --git a/testing/btest/opt/ZAM-bif-tracking.zeek b/testing/btest/opt/ZAM-bif-tracking.zeek index 829f2d22d9..a38bcc5e6d 100644 --- a/testing/btest/opt/ZAM-bif-tracking.zeek +++ b/testing/btest/opt/ZAM-bif-tracking.zeek @@ -8,557 +8,540 @@ # As new ones are added or old ones removed, attend to updating FuncInfo.cc # for ZAM, and then update the list here. global known_BiFs = set( - "Analyzer::__disable_all_analyzers", - "Analyzer::__disable_analyzer", - "Analyzer::__enable_analyzer", - "Analyzer::__has_tag", - "Analyzer::__name", - "Analyzer::__register_for_port", - "Analyzer::__schedule_analyzer", - "Analyzer::__tag", - "Broker::__append", - "Broker::__auto_publish", - "Broker::__auto_unpublish", - "Broker::__clear", - "Broker::__close", - "Broker::__create_clone", - "Broker::__create_master", - "Broker::__data", - "Broker::__data_type", - "Broker::__decrement", - "Broker::__erase", - "Broker::__exists", - "Broker::__flush_logs", - "Broker::__forward", - "Broker::__get", - "Broker::__get_index_from_value", - "Broker::__increment", - "Broker::__insert_into_set", - "Broker::__insert_into_table", - "Broker::__is_closed", - "Broker::__keys", - "Broker::__listen", - "Broker::__node_id", - "Broker::__opaque_clone_through_serialization", - "Broker::__peer", - "Broker::__peer_no_retry", - "Broker::__peers", - "Broker::__pop", - "Broker::__publish_id", - "Broker::__push", - "Broker::__put", - "Broker::__put_unique", - "Broker::__record_assign", - "Broker::__record_create", - "Broker::__record_iterator", - "Broker::__record_iterator_last", - "Broker::__record_iterator_next", - "Broker::__record_iterator_value", - "Broker::__record_lookup", - "Broker::__record_size", - "Broker::__remove_from", - "Broker::__set_clear", - "Broker::__set_contains", - "Broker::__set_create", - "Broker::__set_insert", - "Broker::__set_iterator", - "Broker::__set_iterator_last", - 
"Broker::__set_iterator_next", - "Broker::__set_iterator_value", - "Broker::__set_metrics_export_endpoint_name", - "Broker::__set_metrics_export_interval", - "Broker::__set_metrics_export_prefixes", - "Broker::__set_metrics_export_topic", - "Broker::__set_metrics_import_topics", - "Broker::__set_remove", - "Broker::__set_size", - "Broker::__store_name", - "Broker::__subscribe", - "Broker::__table_clear", - "Broker::__table_contains", - "Broker::__table_create", - "Broker::__table_insert", - "Broker::__table_iterator", - "Broker::__table_iterator_last", - "Broker::__table_iterator_next", - "Broker::__table_iterator_value", - "Broker::__table_lookup", - "Broker::__table_remove", - "Broker::__table_size", - "Broker::__unpeer", - "Broker::__unsubscribe", - "Broker::__vector_clear", - "Broker::__vector_create", - "Broker::__vector_insert", - "Broker::__vector_iterator", - "Broker::__vector_iterator_last", - "Broker::__vector_iterator_next", - "Broker::__vector_iterator_value", - "Broker::__vector_lookup", - "Broker::__vector_remove", - "Broker::__vector_replace", - "Broker::__vector_size", - "Broker::make_event", - "Broker::publish", - "Cluster::publish_hrw", - "Cluster::publish_rr", - "FileExtract::__set_limit", - "Files::__add_analyzer", - "Files::__analyzer_enabled", - "Files::__analyzer_name", - "Files::__disable_analyzer", - "Files::__disable_reassembly", - "Files::__enable_analyzer", - "Files::__enable_reassembly", - "Files::__file_exists", - "Files::__lookup_file", - "Files::__remove_analyzer", - "Files::__set_reassembly_buffer", - "Files::__set_timeout_interval", - "Files::__stop", - "Input::__create_analysis_stream", - "Input::__create_event_stream", - "Input::__create_table_stream", - "Input::__force_update", - "Input::__remove_stream", - "Log::__add_filter", - "Log::__create_stream", - "Log::__delay", - "Log::__delay_finish", - "Log::__disable_stream", - "Log::__enable_stream", - "Log::__flush", - "Log::__get_delay_queue_size", - "Log::__remove_filter", - 
"Log::__remove_stream", - "Log::__set_buf", - "Log::__set_max_delay_interval", - "Log::__set_max_delay_queue_size", - "Log::__write", - "Option::any_set_to_any_vec", - "Option::set", - "Option::set_change_handler", - "PacketAnalyzer::GTPV1::remove_gtpv1_connection", - "PacketAnalyzer::TEREDO::remove_teredo_connection", - "PacketAnalyzer::__disable_analyzer", - "PacketAnalyzer::__enable_analyzer", - "PacketAnalyzer::__set_ignore_checksums_nets", - "PacketAnalyzer::register_packet_analyzer", - "PacketAnalyzer::register_protocol_detection", - "PacketAnalyzer::try_register_packet_analyzer_by_name", - "Pcap::error", - "Pcap::findalldevs", - "Pcap::get_filter_state", - "Pcap::get_filter_state_string", - "Pcap::install_pcap_filter", - "Pcap::precompile_pcap_filter", - "Reporter::conn_weird", - "Reporter::error", - "Reporter::fatal", - "Reporter::fatal_error_with_core", - "Reporter::file_weird", - "Reporter::flow_weird", - "Reporter::get_weird_sampling_duration", - "Reporter::get_weird_sampling_global_list", - "Reporter::get_weird_sampling_rate", - "Reporter::get_weird_sampling_threshold", - "Reporter::get_weird_sampling_whitelist", - "Reporter::info", - "Reporter::net_weird", - "Reporter::set_weird_sampling_duration", - "Reporter::set_weird_sampling_global_list", - "Reporter::set_weird_sampling_rate", - "Reporter::set_weird_sampling_threshold", - "Reporter::set_weird_sampling_whitelist", - "Reporter::warning", - "Spicy::__resource_usage", - "Spicy::__toggle_analyzer", - "Supervisor::__create", - "Supervisor::__destroy", - "Supervisor::__init_cluster", - "Supervisor::__is_supervised", - "Supervisor::__is_supervisor", - "Supervisor::__node", - "Supervisor::__restart", - "Supervisor::__status", - "Supervisor::__stem_pid", - "Telemetry::__collect_histogram_metrics", - "Telemetry::__collect_metrics", - "Telemetry::__dbl_counter_family", - "Telemetry::__dbl_counter_inc", - "Telemetry::__dbl_counter_metric_get_or_add", - "Telemetry::__dbl_counter_value", - 
"Telemetry::__dbl_gauge_dec", - "Telemetry::__dbl_gauge_family", - "Telemetry::__dbl_gauge_inc", - "Telemetry::__dbl_gauge_metric_get_or_add", - "Telemetry::__dbl_gauge_value", - "Telemetry::__dbl_histogram_family", - "Telemetry::__dbl_histogram_metric_get_or_add", - "Telemetry::__dbl_histogram_observe", - "Telemetry::__dbl_histogram_sum", - "Telemetry::__int_counter_family", - "Telemetry::__int_counter_inc", - "Telemetry::__int_counter_metric_get_or_add", - "Telemetry::__int_counter_value", - "Telemetry::__int_gauge_dec", - "Telemetry::__int_gauge_family", - "Telemetry::__int_gauge_inc", - "Telemetry::__int_gauge_metric_get_or_add", - "Telemetry::__int_gauge_value", - "Telemetry::__int_histogram_family", - "Telemetry::__int_histogram_metric_get_or_add", - "Telemetry::__int_histogram_observe", - "Telemetry::__int_histogram_sum", + "Analyzer::__disable_all_analyzers", + "Analyzer::__disable_analyzer", + "Analyzer::__enable_analyzer", + "Analyzer::__has_tag", + "Analyzer::__name", + "Analyzer::__register_for_port", + "Analyzer::__schedule_analyzer", + "Analyzer::__tag", + "Broker::__append", + "Broker::__auto_publish", + "Broker::__auto_unpublish", + "Broker::__clear", + "Broker::__close", + "Broker::__create_clone", + "Broker::__create_master", + "Broker::__data", + "Broker::__data_type", + "Broker::__decrement", + "Broker::__erase", + "Broker::__exists", + "Broker::__flush_logs", + "Broker::__forward", + "Broker::__get", + "Broker::__get_index_from_value", + "Broker::__increment", + "Broker::__insert_into_set", + "Broker::__insert_into_table", + "Broker::__is_closed", + "Broker::__keys", + "Broker::__listen", + "Broker::__node_id", + "Broker::__opaque_clone_through_serialization", + "Broker::__peer", + "Broker::__peer_no_retry", + "Broker::__peers", + "Broker::__pop", + "Broker::__publish_id", + "Broker::__push", + "Broker::__put", + "Broker::__put_unique", + "Broker::__record_assign", + "Broker::__record_create", + "Broker::__record_iterator", + 
"Broker::__record_iterator_last", + "Broker::__record_iterator_next", + "Broker::__record_iterator_value", + "Broker::__record_lookup", + "Broker::__record_size", + "Broker::__remove_from", + "Broker::__set_clear", + "Broker::__set_contains", + "Broker::__set_create", + "Broker::__set_insert", + "Broker::__set_iterator", + "Broker::__set_iterator_last", + "Broker::__set_iterator_next", + "Broker::__set_iterator_value", + "Broker::__set_remove", + "Broker::__set_size", + "Broker::__store_name", + "Broker::__subscribe", + "Broker::__table_clear", + "Broker::__table_contains", + "Broker::__table_create", + "Broker::__table_insert", + "Broker::__table_iterator", + "Broker::__table_iterator_last", + "Broker::__table_iterator_next", + "Broker::__table_iterator_value", + "Broker::__table_lookup", + "Broker::__table_remove", + "Broker::__table_size", + "Broker::__unpeer", + "Broker::__unsubscribe", + "Broker::__vector_clear", + "Broker::__vector_create", + "Broker::__vector_insert", + "Broker::__vector_iterator", + "Broker::__vector_iterator_last", + "Broker::__vector_iterator_next", + "Broker::__vector_iterator_value", + "Broker::__vector_lookup", + "Broker::__vector_remove", + "Broker::__vector_replace", + "Broker::__vector_size", + "Broker::make_event", + "Broker::publish", + "Cluster::publish_hrw", + "Cluster::publish_rr", + "FileExtract::__set_limit", + "Files::__add_analyzer", + "Files::__analyzer_enabled", + "Files::__analyzer_name", + "Files::__disable_analyzer", + "Files::__disable_reassembly", + "Files::__enable_analyzer", + "Files::__enable_reassembly", + "Files::__file_exists", + "Files::__lookup_file", + "Files::__remove_analyzer", + "Files::__set_reassembly_buffer", + "Files::__set_timeout_interval", + "Files::__stop", + "Input::__create_analysis_stream", + "Input::__create_event_stream", + "Input::__create_table_stream", + "Input::__force_update", + "Input::__remove_stream", + "Log::__add_filter", + "Log::__create_stream", + "Log::__delay", + 
"Log::__delay_finish", + "Log::__disable_stream", + "Log::__enable_stream", + "Log::__flush", + "Log::__get_delay_queue_size", + "Log::__remove_filter", + "Log::__remove_stream", + "Log::__set_buf", + "Log::__set_max_delay_interval", + "Log::__set_max_delay_queue_size", + "Log::__write", + "Option::any_set_to_any_vec", + "Option::set", + "Option::set_change_handler", + "PacketAnalyzer::GTPV1::remove_gtpv1_connection", + "PacketAnalyzer::TEREDO::remove_teredo_connection", + "PacketAnalyzer::__disable_analyzer", + "PacketAnalyzer::__enable_analyzer", + "PacketAnalyzer::__set_ignore_checksums_nets", + "PacketAnalyzer::register_packet_analyzer", + "PacketAnalyzer::register_protocol_detection", + "PacketAnalyzer::try_register_packet_analyzer_by_name", + "Pcap::error", + "Pcap::findalldevs", + "Pcap::get_filter_state", + "Pcap::get_filter_state_string", + "Pcap::install_pcap_filter", + "Pcap::precompile_pcap_filter", + "Reporter::conn_weird", + "Reporter::error", + "Reporter::fatal", + "Reporter::fatal_error_with_core", + "Reporter::file_weird", + "Reporter::flow_weird", + "Reporter::get_weird_sampling_duration", + "Reporter::get_weird_sampling_global_list", + "Reporter::get_weird_sampling_rate", + "Reporter::get_weird_sampling_threshold", + "Reporter::get_weird_sampling_whitelist", + "Reporter::info", + "Reporter::net_weird", + "Reporter::set_weird_sampling_duration", + "Reporter::set_weird_sampling_global_list", + "Reporter::set_weird_sampling_rate", + "Reporter::set_weird_sampling_threshold", + "Reporter::set_weird_sampling_whitelist", + "Reporter::warning", + "Spicy::__resource_usage", + "Spicy::__toggle_analyzer", + "Supervisor::__create", + "Supervisor::__destroy", + "Supervisor::__init_cluster", + "Supervisor::__is_supervised", + "Supervisor::__is_supervisor", + "Supervisor::__node", + "Supervisor::__restart", + "Supervisor::__status", + "Supervisor::__stem_pid", + "Telemetry::__collect_histogram_metrics", + "Telemetry::__collect_metrics", + 
"Telemetry::__counter_family", + "Telemetry::__counter_inc", + "Telemetry::__counter_metric_get_or_add", + "Telemetry::__counter_value", + "Telemetry::__gauge_dec", + "Telemetry::__gauge_family", + "Telemetry::__gauge_inc", + "Telemetry::__gauge_metric_get_or_add", + "Telemetry::__gauge_value", + "Telemetry::__histogram_family", + "Telemetry::__histogram_metric_get_or_add", + "Telemetry::__histogram_observe", + "Telemetry::__histogram_sum", "WebSocket::__configure_analyzer", - "__init_primary_bifs", - "__init_secondary_bifs", - "active_file", - "addr_to_counts", - "addr_to_ptr_name", - "addr_to_subnet", - "all_set", - "anonymize_addr", - "any_set", - "backtrace", - "bare_mode", - "bloomfilter_add", - "bloomfilter_basic_init", - "bloomfilter_basic_init2", - "bloomfilter_clear", - "bloomfilter_counting_init", - "bloomfilter_decrement", - "bloomfilter_internal_state", - "bloomfilter_intersect", - "bloomfilter_lookup", - "bloomfilter_merge", - "bytestring_to_count", - "bytestring_to_double", - "bytestring_to_float", - "bytestring_to_hexstr", - "calc_next_rotate", - "cat", - "cat_sep", - "ceil", - "check_subnet", - "clean", - "clear_table", - "close", - "community_id_v1", - "compress_path", - "connection_exists", - "continue_processing", - "convert_for_pattern", - "count_substr", - "count_to_double", - "count_to_port", - "count_to_v4_addr", - "counts_to_addr", - "current_analyzer", - "current_event_time", - "current_time", - "decode_base64", - "decode_base64_conn", - "decode_netbios_name", - "decode_netbios_name_type", - "disable_analyzer", - "disable_event_group", - "disable_module_events", - "do_profiling", - "double_to_count", - "double_to_int", - "double_to_interval", - "double_to_time", - "dump_current_packet", - "dump_packet", - "dump_rule_stats", - "edit", - "enable_event_group", - "enable_module_events", - "enable_raw_output", - "encode_base64", - "ends_with", - "entropy_test_add", - "entropy_test_finish", - "entropy_test_init", - "enum_names", - "enum_to_int", 
- "escape_string", - "exit", - "exp", - "file_magic", - "file_mode", - "file_size", - "filter_subnet_table", - "find_all", - "find_all_ordered", - "find_entropy", + "__init_primary_bifs", + "__init_secondary_bifs", + "active_file", + "addr_to_counts", + "addr_to_ptr_name", + "addr_to_subnet", + "all_set", + "anonymize_addr", + "any_set", + "backtrace", + "bare_mode", + "bloomfilter_add", + "bloomfilter_basic_init", + "bloomfilter_basic_init2", + "bloomfilter_clear", + "bloomfilter_counting_init", + "bloomfilter_decrement", + "bloomfilter_internal_state", + "bloomfilter_intersect", + "bloomfilter_lookup", + "bloomfilter_merge", + "bytestring_to_count", + "bytestring_to_double", + "bytestring_to_float", + "bytestring_to_hexstr", + "calc_next_rotate", + "cat", + "cat_sep", + "ceil", + "check_subnet", + "clean", + "clear_table", + "close", + "community_id_v1", + "compress_path", + "connection_exists", + "continue_processing", + "convert_for_pattern", + "count_substr", + "count_to_double", + "count_to_port", + "count_to_v4_addr", + "counts_to_addr", + "current_analyzer", + "current_event_time", + "current_time", + "decode_base64", + "decode_base64_conn", + "decode_netbios_name", + "decode_netbios_name_type", + "disable_analyzer", + "disable_event_group", + "disable_module_events", + "do_profiling", + "double_to_count", + "double_to_int", + "double_to_interval", + "double_to_time", + "dump_current_packet", + "dump_packet", + "dump_rule_stats", + "edit", + "enable_event_group", + "enable_module_events", + "enable_raw_output", + "encode_base64", + "ends_with", + "entropy_test_add", + "entropy_test_finish", + "entropy_test_init", + "enum_names", + "enum_to_int", + "escape_string", + "exit", + "exp", + "file_magic", + "file_mode", + "file_size", + "filter_subnet_table", + "find_all", + "find_all_ordered", + "find_entropy", "find_in_zeekpath", - "find_last", - "find_str", - "floor", - "flush_all", - "fmt", - "fmt_ftp_port", - "fnv1a32", - "from_json", - "generate_all_events", 
- "get_broker_stats", - "get_conn_stats", - "get_conn_transport_proto", - "get_contents_file", - "get_current_conn_bytes_threshold", - "get_current_conn_duration_threshold", - "get_current_conn_packets_threshold", - "get_current_packet", - "get_current_packet_header", - "get_dns_stats", - "get_event_handler_stats", - "get_event_stats", - "get_file_analysis_stats", - "get_file_name", - "get_gap_stats", - "get_identifier_comments", - "get_identifier_declaring_script", - "get_login_state", - "get_matcher_stats", - "get_net_stats", - "get_orig_seq", - "get_package_readme", - "get_port_transport_proto", - "get_proc_stats", - "get_reassembler_stats", - "get_record_field_comments", - "get_record_field_declaring_script", - "get_reporter_stats", - "get_resp_seq", - "get_script_comments", - "get_thread_stats", - "get_timer_stats", - "getenv", - "gethostname", - "getpid", - "global_container_footprints", - "global_ids", - "global_options", - "gsub", - "has_event_group", - "has_module_events", - "have_spicy", - "have_spicy_analyzers", - "haversine_distance", - "hexdump", - "hexstr_to_bytestring", - "hll_cardinality_add", - "hll_cardinality_copy", - "hll_cardinality_estimate", - "hll_cardinality_init", - "hll_cardinality_merge_into", - "hrw_weight", - "identify_data", - "install_dst_addr_filter", - "install_dst_net_filter", - "install_src_addr_filter", - "install_src_net_filter", - "int_to_count", - "int_to_double", - "interval_to_double", - "is_alnum", - "is_alpha", - "is_ascii", - "is_file_analyzer", - "is_icmp_port", - "is_local_interface", - "is_num", - "is_packet_analyzer", - "is_processing_suspended", - "is_protocol_analyzer", - "is_remote_event", - "is_tcp_port", - "is_udp_port", - "is_v4_addr", - "is_v4_subnet", - "is_v6_addr", - "is_v6_subnet", - "is_valid_ip", - "join_string_set", - "join_string_vec", - "levenshtein_distance", - "ljust", - "ln", - "load_CPP", - "log10", - "log2", - "lookup_ID", - "lookup_addr", - "lookup_autonomous_system", - "lookup_connection", - 
"lookup_hostname", - "lookup_hostname_txt", - "lookup_location", - "lstrip", - "mask_addr", - "match_signatures", - "matching_subnets", - "md5_hash", - "md5_hash_finish", - "md5_hash_init", - "md5_hash_update", - "md5_hmac", - "mkdir", - "mmdb_open_asn_db", - "mmdb_open_location_db", - "network_time", - "open", - "open_for_append", - "order", - "packet_source", - "paraglob_equals", - "paraglob_init", - "paraglob_match", - "parse_distinguished_name", - "parse_eftp_port", - "parse_ftp_epsv", - "parse_ftp_pasv", - "parse_ftp_port", - "piped_exec", - "port_to_count", - "pow", - "preserve_prefix", - "preserve_subnet", - "print_raw", - "ptr_name_to_addr", - "rand", - "raw_bytes_to_v4_addr", - "raw_bytes_to_v6_addr", - "reading_live_traffic", - "reading_traces", - "record_fields", - "record_type_to_vector", - "remask_addr", - "remove_prefix", - "remove_suffix", - "rename", - "resize", - "reverse", - "rfind_str", - "rjust", - "rmdir", - "rotate_file", - "rotate_file_by_name", - "routing0_data_to_addrs", - "rstrip", - "safe_shell_quote", - "same_object", - "sct_verify", - "set_buf", - "set_contents_file", - "set_current_conn_bytes_threshold", - "set_current_conn_duration_threshold", - "set_current_conn_packets_threshold", - "set_file_handle", - "set_inactivity_timeout", - "set_keys", - "set_login_state", - "set_network_time", - "set_record_packets", - "set_secret", - "set_ssl_established", - "setenv", - "sha1_hash", - "sha1_hash_finish", - "sha1_hash_init", - "sha1_hash_update", - "sha256_hash", - "sha256_hash_finish", - "sha256_hash_init", - "sha256_hash_update", - "skip_further_processing", - "skip_http_entity_data", - "skip_smtp_data", - "sort", - "split_string", - "split_string1", - "split_string_all", - "split_string_n", - "sqrt", - "srand", - "starts_with", - "str_smith_waterman", - "str_split_indices", - "strcmp", - "strftime", - "string_cat", - "string_fill", - "string_to_ascii_hex", - "string_to_pattern", - "strip", - "strptime", - "strstr", - "sub", - "sub_bytes", 
- "subnet_to_addr", - "subnet_width", - "subst_string", - "suspend_processing", - "swap_case", - "syslog", - "system", - "system_env", - "table_keys", - "table_pattern_matcher_stats", - "table_values", - "terminate", - "time_to_double", - "to_addr", - "to_count", - "to_double", - "to_int", - "to_json", - "to_lower", - "to_port", - "to_string_literal", - "to_subnet", - "to_title", - "to_upper", - "topk_add", - "topk_count", - "topk_epsilon", - "topk_get_top", - "topk_init", - "topk_merge", - "topk_merge_prune", - "topk_size", - "topk_sum", - "type_aliases", - "type_name", - "unescape_URI", - "uninstall_dst_addr_filter", - "uninstall_dst_net_filter", - "uninstall_src_addr_filter", - "uninstall_src_net_filter", - "unique_id", - "unique_id_from", - "unlink", - "uuid_to_string", - "val_footprint", - "write_file", - "x509_check_cert_hostname", - "x509_check_hostname", - "x509_from_der", - "x509_get_certificate_string", - "x509_issuer_name_hash", - "x509_ocsp_verify", - "x509_parse", - "x509_set_certificate_cache", - "x509_set_certificate_cache_hit_callback", - "x509_spki_hash", - "x509_subject_name_hash", - "x509_verify", - "zeek_args", - "zeek_is_terminating", - "zeek_version", - "zfill", + "find_last", + "find_str", + "floor", + "flush_all", + "fmt", + "fmt_ftp_port", + "fnv1a32", + "from_json", + "generate_all_events", + "get_broker_stats", + "get_conn_stats", + "get_conn_transport_proto", + "get_contents_file", + "get_current_conn_bytes_threshold", + "get_current_conn_duration_threshold", + "get_current_conn_packets_threshold", + "get_current_packet", + "get_current_packet_header", + "get_dns_stats", + "get_event_handler_stats", + "get_event_stats", + "get_file_analysis_stats", + "get_file_name", + "get_gap_stats", + "get_identifier_comments", + "get_identifier_declaring_script", + "get_login_state", + "get_matcher_stats", + "get_net_stats", + "get_orig_seq", + "get_package_readme", + "get_port_transport_proto", + "get_proc_stats", + "get_reassembler_stats", + 
"get_record_field_comments", + "get_record_field_declaring_script", + "get_reporter_stats", + "get_resp_seq", + "get_script_comments", + "get_thread_stats", + "get_timer_stats", + "getenv", + "gethostname", + "getpid", + "global_container_footprints", + "global_ids", + "global_options", + "gsub", + "has_event_group", + "has_module_events", + "have_spicy", + "have_spicy_analyzers", + "haversine_distance", + "hexdump", + "hexstr_to_bytestring", + "hll_cardinality_add", + "hll_cardinality_copy", + "hll_cardinality_estimate", + "hll_cardinality_init", + "hll_cardinality_merge_into", + "hrw_weight", + "identify_data", + "install_dst_addr_filter", + "install_dst_net_filter", + "install_src_addr_filter", + "install_src_net_filter", + "int_to_count", + "int_to_double", + "interval_to_double", + "is_alnum", + "is_alpha", + "is_ascii", + "is_file_analyzer", + "is_icmp_port", + "is_local_interface", + "is_num", + "is_packet_analyzer", + "is_processing_suspended", + "is_protocol_analyzer", + "is_remote_event", + "is_tcp_port", + "is_udp_port", + "is_v4_addr", + "is_v4_subnet", + "is_v6_addr", + "is_v6_subnet", + "is_valid_ip", + "join_string_set", + "join_string_vec", + "levenshtein_distance", + "ljust", + "ln", + "load_CPP", + "log10", + "log2", + "lookup_ID", + "lookup_addr", + "lookup_autonomous_system", + "lookup_connection", + "lookup_connection_analyzer_id", + "lookup_hostname", + "lookup_hostname_txt", + "lookup_location", + "lstrip", + "mask_addr", + "match_signatures", + "matching_subnets", + "md5_hash", + "md5_hash_finish", + "md5_hash_init", + "md5_hash_update", + "md5_hmac", + "mkdir", + "mmdb_open_asn_db", + "mmdb_open_location_db", + "network_time", + "open", + "open_for_append", + "order", + "packet_source", + "paraglob_equals", + "paraglob_init", + "paraglob_match", + "parse_distinguished_name", + "parse_eftp_port", + "parse_ftp_epsv", + "parse_ftp_pasv", + "parse_ftp_port", + "piped_exec", + "port_to_count", + "pow", + "preserve_prefix", + "preserve_subnet", + 
"print_raw", + "ptr_name_to_addr", + "rand", + "raw_bytes_to_v4_addr", + "raw_bytes_to_v6_addr", + "reading_live_traffic", + "reading_traces", + "record_fields", + "record_type_to_vector", + "remask_addr", + "remove_prefix", + "remove_suffix", + "rename", + "resize", + "reverse", + "rfind_str", + "rjust", + "rmdir", + "rotate_file", + "rotate_file_by_name", + "routing0_data_to_addrs", + "rstrip", + "safe_shell_quote", + "same_object", + "sct_verify", + "set_buf", + "set_contents_file", + "set_current_conn_bytes_threshold", + "set_current_conn_duration_threshold", + "set_current_conn_packets_threshold", + "set_file_handle", + "set_inactivity_timeout", + "set_keys", + "set_login_state", + "set_network_time", + "set_record_packets", + "set_secret", + "set_ssl_established", + "setenv", + "sha1_hash", + "sha1_hash_finish", + "sha1_hash_init", + "sha1_hash_update", + "sha256_hash", + "sha256_hash_finish", + "sha256_hash_init", + "sha256_hash_update", + "skip_further_processing", + "skip_http_entity_data", + "skip_smtp_data", + "sort", + "split_string", + "split_string1", + "split_string_all", + "split_string_n", + "sqrt", + "srand", + "starts_with", + "str_smith_waterman", + "str_split_indices", + "strcmp", + "strftime", + "string_cat", + "string_fill", + "string_to_ascii_hex", + "string_to_pattern", + "strip", + "strptime", + "strstr", + "sub", + "sub_bytes", + "subnet_to_addr", + "subnet_width", + "subst_string", + "suspend_processing", + "swap_case", + "syslog", + "system", + "system_env", + "table_keys", + "table_pattern_matcher_stats", + "table_values", + "terminate", + "time_to_double", + "to_addr", + "to_count", + "to_double", + "to_int", + "to_json", + "to_lower", + "to_port", + "to_string_literal", + "to_subnet", + "to_title", + "to_upper", + "topk_add", + "topk_count", + "topk_epsilon", + "topk_get_top", + "topk_init", + "topk_merge", + "topk_merge_prune", + "topk_size", + "topk_sum", + "type_aliases", + "type_name", + "unescape_URI", + 
"uninstall_dst_addr_filter", + "uninstall_dst_net_filter", + "uninstall_src_addr_filter", + "uninstall_src_net_filter", + "unique_id", + "unique_id_from", + "unlink", + "uuid_to_string", + "val_footprint", + "write_file", + "x509_check_cert_hostname", + "x509_check_hostname", + "x509_from_der", + "x509_get_certificate_string", + "x509_issuer_name_hash", + "x509_ocsp_verify", + "x509_parse", + "x509_set_certificate_cache", + "x509_set_certificate_cache_hit_callback", + "x509_spki_hash", + "x509_subject_name_hash", + "x509_verify", + "zeek_args", + "zeek_is_terminating", + "zeek_version", + "zfill", ); function fmt_str_set(s: set[string]): string From 0ee28866a17da091361bda17c9f56656891c8a31 Mon Sep 17 00:00:00 2001 From: Vern Paxson Date: Sat, 1 Jun 2024 12:23:46 -0700 Subject: [PATCH 05/89] script optimization baseline tweaks due to recent minor changes --- .../bifs.disable_analyzer-invalid-aid/out | 35 +++++++++++++++++++ .../broker.store.create-failure/zeek.err | 14 ++++---- .../bifs.disable_analyzer-for-conn-2/output | 1 + .../bifs.disable_analyzer-for-conn-3/output | 1 + .../bifs.disable_analyzer-for-conn/output | 3 ++ .../bifs.disable_analyzer-invalid-aid/out | 35 +++++++++++++++++++ 6 files changed, 82 insertions(+), 7 deletions(-) create mode 100644 testing/btest/Baseline.cpp/bifs.disable_analyzer-invalid-aid/out create mode 100644 testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-2/output create mode 100644 testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-3/output create mode 100644 testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn/output create mode 100644 testing/btest/Baseline.zam/bifs.disable_analyzer-invalid-aid/out diff --git a/testing/btest/Baseline.cpp/bifs.disable_analyzer-invalid-aid/out b/testing/btest/Baseline.cpp/bifs.disable_analyzer-invalid-aid/out new file mode 100644 index 0000000000..47ad8f519f --- /dev/null +++ b/testing/btest/Baseline.cpp/bifs.disable_analyzer-invalid-aid/out @@ -0,0 +1,35 @@ +### BTest baseline 
data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX 
error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed 
(<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) 
+XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed 
(<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) diff --git a/testing/btest/Baseline.cpp/broker.store.create-failure/zeek.err b/testing/btest/Baseline.cpp/broker.store.create-failure/zeek.err index 3e3ab66b88..2e4a0a075b 100644 --- a/testing/btest/Baseline.cpp/broker.store.create-failure/zeek.err +++ b/testing/btest/Baseline.cpp/broker.store.create-failure/zeek.err @@ -1,11 +1,11 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. error in <...>/create-failure.zeek (C++), line 61: Failed to attach master store backend_failure: (<___>testing_btest__tmp_broker_store_create_failure_create_failure_zeek__zeek_init__36__zf()) error in <...>/create-failure.zeek (C++), line 61: Could not create Broker master store '../fail' (<___>testing_btest__tmp_broker_store_create_failure_create_failure_zeek__zeek_init__36__zf()) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle 
(lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) received termination signal diff --git a/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-2/output b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-2/output new file mode 100644 index 0000000000..49d861c74c --- /dev/null +++ b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-2/output @@ -0,0 +1 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. diff --git a/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-3/output b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-3/output new file mode 100644 index 0000000000..49d861c74c --- /dev/null +++ b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-3/output @@ -0,0 +1 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
diff --git a/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn/output b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn/output new file mode 100644 index 0000000000..010cfa58e2 --- /dev/null +++ b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn/output @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +AllAnalyzers::ANALYZER_ANALYZER_HTTP +AllAnalyzers::ANALYZER_ANALYZER_HTTP diff --git a/testing/btest/Baseline.zam/bifs.disable_analyzer-invalid-aid/out b/testing/btest/Baseline.zam/bifs.disable_analyzer-invalid-aid/out new file mode 100644 index 0000000000..6279bf0504 --- /dev/null +++ b/testing/btest/Baseline.zam/bifs.disable_analyzer-invalid-aid/out @@ -0,0 +1,35 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in 
<...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot 
be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) From 
00b24b043a90239ecd37af85bf65316a204e702a Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Fri, 31 May 2024 16:36:58 -0700 Subject: [PATCH 06/89] Set running_under_test for scripts.base.frameworks.logging.telemetry test --- testing/btest/scripts/base/frameworks/logging/telemetry.zeek | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testing/btest/scripts/base/frameworks/logging/telemetry.zeek b/testing/btest/scripts/base/frameworks/logging/telemetry.zeek index a71ffd7d00..2ed089756b 100644 --- a/testing/btest/scripts/base/frameworks/logging/telemetry.zeek +++ b/testing/btest/scripts/base/frameworks/logging/telemetry.zeek @@ -8,6 +8,9 @@ @load policy/frameworks/telemetry/log +# Force telemetry output to be sorted for test determinism +redef running_under_test = T; + global http_logs = 0; hook HTTP::log_policy(rec: HTTP::Info, id: Log::ID, filter: Log::Filter) { From f55c0a5292757ff08e1fa38a91cfc43ee666db10 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Fri, 31 May 2024 17:02:06 -0700 Subject: [PATCH 07/89] Fix race condition by pre-building the cluster json data for services.json --- src/telemetry/Manager.cc | 11 +++++++---- src/telemetry/Manager.h | 8 ++++---- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index 8b0a037e74..d414b20667 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -44,6 +44,8 @@ void Manager::InitPostScript() { static auto manager_type = node_type_type->Lookup("Cluster", "MANAGER"); if ( local_node_type == manager_type ) { + BuildClusterJson(); + callbacks = new CivetCallbacks(); callbacks->begin_request = [](struct mg_connection* conn) -> int { // Handle the services.json request ourselves by building up a response based on @@ -414,7 +416,7 @@ ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st return ret_val; } -std::string Manager::GetClusterJson() const { +void Manager::BuildClusterJson() { 
rapidjson::StringBuffer buffer; json::detail::NullDoubleWriter writer(buffer); @@ -423,8 +425,9 @@ std::string Manager::GetClusterJson() const { writer.Key("targets"); writer.StartArray(); - auto cluster_nodes = id::find_val("Cluster::nodes")->AsTableVal()->ToMap(); - for ( const auto& [idx, value] : cluster_nodes ) { + auto& node_val = id::find_val("Cluster::nodes"); + auto node_map = node_val->AsTableVal()->ToMap(); + for ( const auto& [idx, value] : node_map ) { auto node = value->AsRecordVal(); auto ip = node->GetField("ip"); auto port = node->GetField("metrics_port"); @@ -440,7 +443,7 @@ std::string Manager::GetClusterJson() const { writer.EndObject(); writer.EndArray(); - return buffer.GetString(); + cluster_json = buffer.GetString(); } CounterFamilyPtr Manager::CounterFamily(std::string_view prefix, std::string_view name, diff --git a/src/telemetry/Manager.h b/src/telemetry/Manager.h index 8a1deb5fc4..f81a39a6d5 100644 --- a/src/telemetry/Manager.h +++ b/src/telemetry/Manager.h @@ -200,7 +200,7 @@ public: * @return A JSON description of the cluster configuration for reporting * to Prometheus for service discovery requests. 
*/ - std::string GetClusterJson() const; + std::string GetClusterJson() const { return cluster_json; } /** * @return The pointer to the prometheus-cpp registry used by the telemetry @@ -230,6 +230,7 @@ protected: private: RecordValPtr GetMetricOptsRecord(const prometheus::MetricFamily& metric_family); + void BuildClusterJson(); std::map> families; std::map opts_records; @@ -242,11 +243,10 @@ private: GaugePtr cpu_gauge; GaugePtr fds_gauge; - std::string endpoint_name; - std::vector export_prefixes; - std::shared_ptr prometheus_registry; std::unique_ptr prometheus_exposer; + + std::string cluster_json; }; } // namespace zeek::telemetry From 87717fed0a99e340a0063cfaed9a8c5aa96a2dd0 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Fri, 31 May 2024 18:21:51 -0700 Subject: [PATCH 08/89] Remove prefix column from telemetry.log --- scripts/policy/frameworks/telemetry/log.zeek | 8 -------- .../telemetry.log | 16 ++++++++-------- .../telemetry.log | 6 +++--- .../telemetry_histogram.log | 8 ++++---- .../telemetry.log.filtered | 8 ++++---- .../telemetry_histogram.log.filtered | 4 ++-- .../base/frameworks/logging/telemetry.zeek | 2 +- 7 files changed, 22 insertions(+), 30 deletions(-) diff --git a/scripts/policy/frameworks/telemetry/log.zeek b/scripts/policy/frameworks/telemetry/log.zeek index 935b92cefa..d29dc896d7 100644 --- a/scripts/policy/frameworks/telemetry/log.zeek +++ b/scripts/policy/frameworks/telemetry/log.zeek @@ -33,9 +33,6 @@ export { ## the underlying metric type. metric_type: string &log; - ## The prefix (namespace) of the metric. - prefix: string &log; - ## The name of the metric. name: string &log; @@ -57,9 +54,6 @@ export { ## Peer that generated this log. peer: string &log; - ## The prefix (namespace) of the metric. - prefix: string &log; - ## The name of the metric. 
name: string &log; @@ -137,7 +131,6 @@ function do_log() local rec = Info($ts=ts, $peer=peer_description, $metric_type=metric_type, - $prefix=m$opts$prefix, $name=m$opts$name, $labels=m$opts$labels, $label_values=m$labels, @@ -168,7 +161,6 @@ function do_log() local hrec = HistogramInfo($ts=ts, $peer=peer_description, - $prefix=hm$opts$prefix, $name=hm$opts$name, $labels=hm$opts$labels, $label_values=hm$labels, diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log b/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log index 6e0e60a2f8..a84c0f505c 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log @@ -5,12 +5,12 @@ #unset_field - #path telemetry #open XXXX-XX-XX-XX-XX-XX -#fields ts peer metric_type prefix name labels label_values value -#types time string string string string vector[string] vector[string] double -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_stream_writes_total module,stream Conn,Conn::LOG 34.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_stream_writes_total module,stream DNS,DNS::LOG 34.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_stream_writes_total module,stream HTTP,HTTP::LOG 14.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_writer_writes_total writer,module,stream,filter-name,path default,Conn,conn,Conn::LOG,Log::WRITER_ASCII 30.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_writer_writes_total writer,module,stream,filter-name,path default,DNS,dns,DNS::LOG,Log::WRITER_ASCII 23.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_writer_writes_total writer,module,stream,filter-name,path default,HTTP,http,HTTP::LOG,Log::WRITER_ASCII 10.0 +#fields ts peer metric_type name labels label_values value +#types time string string string vector[string] vector[string] double +XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream Conn,Conn::LOG 34.0 
+XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream DNS,DNS::LOG 34.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream HTTP,HTTP::LOG 14.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path default,Conn,conn,Conn::LOG,Log::WRITER_ASCII 30.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path default,DNS,dns,DNS::LOG,Log::WRITER_ASCII 23.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path default,HTTP,http,HTTP::LOG,Log::WRITER_ASCII 10.0 #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry.log b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry.log index d79fef633b..af06992a04 100644 --- a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry.log +++ b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry.log @@ -5,7 +5,7 @@ #unset_field - #path telemetry #open XXXX-XX-XX-XX-XX-XX -#fields ts peer metric_type prefix name labels label_values value -#types time string string string string vector[string] vector[string] double -XXXXXXXXXX.XXXXXX zeek counter btest btest_connections_total proto tcp 500.0 +#fields ts peer metric_type name labels label_values value +#types time string string string vector[string] vector[string] double +XXXXXXXXXX.XXXXXX zeek counter btest_connections_total proto tcp 500.0 #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry_histogram.log b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry_histogram.log index 79adb57972..a30298db72 100644 --- a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry_histogram.log +++ 
b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry_histogram.log @@ -5,8 +5,8 @@ #unset_field - #path telemetry_histogram #open XXXX-XX-XX-XX-XX-XX -#fields ts peer prefix name labels label_values bounds values sum observations -#types time string string string vector[string] vector[string] vector[double] vector[double] double double -XXXXXXXXXX.XXXXXX zeek btest btest_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,0.0,0.0,0.0,0.0,0.0,0.0 0.0 0.0 -XXXXXXXXXX.XXXXXX zeek btest btest_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,322.0,90.0,5.0,76.0,7.0,0.0 1650.264644 500.0 +#fields ts peer name labels label_values bounds values sum observations +#types time string string vector[string] vector[string] vector[double] vector[double] double double +XXXXXXXXXX.XXXXXX zeek btest_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,0.0,0.0,0.0,0.0,0.0,0.0 0.0 0.0 +XXXXXXXXXX.XXXXXX zeek btest_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,322.0,90.0,5.0,76.0,7.0,0.0 1650.264644 500.0 #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered index 19b4b90ddb..c7b26a1f28 100644 --- a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered +++ b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered @@ -1,5 +1,5 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-XXXXXXXXXX.XXXXXX zeek counter zeek zeek_total_sessions_total protocol tcp 1.0 -XXXXXXXXXX.XXXXXX zeek gauge zeek zeek_active_sessions protocol tcp 1.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_total_sessions_total protocol tcp 500.0 -XXXXXXXXXX.XXXXXX zeek gauge zeek zeek_active_sessions protocol tcp 500.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_total_sessions_total protocol tcp 1.0 +XXXXXXXXXX.XXXXXX zeek gauge zeek_active_sessions protocol tcp 1.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_total_sessions_total protocol tcp 500.0 +XXXXXXXXXX.XXXXXX zeek gauge zeek_active_sessions protocol tcp 500.0 diff --git a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry_histogram.log.filtered b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry_histogram.log.filtered index b382cd5ca6..d47ba69d07 100644 --- a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry_histogram.log.filtered +++ b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry_histogram.log.filtered @@ -1,3 +1,3 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-XXXXXXXXXX.XXXXXX zeek zeek zeek_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,0.0,0.0,0.0,0.0,0.0,0.0 0.0 0.0 -XXXXXXXXXX.XXXXXX zeek zeek zeek_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,322.0,90.0,5.0,76.0,7.0,0.0 1650.264644 500.0 +XXXXXXXXXX.XXXXXX zeek zeek_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,0.0,0.0,0.0,0.0,0.0,0.0 0.0 0.0 +XXXXXXXXXX.XXXXXX zeek zeek_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,322.0,90.0,5.0,76.0,7.0,0.0 1650.264644 500.0 diff --git a/testing/btest/scripts/base/frameworks/logging/telemetry.zeek b/testing/btest/scripts/base/frameworks/logging/telemetry.zeek index 2ed089756b..43c1ab641e 100644 --- a/testing/btest/scripts/base/frameworks/logging/telemetry.zeek +++ b/testing/btest/scripts/base/frameworks/logging/telemetry.zeek @@ -31,7 +31,7 @@ hook Log::log_stream_policy(rec: any, id: Log::ID) hook Telemetry::log_policy(rec: Telemetry::Info, id: Log::ID, filter: Log::Filter) { - if ( rec$prefix != "zeek" || /^zeek_log_/ !in rec$name ) + if ( /^zeek_log_/ !in rec$name ) break; if ( /HTTP|DNS|Conn/ !in cat(rec$label_values) ) From b1578d4dedcb57e4082fc25801bfafc10e29566f Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Fri, 31 May 2024 20:35:05 -0700 Subject: [PATCH 09/89] Ensure the order of label values matches the label names --- src/telemetry/Manager.cc | 46 ++++++++++++------- .../telemetry.log | 6 +-- 2 files changed, 33 insertions(+), 19 deletions(-) diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index d414b20667..e2c3276c7d 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -259,6 +259,32 @@ static bool compare_histograms(const std::optional& a, const std::optional return comparer(a, b, metric_record_type); } +static VectorValPtr build_label_values_vector(const std::vector& prom_labels, + const VectorValPtr& record_label_names) { + static auto string_vec_type 
= zeek::id::find_type("string_vec"); + auto label_values_vec = make_intrusive(string_vec_type); + + // This feels really bad, since it's an O(m*n) search to bulld the vector, + // but prometheus-cpp returns us a vector of labels and so we just have to + // search through it. + int i = 0; + for ( const auto& name : record_label_names->RawVec() ) { + auto n = name->AsString()->ToStdStringView(); + auto it = std::find_if(prom_labels.begin(), prom_labels.end(), + [n](const prometheus::ClientMetric::Label& l) { return l.name == n; }); + if ( it != prom_labels.end() ) + label_values_vec->Assign(i, make_intrusive(it->value)); + + // See the comment in GetMetricOptsRecord about how labels from non-Zeek + // metrics within the same family can have different labels from each + // other. In this case we might leave some fields null in the output. + + ++i; + } + + return label_values_vec; +} + ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view name_pattern) { static auto metrics_vector_type = zeek::id::find_type("Telemetry::MetricVector"); static auto string_vec_type = zeek::id::find_type("string_vec"); @@ -287,17 +313,11 @@ ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view continue; RecordValPtr opts_record = GetMetricOptsRecord(fam); + const auto& label_names = opts_record->GetField("labels"); for ( const auto& inst : fam.metric ) { - auto label_values_vec = make_intrusive(string_vec_type); - for ( const auto& label : inst.label ) { - // We don't include the endpoint key/value unless it's a prometheus request - if ( label.name != "endpoint" ) - label_values_vec->Append(make_intrusive(label.value)); - } - auto r = make_intrusive(metric_record_type); - r->Assign(labels_idx, label_values_vec); + r->Assign(labels_idx, build_label_values_vector(inst.label, label_names)); r->Assign(opts_idx, opts_record); if ( fam.type == prometheus::MetricType::Counter ) @@ -360,17 +380,11 @@ ValPtr 
Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st continue; RecordValPtr opts_record = GetMetricOptsRecord(fam); + const auto& label_names = opts_record->GetField("labels"); for ( const auto& inst : fam.metric ) { - auto label_values_vec = make_intrusive(string_vec_type); - for ( const auto& label : inst.label ) { - // We don't include the endpoint key/value unless it's a prometheus request - if ( label.name != "endpoint" ) - label_values_vec->Append(make_intrusive(label.value)); - } - auto r = make_intrusive(histogram_metric_type); - r->Assign(labels_idx, label_values_vec); + r->Assign(labels_idx, build_label_values_vector(inst.label, label_names)); r->Assign(opts_idx, opts_record); auto double_values_vec = make_intrusive(double_vec_type); diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log b/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log index a84c0f505c..e06856304b 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log @@ -10,7 +10,7 @@ XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream Conn,Conn::LOG 34.0 XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream DNS,DNS::LOG 34.0 XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream HTTP,HTTP::LOG 14.0 -XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path default,Conn,conn,Conn::LOG,Log::WRITER_ASCII 30.0 -XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path default,DNS,dns,DNS::LOG,Log::WRITER_ASCII 23.0 -XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path default,HTTP,http,HTTP::LOG,Log::WRITER_ASCII 10.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path 
Log::WRITER_ASCII,Conn,Conn::LOG,-,conn 30.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path Log::WRITER_ASCII,DNS,DNS::LOG,-,dns 23.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path Log::WRITER_ASCII,HTTP,HTTP::LOG,-,http 10.0 #close XXXX-XX-XX-XX-XX-XX From 433c2578866dcddcbe41e98c909582bb39eab5ec Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Sun, 2 Jun 2024 19:46:50 -0700 Subject: [PATCH 10/89] Move telmetry label names out of opts records, into main metric records --- scripts/base/frameworks/telemetry/main.zeek | 35 ++--- scripts/base/init-bare.zeek | 30 +++-- scripts/policy/frameworks/telemetry/log.zeek | 8 +- src/telemetry/Manager.cc | 121 ++++++------------ .../telemetry.log | 6 +- .../base/frameworks/telemetry/basic.zeek | 34 ++--- .../telemetry/conn-duration-histogram.zeek | 10 +- .../telemetry/event-handler-invocations.zeek | 4 +- testing/btest/scripts/base/utils/json.test | 12 +- .../frameworks/telemetry/log-prefixes.zeek | 4 +- 10 files changed, 114 insertions(+), 150 deletions(-) diff --git a/scripts/base/frameworks/telemetry/main.zeek b/scripts/base/frameworks/telemetry/main.zeek index 59e480a125..e41ef1b21e 100644 --- a/scripts/base/frameworks/telemetry/main.zeek +++ b/scripts/base/frameworks/telemetry/main.zeek @@ -36,7 +36,8 @@ export { }; ## Register a counter family. - global register_counter_family: function(opts: MetricOpts): CounterFamily; + global register_counter_family: function(opts: MetricOpts, + label_names: labels_vector &default=vector()): CounterFamily; ## Get a :zeek:see:`Telemetry::Counter` instance given family and label values. global counter_with: function(cf: CounterFamily, @@ -119,7 +120,8 @@ export { }; ## Register a gauge family. 
- global register_gauge_family: function(opts: MetricOpts): GaugeFamily; + global register_gauge_family: function(opts: MetricOpts, + label_names: labels_vector &default=vector()): GaugeFamily; ## Get a :zeek:see:`Telemetry::Gauge` instance given family and label values. @@ -215,7 +217,8 @@ export { }; ## Register a histogram family. - global register_histogram_family: function(opts: MetricOpts): HistogramFamily; + global register_histogram_family: function(opts: MetricOpts, + label_names: labels_vector &default=vector()): HistogramFamily; ## Get a :zeek:see:`Telemetry::Histogram` instance given family and label values. global histogram_with: function(hf: HistogramFamily, @@ -290,16 +293,16 @@ function make_labels(keys: vector of string, values: labels_vector): table[strin return labels; } -function register_counter_family(opts: MetricOpts): CounterFamily +function register_counter_family(opts: MetricOpts, label_names: labels_vector): CounterFamily { local f = Telemetry::__counter_family( opts$prefix, opts$name, - opts$labels, + label_names, opts$help_text, opts$unit ); - return CounterFamily($__family=f, $__labels=opts$labels); + return CounterFamily($__family=f, $__labels=label_names); } # Fallback Counter returned when there are issues with the labels. @@ -349,16 +352,16 @@ function counter_family_set(cf: CounterFamily, label_values: labels_vector, valu return counter_set(counter_with(cf, label_values), value); } -function register_gauge_family(opts: MetricOpts): GaugeFamily +function register_gauge_family(opts: MetricOpts, label_names: labels_vector): GaugeFamily { local f = Telemetry::__gauge_family( opts$prefix, opts$name, - opts$labels, + label_names, opts$help_text, opts$unit ); - return GaugeFamily($__family=f, $__labels=opts$labels); + return GaugeFamily($__family=f, $__labels=label_names); } # Fallback Gauge returned when there are issues with the label usage. 
@@ -417,17 +420,17 @@ function gauge_family_set(gf: GaugeFamily, label_values: labels_vector, value: d return gauge_set(gauge_with(gf, label_values), value); } -function register_histogram_family(opts: MetricOpts): HistogramFamily +function register_histogram_family(opts: MetricOpts, label_names: labels_vector): HistogramFamily { local f = Telemetry::__histogram_family( opts$prefix, opts$name, - opts$labels, + label_names, opts$bounds, opts$help_text, opts$unit ); - return HistogramFamily($__family=f, $__labels=opts$labels); + return HistogramFamily($__family=f, $__labels=label_names); } # Fallback Histogram when there are issues with the labels. @@ -483,10 +486,10 @@ global version_gauge_family = Telemetry::register_gauge_family([ $prefix="zeek", $name="version_info", $unit="", - $help_text="The Zeek version", - $labels=vector("version_number", "major", "minor", "patch", "commit", - "beta", "debug","version_string") -]); + $help_text="The Zeek version"], + vector("version_number", "major", "minor", "patch", "commit", + "beta", "debug","version_string") +); event zeek_init() { diff --git a/scripts/base/init-bare.zeek b/scripts/base/init-bare.zeek index dd47c55aad..97a2bf169f 100644 --- a/scripts/base/init-bare.zeek +++ b/scripts/base/init-bare.zeek @@ -5802,14 +5802,6 @@ export { ## Documentation for this metric. help_text: string &optional; - ## The label names (also called dimensions) of the metric. When - ## instantiating or working with concrete metrics, corresponding - ## label values have to be provided. Examples of a label might - ## be the protocol a general observation applies to, the - ## directionality in a traffic flow, or protocol-specific - ## context like a particular message type. - labels: vector of string &default=vector(); - ## Whether the metric represents something that is accumulating. ## Defaults to ``T`` for counters and ``F`` for gauges and ## histograms. 
@@ -5832,8 +5824,16 @@ export { ## A :zeek:see:`Telemetry::MetricOpts` record describing this metric. opts: MetricOpts; + ## The label names (also called dimensions) of the metric. When + ## instantiating or working with concrete metrics, corresponding + ## label values have to be provided. Examples of a label might + ## be the protocol a general observation applies to, the + ## directionality in a traffic flow, or protocol-specific + ## context like a particular message type. + label_names: vector of string &default=vector(); + ## The label values associated with this metric, if any. - labels: vector of string; + label_values: vector of string &optional; ## The value of gauge or counter cast to a double ## independent of the underlying data type. @@ -5847,8 +5847,16 @@ export { ## A :zeek:see:`Telemetry::MetricOpts` record describing this histogram. opts: MetricOpts; - ## The label values associated with this histogram, if any. - labels: vector of string; + ## The label names (also called dimensions) of the metric. When + ## instantiating or working with concrete metrics, corresponding + ## label values have to be provided. Examples of a label might + ## be the protocol a general observation applies to, the + ## directionality in a traffic flow, or protocol-specific + ## context like a particular message type. + label_names: vector of string &default=vector(); + + ## The label values associated with this metric, if any. 
+ label_values: vector of string &optional; ## Individual counters for each of the buckets as ## described by the *bounds* field in *opts*; diff --git a/scripts/policy/frameworks/telemetry/log.zeek b/scripts/policy/frameworks/telemetry/log.zeek index d29dc896d7..8ee376eee4 100644 --- a/scripts/policy/frameworks/telemetry/log.zeek +++ b/scripts/policy/frameworks/telemetry/log.zeek @@ -132,8 +132,8 @@ function do_log() $peer=peer_description, $metric_type=metric_type, $name=m$opts$name, - $labels=m$opts$labels, - $label_values=m$labels, + $labels=m$label_names, + $label_values=m$label_values, $value=m$value); Log::write(LOG, rec); @@ -162,8 +162,8 @@ function do_log() local hrec = HistogramInfo($ts=ts, $peer=peer_description, $name=hm$opts$name, - $labels=hm$opts$labels, - $label_values=hm$labels, + $labels=hm$label_names, + $label_values=hm$label_values, $bounds=hm$opts$bounds, $values=hm$values, $sum=hm$sum, diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index e2c3276c7d..888c8bad38 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -134,7 +134,6 @@ RecordValPtr Manager::GetMetricOptsRecord(const prometheus::MetricFamily& metric static auto name_idx = metric_opts_type->FieldOffset("name"); static auto help_text_idx = metric_opts_type->FieldOffset("help_text"); static auto unit_idx = metric_opts_type->FieldOffset("unit"); - static auto labels_idx = metric_opts_type->FieldOffset("labels"); static auto is_total_idx = metric_opts_type->FieldOffset("is_total"); static auto metric_type_idx = metric_opts_type->FieldOffset("metric_type"); @@ -156,55 +155,15 @@ RecordValPtr Manager::GetMetricOptsRecord(const prometheus::MetricFamily& metric // Assume that a metric ending with _total is always a summed metric so we can set that. record_val->Assign(is_total_idx, val_mgr->Bool(util::ends_with(metric_family.name, "_total"))); - auto label_names_vec = make_intrusive(string_vec_type); - - // Check if this is a Zeek-internal metric. 
We keep a little more information about a metric - // for these than we do for ones that were inserted into prom-cpp directly. - if ( auto it = families.find(metric_family.name); it != families.end() ) { - record_val->Assign(metric_type_idx, - zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal(it->second->MetricType())); - - for ( const auto& lbl : it->second->LabelNames() ) - label_names_vec->Append(make_intrusive(lbl)); - } - else { - // prom-cpp stores everything internally as doubles - if ( metric_family.type == prometheus::MetricType::Counter ) - record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( - BifEnum::Telemetry::MetricType::COUNTER)); - if ( metric_family.type == prometheus::MetricType::Gauge ) - record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( - BifEnum::Telemetry::MetricType::GAUGE)); - if ( metric_family.type == prometheus::MetricType::Histogram ) - record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( - BifEnum::Telemetry::MetricType::HISTOGRAM)); - - // prometheus-cpp doesn't store label names anywhere other than in each - // instrument. this is valid because label names can be different - // between instruments within a single family for prometheus. we don't - // follow that model in Zeek, so use the names from the first instrument - // but validate that they're the same in the rest and warn if not. - if ( ! 
metric_family.metric.empty() ) { - std::unordered_set names; - for ( const auto& lbl : metric_family.metric[0].label ) { - label_names_vec->Append(make_intrusive(lbl.name)); - names.insert(lbl.name); - } - - if ( metric_family.metric.size() > 1 ) { - for ( size_t i = 1; i < metric_family.metric.size(); ++i ) { - for ( const auto& lbl : metric_family.metric[i].label ) { - if ( names.count(lbl.name) == 0 ) - reporter->Warning( - "Telemetry labels must be the same across all instruments for metric family %s\n", - metric_family.name.c_str()); - } - } - } - } - } - - record_val->Assign(labels_idx, label_names_vec); + if ( metric_family.type == prometheus::MetricType::Counter ) + record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( + BifEnum::Telemetry::MetricType::COUNTER)); + if ( metric_family.type == prometheus::MetricType::Gauge ) + record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( + BifEnum::Telemetry::MetricType::GAUGE)); + if ( metric_family.type == prometheus::MetricType::Histogram ) + record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( + BifEnum::Telemetry::MetricType::HISTOGRAM)); opts_records.insert({metric_family.name, record_val}); @@ -244,8 +203,8 @@ static bool comparer(const std::optional& a, const std::optional& b, auto a_r = a->ToVal(type)->AsRecordVal(); auto b_r = b->ToVal(type)->AsRecordVal(); - auto a_labels = a_r->GetField("labels"); - auto b_labels = b_r->GetField("labels"); + auto a_labels = a_r->GetField("label_values"); + auto b_labels = b_r->GetField("label_values"); return compare_string_vectors(a_labels, b_labels); } @@ -259,39 +218,14 @@ static bool compare_histograms(const std::optional& a, const std::optional return comparer(a, b, metric_record_type); } -static VectorValPtr build_label_values_vector(const std::vector& prom_labels, - const VectorValPtr& record_label_names) { - static auto string_vec_type = 
zeek::id::find_type("string_vec"); - auto label_values_vec = make_intrusive(string_vec_type); - - // This feels really bad, since it's an O(m*n) search to bulld the vector, - // but prometheus-cpp returns us a vector of labels and so we just have to - // search through it. - int i = 0; - for ( const auto& name : record_label_names->RawVec() ) { - auto n = name->AsString()->ToStdStringView(); - auto it = std::find_if(prom_labels.begin(), prom_labels.end(), - [n](const prometheus::ClientMetric::Label& l) { return l.name == n; }); - if ( it != prom_labels.end() ) - label_values_vec->Assign(i, make_intrusive(it->value)); - - // See the comment in GetMetricOptsRecord about how labels from non-Zeek - // metrics within the same family can have different labels from each - // other. In this case we might leave some fields null in the output. - - ++i; - } - - return label_values_vec; -} - ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view name_pattern) { static auto metrics_vector_type = zeek::id::find_type("Telemetry::MetricVector"); static auto string_vec_type = zeek::id::find_type("string_vec"); static auto metric_record_type = zeek::id::find_type("Telemetry::Metric"); static auto opts_idx = metric_record_type->FieldOffset("opts"); - static auto labels_idx = metric_record_type->FieldOffset("labels"); static auto value_idx = metric_record_type->FieldOffset("value"); + static auto label_names_idx = metric_record_type->FieldOffset("label_names"); + static auto label_values_idx = metric_record_type->FieldOffset("label_values"); static auto metric_opts_type = zeek::id::find_type("Telemetry::MetricOpts"); static auto metric_type_idx = metric_opts_type->FieldOffset("metric_type"); @@ -313,11 +247,9 @@ ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view continue; RecordValPtr opts_record = GetMetricOptsRecord(fam); - const auto& label_names = opts_record->GetField("labels"); for ( const auto& inst : fam.metric ) { auto r 
= make_intrusive(metric_record_type); - r->Assign(labels_idx, build_label_values_vector(inst.label, label_names)); r->Assign(opts_idx, opts_record); if ( fam.type == prometheus::MetricType::Counter ) @@ -325,6 +257,17 @@ ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view else if ( fam.type == prometheus::MetricType::Gauge ) r->Assign(value_idx, zeek::make_intrusive(inst.gauge.value)); + auto label_names_vec = make_intrusive(string_vec_type); + auto label_values_vec = make_intrusive(string_vec_type); + + for ( const auto& lbl : inst.label ) { + label_names_vec->Append(make_intrusive(lbl.name)); + label_values_vec->Append(make_intrusive(lbl.value)); + } + + r->Assign(label_names_idx, label_names_vec); + r->Assign(label_values_idx, label_values_vec); + ret_val->Append(r); } } @@ -350,8 +293,9 @@ ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st static auto string_vec_type = zeek::id::find_type("string_vec"); static auto double_vec_type = zeek::id::find_type("double_vec"); static auto histogram_metric_type = zeek::id::find_type("Telemetry::HistogramMetric"); - static auto labels_idx = histogram_metric_type->FieldOffset("labels"); static auto values_idx = histogram_metric_type->FieldOffset("values"); + static auto label_names_idx = histogram_metric_type->FieldOffset("label_names"); + static auto label_values_idx = histogram_metric_type->FieldOffset("label_values"); static auto observations_idx = histogram_metric_type->FieldOffset("observations"); static auto sum_idx = histogram_metric_type->FieldOffset("sum"); @@ -380,13 +324,22 @@ ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st continue; RecordValPtr opts_record = GetMetricOptsRecord(fam); - const auto& label_names = opts_record->GetField("labels"); for ( const auto& inst : fam.metric ) { auto r = make_intrusive(histogram_metric_type); - r->Assign(labels_idx, build_label_values_vector(inst.label, label_names)); 
r->Assign(opts_idx, opts_record); + auto label_names_vec = make_intrusive(string_vec_type); + auto label_values_vec = make_intrusive(string_vec_type); + + for ( const auto& lbl : inst.label ) { + label_names_vec->Append(make_intrusive(lbl.name)); + label_values_vec->Append(make_intrusive(lbl.value)); + } + + r->Assign(label_names_idx, label_names_vec); + r->Assign(label_values_idx, label_values_vec); + auto double_values_vec = make_intrusive(double_vec_type); std::vector boundaries; uint64_t last = 0.0; diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log b/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log index e06856304b..f371070a8e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log @@ -10,7 +10,7 @@ XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream Conn,Conn::LOG 34.0 XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream DNS,DNS::LOG 34.0 XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream HTTP,HTTP::LOG 14.0 -XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path Log::WRITER_ASCII,Conn,Conn::LOG,-,conn 30.0 -XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path Log::WRITER_ASCII,DNS,DNS::LOG,-,dns 23.0 -XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total writer,module,stream,filter-name,path Log::WRITER_ASCII,HTTP,HTTP::LOG,-,http 10.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total filter_name,module,path,stream,writer default,Conn,conn,Conn::LOG,Log::WRITER_ASCII 30.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total filter_name,module,path,stream,writer default,DNS,dns,DNS::LOG,Log::WRITER_ASCII 23.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total 
filter_name,module,path,stream,writer default,HTTP,http,HTTP::LOG,Log::WRITER_ASCII 10.0 #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/scripts/base/frameworks/telemetry/basic.zeek b/testing/btest/scripts/base/frameworks/telemetry/basic.zeek index 72b675dc0a..aeb1e5cde1 100644 --- a/testing/btest/scripts/base/frameworks/telemetry/basic.zeek +++ b/testing/btest/scripts/base/frameworks/telemetry/basic.zeek @@ -14,42 +14,42 @@ global btest_a_cf = Telemetry::register_counter_family([ $prefix="btest", $name="a_test", $unit="", - $help_text="A btest metric", - $labels=vector("x", "y") -]); + $help_text="A btest metric"], + vector("x", "y") +); global btest_b_cf = Telemetry::register_counter_family([ $prefix="btest", $name="b_test", $unit="", - $help_text="Another btest metric", - $labels=vector("x", "y") -]); + $help_text="Another btest metric"], + vector("x", "y") +); global btest_c_cf = Telemetry::register_counter_family([ $prefix="btest", $name="c_test", $unit="", - $help_text="The last btest metric", - $labels=vector("x", "y") -]); + $help_text="The last btest metric"], + vector("x", "y") +); global system_sensor_temp_gf = Telemetry::register_gauge_family([ $prefix="system", $name="sensor_temperature", $unit="celsius", - $help_text="Temperatures reported by sensors in the system", - $labels=vector("name") -]); + $help_text="Temperatures reported by sensors in the system"], + vector("name") +); global btest_sample_histogram_hf = Telemetry::register_histogram_family([ $prefix="btest", $name="sample_histogram", $unit="", $help_text="A sample histogram that is not returned by Telemetry::collect_metrics", - $bounds=vector(1.0, 2.0, 3.0, 4.0, 5.0), - $labels=vector("dim") -]); + $bounds=vector(1.0, 2.0, 3.0, 4.0, 5.0)], + vector("dim") +); function print_metrics(what: string, metrics: vector of Telemetry::Metric) { @@ -57,7 +57,7 @@ function print_metrics(what: string, metrics: vector of Telemetry::Metric) for (i in metrics) { local m = metrics[i]; - print 
m$opts$metric_type, m$opts$prefix, m$opts$name, m$opts$labels, m$labels, m$value; + print m$opts$metric_type, m$opts$prefix, m$opts$name, m$label_names, m$label_values, m$value; } } @@ -67,7 +67,7 @@ function print_histogram_metrics(what: string, metrics: vector of Telemetry::His for (i in metrics) { local m = metrics[i]; - print m$opts$metric_type, m$opts$prefix, m$opts$name, m$opts$bounds, m$opts$labels, m$labels, m$values, m$sum, m$observations; + print m$opts$metric_type, m$opts$prefix, m$opts$name, m$opts$bounds, m$label_names, m$label_values, m$values, m$sum, m$observations; } } diff --git a/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek b/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek index b892f5740c..f327e2ebff 100644 --- a/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek +++ b/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek @@ -18,11 +18,11 @@ global connection_duration_hf = Telemetry::register_histogram_family([ global realistic_connection_duration_hf = Telemetry::register_histogram_family([ $prefix="zeek", $name="realistic_connection_duration", - $labels=vector("proto"), $unit="seconds", $help_text="Monitored connection durations by protocol", - $bounds=vector(0.1, 1.0, 10.0, 30.0, 60.0, 120.0, 300, 900.0, 1800.0) -]); + $bounds=vector(0.1, 1.0, 10.0, 30.0, 60.0, 120.0, 300, 900.0, 1800.0)], + vector("proto"), +); global connection_duration_h = Telemetry::histogram_with(connection_duration_hf); @@ -42,8 +42,8 @@ event zeek_done() &priority=-100 { local hm = histogram_metrics[i]; print hm$opts$metric_type, hm$opts$prefix, hm$opts$name; - print hm$opts$labels; - print hm$labels; + print hm$label_names; + print hm$label_values; print hm$opts$bounds; print hm$values; print hm$observations, hm$sum; diff --git a/testing/btest/scripts/base/frameworks/telemetry/event-handler-invocations.zeek 
b/testing/btest/scripts/base/frameworks/telemetry/event-handler-invocations.zeek index 5060c357a8..c0a9c73b2d 100644 --- a/testing/btest/scripts/base/frameworks/telemetry/event-handler-invocations.zeek +++ b/testing/btest/scripts/base/frameworks/telemetry/event-handler-invocations.zeek @@ -16,7 +16,7 @@ event zeek_done() &priority=-100 local ms = Telemetry::collect_metrics("zeek", "event_handler_invocations"); for ( _, m in ms ) { - if ( /zeek_.*|connection_.*/ in cat(m$labels)) - print m$opts$prefix, m$opts$name, m$labels, m$value; + if ( /zeek_.*|connection_.*/ in cat(m$label_values)) + print m$opts$prefix, m$opts$name, m$label_values, m$value; } } diff --git a/testing/btest/scripts/base/utils/json.test b/testing/btest/scripts/base/utils/json.test index 30e8e201ba..dbc394cd9a 100644 --- a/testing/btest/scripts/base/utils/json.test +++ b/testing/btest/scripts/base/utils/json.test @@ -140,9 +140,9 @@ event zeek_init() $prefix="btest", $name="btest_testing_gauge", $unit="", - $help_text="Btest testing", - $labels=vector("dim_1"), - ]); + $help_text="Btest testing"], + vector("dim_1"), + ); local gauge = Telemetry::gauge_with(gauge_family, vector("dim_1_value")); print to_json(gauge); print to_json(gauge_family); @@ -151,9 +151,9 @@ event zeek_init() $prefix="btest", $name="btest_testing_counter", $unit="", - $help_text="Btest testing", - $labels=vector("dim_1"), - ]); + $help_text="Btest testing"], + vector("dim_1"), + ); local counter = Telemetry::counter_with(counter_family, vector("dim_1_value")); print to_json(counter); print to_json(counter_family); diff --git a/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek b/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek index 8c208fd9b5..869fcc5884 100644 --- a/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek +++ b/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek @@ -12,9 +12,9 @@ global connections_by_proto_cf = 
Telemetry::register_counter_family([ $prefix="btest", $name="connections", $unit="", - $help_text="Total number of monitored connections", + $help_text="Total number of monitored connections"], $labels=vector("proto") -]); +); global connection_duration_hf = Telemetry::register_histogram_family([ $prefix="btest", From 65678fbfdb00fa1673e68dc0cd5715ce05a2a5ee Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Mon, 3 Jun 2024 09:34:17 -0700 Subject: [PATCH 11/89] Fix a bunch of copy-instead-of-move findings from Coverity --- src/telemetry/Counter.cc | 4 ++-- src/telemetry/Gauge.cc | 4 ++-- src/telemetry/Manager.cc | 20 ++++++++++---------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/telemetry/Counter.cc b/src/telemetry/Counter.cc index 5abb624f20..8b34624254 100644 --- a/src/telemetry/Counter.cc +++ b/src/telemetry/Counter.cc @@ -5,7 +5,7 @@ using namespace zeek::telemetry; Counter::Counter(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept : handle(family->Add(labels)), labels(labels) { if ( callback ) { - handle.AddCollectCallback(callback); + handle.AddCollectCallback(std::move(callback)); has_callback = true; } } @@ -37,5 +37,5 @@ std::shared_ptr CounterFamily::GetOrAdd(Span labels, std::shared_ptr CounterFamily::GetOrAdd(std::initializer_list labels, prometheus::CollectCallbackPtr callback) { - return GetOrAdd(Span{labels.begin(), labels.size()}, callback); + return GetOrAdd(Span{labels.begin(), labels.size()}, std::move(callback)); } diff --git a/src/telemetry/Gauge.cc b/src/telemetry/Gauge.cc index f3f510b436..273c9a57bf 100644 --- a/src/telemetry/Gauge.cc +++ b/src/telemetry/Gauge.cc @@ -17,7 +17,7 @@ double Gauge::Value() const noexcept { Gauge::Gauge(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept : handle(family->Add(labels)), labels(labels) { if ( callback ) { - handle.AddCollectCallback(callback); + 
handle.AddCollectCallback(std::move(callback)); has_callback = true; } } @@ -37,5 +37,5 @@ std::shared_ptr GaugeFamily::GetOrAdd(Span labels, prome std::shared_ptr GaugeFamily::GetOrAdd(std::initializer_list labels, prometheus::CollectCallbackPtr callback) { - return GetOrAdd(Span{labels.begin(), labels.size()}, callback); + return GetOrAdd(Span{labels.begin(), labels.size()}, std::move(callback)); } diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index 888c8bad38..0fedde48e0 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -265,10 +265,10 @@ ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view label_values_vec->Append(make_intrusive(lbl.value)); } - r->Assign(label_names_idx, label_names_vec); - r->Assign(label_values_idx, label_values_vec); + r->Assign(label_names_idx, std::move(label_names_vec)); + r->Assign(label_values_idx, std::move(label_values_vec)); - ret_val->Append(r); + ret_val->Append(std::move(r)); } } @@ -285,7 +285,7 @@ ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view } } - return ret_val; + return std::move(ret_val); } ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::string_view name_pattern) { @@ -337,8 +337,8 @@ ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st label_values_vec->Append(make_intrusive(lbl.value)); } - r->Assign(label_names_idx, label_names_vec); - r->Assign(label_values_idx, label_values_vec); + r->Assign(label_names_idx, std::move(label_names_vec)); + r->Assign(label_values_idx, std::move(label_values_vec)); auto double_values_vec = make_intrusive(double_vec_type); std::vector boundaries; @@ -361,9 +361,9 @@ ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st r->Assign(sum_idx, zeek::make_intrusive(inst.histogram.sample_sum)); RecordValPtr local_opts_record = r->GetField(opts_idx); - local_opts_record->Assign(bounds_idx, bounds_vec); + 
local_opts_record->Assign(bounds_idx, std::move(bounds_vec)); - ret_val->Append(r); + ret_val->Append(std::move(r)); } } @@ -380,7 +380,7 @@ ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st } } - return ret_val; + return std::move(ret_val); } void Manager::BuildClusterJson() { @@ -488,7 +488,7 @@ GaugePtr Manager::GaugeInstance(std::string_view prefix, std::string_view name, std::string_view helptext, std::string_view unit, prometheus::CollectCallbackPtr callback) { auto lbl_span = Span{labels.begin(), labels.size()}; - return GaugeInstance(prefix, name, lbl_span, helptext, unit, callback); + return GaugeInstance(prefix, name, lbl_span, helptext, unit, std::move(callback)); } HistogramFamilyPtr Manager::HistogramFamily(std::string_view prefix, std::string_view name, From 32fe94f0f839c5f3d3bb7af4f604d8e426b69166 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Mon, 3 Jun 2024 09:35:20 -0700 Subject: [PATCH 12/89] Fix a memory leak with the CivetWeb callbacks in telemetry --- src/telemetry/Manager.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index 0fedde48e0..a5a0ac872d 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -66,6 +66,9 @@ void Manager::InitPostScript() { try { prometheus_exposer = std::make_unique(prometheus_url, 2, callbacks); + + // CivetWeb stores a copy of the callbacks, so we're safe to delete the pointer here + delete callbacks; } catch ( const CivetException& exc ) { reporter->FatalError("Failed to setup Prometheus endpoint: %s\n", exc.what()); } From dd0814c8048b1db9b1cc755017fd88f5404ba135 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Mon, 3 Jun 2024 10:41:16 -0700 Subject: [PATCH 13/89] Add prometheus-cpp files to install set for plugins to use --- CMakeLists.txt | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 60de8b05fc..13ea5a5fe2 100644 --- 
a/CMakeLists.txt +++ b/CMakeLists.txt @@ -338,15 +338,20 @@ add_zeek_dynamic_plugin_build_interface_include_directories( ${PROJECT_SOURCE_DIR}/auxil/broker/libbroker ${PROJECT_SOURCE_DIR}/auxil/paraglob/include ${PROJECT_SOURCE_DIR}/auxil/rapidjson/include + ${PROJECT_SOURCE_DIR}/auxil/prometheus-cpp/core/include ${CMAKE_BINARY_DIR}/src ${CMAKE_BINARY_DIR}/src/include ${CMAKE_BINARY_DIR}/auxil/binpac/lib - ${CMAKE_BINARY_DIR}/auxil/broker/libbroker) + ${CMAKE_BINARY_DIR}/auxil/broker/libbroker + ${CMAKE_BINARY_DIR}/auxil/prometheus-cpp/core/include) # threading/formatters/JSON.h includes rapidjson headers and may be used # by external plugins, extend the include path. target_include_directories(zeek_dynamic_plugin_base SYSTEM INTERFACE $) +target_include_directories( + zeek_dynamic_plugin_base SYSTEM + INTERFACE $) # Convenience function for adding an OBJECT library that feeds directly into the # main target(s). @@ -1013,6 +1018,12 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/rapidjson/include/rapidjson install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/filesystem/include/ghc DESTINATION include/zeek/3rdparty/) +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/prometheus-cpp/core/include/prometheus + DESTINATION include/zeek/3rdparty/prometheus-cpp/include) + +install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/auxil/prometheus-cpp/core/include/prometheus + DESTINATION include/zeek/3rdparty/prometheus-cpp/include) + # Create 3rdparty/ghc within the build directory so that the include for # "zeek/3rdparty/ghc/filesystem.hpp" works within the build tree. 
execute_process(COMMAND "${CMAKE_COMMAND}" -E make_directory From 9d6ba594b98221de887c87a070bc22044dce58b3 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Mon, 3 Jun 2024 10:41:50 -0700 Subject: [PATCH 14/89] Use forward declarations of prometheus-cpp types in telemetry::Manager --- src/telemetry/Manager.cc | 6 ++++++ src/telemetry/Manager.h | 9 ++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index a5a0ac872d..3ad3968e7d 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -6,6 +6,8 @@ // CivetServer is from the civetweb submodule in prometheus-cpp #include +#include +#include #include #include #include @@ -25,6 +27,10 @@ namespace zeek::telemetry { Manager::Manager() { prometheus_registry = std::make_shared(); } +// This can't be defined as =default because of the use of unique_ptr with a forward-declared type +// in Manager.h +Manager::~Manager() {} + void Manager::InitPostScript() { // Metrics port setting is used to calculate a URL for prometheus scraping std::string prometheus_url; diff --git a/src/telemetry/Manager.h b/src/telemetry/Manager.h index f81a39a6d5..c4c2537f1a 100644 --- a/src/telemetry/Manager.h +++ b/src/telemetry/Manager.h @@ -2,8 +2,6 @@ #pragma once -#include -#include #include #include #include @@ -24,6 +22,11 @@ class RecordVal; using RecordValPtr = IntrusivePtr; } // namespace zeek +namespace prometheus { +class Exposer; +class Registry; +} // namespace prometheus + namespace zeek::telemetry { /** @@ -37,7 +40,7 @@ public: Manager& operator=(const Manager&) = delete; - ~Manager() = default; + ~Manager(); /** * Initialization of the manager. 
This is called late during Zeek's From 2d6c433dca0d1cb9709f263f61bbc33a306b7420 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Mon, 3 Jun 2024 14:05:43 -0700 Subject: [PATCH 15/89] Update zeekctl tests for telemetry rework --- auxil/zeekctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auxil/zeekctl b/auxil/zeekctl index 4dad935e9c..614228f93b 160000 --- a/auxil/zeekctl +++ b/auxil/zeekctl @@ -1 +1 @@ -Subproject commit 4dad935e9c995b7ae2f0a4e7677892fcfb988cf0 +Subproject commit 614228f93bec4a991e3aa50055b70a0644781607 From 1cdca7c1d000a042affe5150a413479bfd9306c5 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Mon, 3 Jun 2024 17:03:31 -0700 Subject: [PATCH 16/89] Change how we count FDs on Linux to fix zeekctl stop issues --- src/telemetry/ProcessStats.cc | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/telemetry/ProcessStats.cc b/src/telemetry/ProcessStats.cc index c0e9d322b7..f2a0447b63 100644 --- a/src/telemetry/ProcessStats.cc +++ b/src/telemetry/ProcessStats.cc @@ -67,6 +67,19 @@ std::atomic global_page_size; namespace zeek::telemetry::detail { +int64_t count_entries_in_directory(const char* path) { + int64_t result = 0; + if ( auto dptr = opendir(path); dptr != nullptr ) { + for ( auto entry = readdir(dptr); entry != nullptr; entry = readdir(dptr) ) { + auto fname = entry->d_name; + if ( strcmp(".", fname) != 0 && strcmp("..", fname) != 0 ) + ++result; + } + closedir(dptr); + } + return result; +} + /// Caches the result from a `sysconf` call in a cache variable to avoid /// frequent syscalls. Sets `cache_var` to -1 in case of an error. 
Initially, /// `cache_var` must be 0 and we assume a successful syscall would always return @@ -143,9 +156,7 @@ process_stats get_process_stats() { result.vms = vmsize_bytes; result.cpu = static_cast(utime_ticks + stime_ticks) / ticks_per_second; - zeek::filesystem::path fd_path{"/proc/self/fd"}; - result.fds = - std::distance(zeek::filesystem::directory_iterator{fd_path}, zeek::filesystem::directory_iterator{}); + result.fds = count_entries_in_directory("/proc/self/fd"); } return result; From 99e64aa1136632fc03ff3a022132041bf391df43 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Tue, 4 Jun 2024 08:49:42 -0700 Subject: [PATCH 17/89] Restore label_names field in MetricOpts record --- scripts/base/frameworks/telemetry/main.zeek | 35 +++++++++---------- scripts/base/init-bare.zeek | 12 +++++++ .../base/frameworks/telemetry/basic.zeek | 30 ++++++++-------- .../telemetry/conn-duration-histogram.zeek | 6 ++-- testing/btest/scripts/base/utils/json.test | 12 +++---- .../frameworks/telemetry/log-prefixes.zeek | 6 ++-- 6 files changed, 55 insertions(+), 46 deletions(-) diff --git a/scripts/base/frameworks/telemetry/main.zeek b/scripts/base/frameworks/telemetry/main.zeek index e41ef1b21e..d71a7d9783 100644 --- a/scripts/base/frameworks/telemetry/main.zeek +++ b/scripts/base/frameworks/telemetry/main.zeek @@ -36,8 +36,7 @@ export { }; ## Register a counter family. - global register_counter_family: function(opts: MetricOpts, - label_names: labels_vector &default=vector()): CounterFamily; + global register_counter_family: function(opts: MetricOpts): CounterFamily; ## Get a :zeek:see:`Telemetry::Counter` instance given family and label values. global counter_with: function(cf: CounterFamily, @@ -120,8 +119,7 @@ export { }; ## Register a gauge family. 
- global register_gauge_family: function(opts: MetricOpts, - label_names: labels_vector &default=vector()): GaugeFamily; + global register_gauge_family: function(opts: MetricOpts): GaugeFamily; ## Get a :zeek:see:`Telemetry::Gauge` instance given family and label values. @@ -217,8 +215,7 @@ export { }; ## Register a histogram family. - global register_histogram_family: function(opts: MetricOpts, - label_names: labels_vector &default=vector()): HistogramFamily; + global register_histogram_family: function(opts: MetricOpts): HistogramFamily; ## Get a :zeek:see:`Telemetry::Histogram` instance given family and label values. global histogram_with: function(hf: HistogramFamily, @@ -293,16 +290,16 @@ function make_labels(keys: vector of string, values: labels_vector): table[strin return labels; } -function register_counter_family(opts: MetricOpts, label_names: labels_vector): CounterFamily +function register_counter_family(opts: MetricOpts): CounterFamily { local f = Telemetry::__counter_family( opts$prefix, opts$name, - label_names, + opts$label_names, opts$help_text, opts$unit ); - return CounterFamily($__family=f, $__labels=label_names); + return CounterFamily($__family=f, $__labels=opts$label_names); } # Fallback Counter returned when there are issues with the labels. @@ -352,16 +349,16 @@ function counter_family_set(cf: CounterFamily, label_values: labels_vector, valu return counter_set(counter_with(cf, label_values), value); } -function register_gauge_family(opts: MetricOpts, label_names: labels_vector): GaugeFamily +function register_gauge_family(opts: MetricOpts): GaugeFamily { local f = Telemetry::__gauge_family( opts$prefix, opts$name, - label_names, + opts$label_names, opts$help_text, opts$unit ); - return GaugeFamily($__family=f, $__labels=label_names); + return GaugeFamily($__family=f, $__labels=opts$label_names); } # Fallback Gauge returned when there are issues with the label usage. 
@@ -420,17 +417,17 @@ function gauge_family_set(gf: GaugeFamily, label_values: labels_vector, value: d return gauge_set(gauge_with(gf, label_values), value); } -function register_histogram_family(opts: MetricOpts, label_names: labels_vector): HistogramFamily +function register_histogram_family(opts: MetricOpts): HistogramFamily { local f = Telemetry::__histogram_family( opts$prefix, opts$name, - label_names, + opts$label_names, opts$bounds, opts$help_text, opts$unit ); - return HistogramFamily($__family=f, $__labels=label_names); + return HistogramFamily($__family=f, $__labels=opts$label_names); } # Fallback Histogram when there are issues with the labels. @@ -486,10 +483,10 @@ global version_gauge_family = Telemetry::register_gauge_family([ $prefix="zeek", $name="version_info", $unit="", - $help_text="The Zeek version"], - vector("version_number", "major", "minor", "patch", "commit", - "beta", "debug","version_string") -); + $help_text="The Zeek version", + $label_names=vector("version_number", "major", "minor", "patch", "commit", + "beta", "debug","version_string") +]); event zeek_init() { diff --git a/scripts/base/init-bare.zeek b/scripts/base/init-bare.zeek index 97a2bf169f..30b49def26 100644 --- a/scripts/base/init-bare.zeek +++ b/scripts/base/init-bare.zeek @@ -5802,6 +5802,18 @@ export { ## Documentation for this metric. help_text: string &optional; + ## The label names (also called dimensions) of the metric. When + ## instantiating or working with concrete metrics, corresponding + ## label values have to be provided. Examples of a label might + ## be the protocol a general observation applies to, the + ## directionality in a traffic flow, or protocol-specific + ## context like a particular message type. 
This field is only + ## used in the construction of new metrics and will not be + ## filled in when returned from + ## :zeek:see:`Telemetry::collect_metrics` or + ## :zeek:see:`Telemetry::collect_histogram_metrics`, + label_names: vector of string &default=vector(); + ## Whether the metric represents something that is accumulating. ## Defaults to ``T`` for counters and ``F`` for gauges and ## histograms. diff --git a/testing/btest/scripts/base/frameworks/telemetry/basic.zeek b/testing/btest/scripts/base/frameworks/telemetry/basic.zeek index aeb1e5cde1..0592bff684 100644 --- a/testing/btest/scripts/base/frameworks/telemetry/basic.zeek +++ b/testing/btest/scripts/base/frameworks/telemetry/basic.zeek @@ -14,42 +14,42 @@ global btest_a_cf = Telemetry::register_counter_family([ $prefix="btest", $name="a_test", $unit="", - $help_text="A btest metric"], - vector("x", "y") -); + $help_text="A btest metric", + $label_names=vector("x", "y") +]); global btest_b_cf = Telemetry::register_counter_family([ $prefix="btest", $name="b_test", $unit="", - $help_text="Another btest metric"], - vector("x", "y") -); + $help_text="Another btest metric", + $label_names=vector("x", "y") +]); global btest_c_cf = Telemetry::register_counter_family([ $prefix="btest", $name="c_test", $unit="", - $help_text="The last btest metric"], - vector("x", "y") -); + $help_text="The last btest metric", + $label_names=vector("x", "y") +]); global system_sensor_temp_gf = Telemetry::register_gauge_family([ $prefix="system", $name="sensor_temperature", $unit="celsius", - $help_text="Temperatures reported by sensors in the system"], - vector("name") -); + $help_text="Temperatures reported by sensors in the system", + $label_names=vector("name") +]); global btest_sample_histogram_hf = Telemetry::register_histogram_family([ $prefix="btest", $name="sample_histogram", $unit="", $help_text="A sample histogram that is not returned by Telemetry::collect_metrics", - $bounds=vector(1.0, 2.0, 3.0, 4.0, 5.0)], - 
vector("dim") -); + $bounds=vector(1.0, 2.0, 3.0, 4.0, 5.0), + $label_names=vector("dim") +]); function print_metrics(what: string, metrics: vector of Telemetry::Metric) { diff --git a/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek b/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek index f327e2ebff..3f01d9ddf3 100644 --- a/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek +++ b/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek @@ -18,11 +18,11 @@ global connection_duration_hf = Telemetry::register_histogram_family([ global realistic_connection_duration_hf = Telemetry::register_histogram_family([ $prefix="zeek", $name="realistic_connection_duration", + $label_names=vector("proto"), $unit="seconds", $help_text="Monitored connection durations by protocol", - $bounds=vector(0.1, 1.0, 10.0, 30.0, 60.0, 120.0, 300, 900.0, 1800.0)], - vector("proto"), -); + $bounds=vector(0.1, 1.0, 10.0, 30.0, 60.0, 120.0, 300, 900.0, 1800.0), +]); global connection_duration_h = Telemetry::histogram_with(connection_duration_hf); diff --git a/testing/btest/scripts/base/utils/json.test b/testing/btest/scripts/base/utils/json.test index dbc394cd9a..da741102fe 100644 --- a/testing/btest/scripts/base/utils/json.test +++ b/testing/btest/scripts/base/utils/json.test @@ -140,9 +140,9 @@ event zeek_init() $prefix="btest", $name="btest_testing_gauge", $unit="", - $help_text="Btest testing"], - vector("dim_1"), - ); + $help_text="Btest testing", + $label_names=vector("dim_1"), + ]); local gauge = Telemetry::gauge_with(gauge_family, vector("dim_1_value")); print to_json(gauge); print to_json(gauge_family); @@ -151,9 +151,9 @@ event zeek_init() $prefix="btest", $name="btest_testing_counter", $unit="", - $help_text="Btest testing"], - vector("dim_1"), - ); + $help_text="Btest testing", + $label_names=vector("dim_1"), + ]); local counter = Telemetry::counter_with(counter_family, 
vector("dim_1_value")); print to_json(counter); print to_json(counter_family); diff --git a/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek b/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek index 869fcc5884..0752c605e2 100644 --- a/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek +++ b/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek @@ -12,9 +12,9 @@ global connections_by_proto_cf = Telemetry::register_counter_family([ $prefix="btest", $name="connections", $unit="", - $help_text="Total number of monitored connections"], - $labels=vector("proto") -); + $help_text="Total number of monitored connections", + $label_names=vector("proto") +]); global connection_duration_hf = Telemetry::register_histogram_family([ $prefix="btest", From 2680bac48028171fbc7e2415c343b53674156f96 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Tue, 4 Jun 2024 09:09:33 -0700 Subject: [PATCH 18/89] Remove unnecessary shared_from_this on instrument classes --- src/telemetry/Counter.h | 2 +- src/telemetry/Gauge.h | 2 +- src/telemetry/Histogram.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/telemetry/Counter.h b/src/telemetry/Counter.h index 5f8ec2bf32..f6c49315b7 100644 --- a/src/telemetry/Counter.h +++ b/src/telemetry/Counter.h @@ -63,7 +63,7 @@ private: using CounterPtr = std::shared_ptr; -class CounterFamily : public MetricFamily, public std::enable_shared_from_this { +class CounterFamily : public MetricFamily { public: static inline const char* OpaqueName = "CounterMetricFamilyVal"; diff --git a/src/telemetry/Gauge.h b/src/telemetry/Gauge.h index cf04e7a9a0..900cb7b784 100644 --- a/src/telemetry/Gauge.h +++ b/src/telemetry/Gauge.h @@ -81,7 +81,7 @@ private: using GaugePtr = std::shared_ptr; -class GaugeFamily : public MetricFamily, public std::enable_shared_from_this { +class GaugeFamily : public MetricFamily { public: static inline const char* OpaqueName = "GaugeMetricFamilyVal"; 
diff --git a/src/telemetry/Histogram.h b/src/telemetry/Histogram.h index 65d371cd6d..ec8858e463 100644 --- a/src/telemetry/Histogram.h +++ b/src/telemetry/Histogram.h @@ -46,7 +46,7 @@ private: using HistogramPtr = std::shared_ptr; -class HistogramFamily : public MetricFamily, public std::enable_shared_from_this { +class HistogramFamily : public MetricFamily { public: static inline const char* OpaqueName = "HistogramMetricFamilyVal"; From 1aebe01e1476965e834b630bf86fcda8d54f03a4 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Tue, 4 Jun 2024 12:21:44 -0700 Subject: [PATCH 19/89] Switch to zeek fork of prometheus-cpp --- .gitmodules | 2 +- auxil/prometheus-cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index ddb2651a18..38e0606337 100644 --- a/.gitmodules +++ b/.gitmodules @@ -75,4 +75,4 @@ url = https://github.com/microsoft/vcpkg [submodule "auxil/prometheus-cpp"] path = auxil/prometheus-cpp - url = https://github.com/jupp0r/prometheus-cpp + url = https://github.com/zeek/prometheus-cpp diff --git a/auxil/prometheus-cpp b/auxil/prometheus-cpp index cdb357ad55..2fec7205d1 160000 --- a/auxil/prometheus-cpp +++ b/auxil/prometheus-cpp @@ -1 +1 @@ -Subproject commit cdb357ad556c9ba96cbfa90fed2940fedf101673 +Subproject commit 2fec7205d1a9cb4829b86c943d599696d53de85c From 9eb39d69071221050cc8bb8fa269dcebbcb885db Mon Sep 17 00:00:00 2001 From: zeek-bot Date: Wed, 5 Jun 2024 00:22:15 +0000 Subject: [PATCH 20/89] Update doc submodule [nomail] [skip ci] --- doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc b/doc index a369cee890..39de6cb8c6 160000 --- a/doc +++ b/doc @@ -1 +1 @@ -Subproject commit a369cee890d88a106216915c0202ddb581e39974 +Subproject commit 39de6cb8c602a4ed79942e08bd7c97c1eca9c3ab From 7ac703b97d02fae78dce438832de9d408380c5d4 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Thu, 6 Jun 2024 08:20:46 -0700 Subject: [PATCH 21/89] Update cmake submodule [nomail] --- cmake | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake b/cmake index 34cf738d60..b66530ba8e 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 34cf738d60a95d5ecf001de2c6e259578be4fc56 +Subproject commit b66530ba8ec0153ac3e720265191633eea2e0c03 From c0f14bdc0b36daf3af4836613b80b0ac946706c3 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Thu, 6 Jun 2024 08:53:48 -0700 Subject: [PATCH 22/89] Change prometheus test to check for require jq --- .../btest/scripts/policy/frameworks/telemetry/prometheus.zeek | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/btest/scripts/policy/frameworks/telemetry/prometheus.zeek b/testing/btest/scripts/policy/frameworks/telemetry/prometheus.zeek index ff7155b267..0d6e7794b0 100644 --- a/testing/btest/scripts/policy/frameworks/telemetry/prometheus.zeek +++ b/testing/btest/scripts/policy/frameworks/telemetry/prometheus.zeek @@ -2,6 +2,7 @@ # Note compilable to C++ due to globals being initialized to a record that # has an opaque type as a field. # @TEST-REQUIRES: test "${ZEEK_USE_CPP}" != "1" +# @TEST-REQUIRES: which jq # # @TEST-PORT: BROKER_PORT1 # @TEST-PORT: BROKER_PORT2 From 777b0be03e4e62783910e205b21877286f66598e Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Thu, 6 Jun 2024 12:34:46 -0700 Subject: [PATCH 23/89] Check for 'zeekctl check' before trying to start up prometheus --- src/telemetry/Manager.cc | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index 3ad3968e7d..d8343a1a43 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -70,16 +70,18 @@ void Manager::InitPostScript() { } } - try { - prometheus_exposer = std::make_unique(prometheus_url, 2, callbacks); + if ( ! 
getenv("ZEEKCTL_CHECK_CONFIG") ) { + try { + prometheus_exposer = std::make_unique(prometheus_url, 2, callbacks); - // CivetWeb stores a copy of the callbacks, so we're safe to delete the pointer here - delete callbacks; - } catch ( const CivetException& exc ) { - reporter->FatalError("Failed to setup Prometheus endpoint: %s\n", exc.what()); + // CivetWeb stores a copy of the callbacks, so we're safe to delete the pointer here + delete callbacks; + } catch ( const CivetException& exc ) { + reporter->FatalError("Failed to setup Prometheus endpoint: %s\n", exc.what()); + } + + prometheus_exposer->RegisterCollectable(prometheus_registry); } - - prometheus_exposer->RegisterCollectable(prometheus_registry); } #ifdef HAVE_PROCESS_STAT_METRICS From 7a3a2606f0424fdc556394a7a84546ed3b0539c2 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Tue, 4 Jun 2024 21:42:56 -0700 Subject: [PATCH 24/89] Update cmake submodule [nomail] --- cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake b/cmake index b66530ba8e..aeca56db02 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit b66530ba8ec0153ac3e720265191633eea2e0c03 +Subproject commit aeca56db02ef5056ea880ec7fa94463841b4b535 From d549e3d56a7558fe9ffc5c38ab3c21b8d14e94f8 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Thu, 6 Jun 2024 18:32:56 -0700 Subject: [PATCH 25/89] Add Telemetry::metrics_address option --- scripts/base/frameworks/telemetry/options.zeek | 4 ++++ src/telemetry/Manager.cc | 6 ++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/scripts/base/frameworks/telemetry/options.zeek b/scripts/base/frameworks/telemetry/options.zeek index f26d8a0ef0..9e5adf1fbb 100644 --- a/scripts/base/frameworks/telemetry/options.zeek +++ b/scripts/base/frameworks/telemetry/options.zeek @@ -6,6 +6,10 @@ module Telemetry; # to see the options without needing the rest. export { + ## Address used to make metric data available to Prometheus scrapers via + ## HTTP. 
+ const metrics_address = getenv("ZEEK_DEFAULT_LISTEN_ADDRESS") &redef; + ## Port used to make metric data available to Prometheus scrapers via ## HTTP. const metrics_port = 0/unknown &redef; diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index d8343a1a43..1df895dea1 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -35,8 +35,9 @@ void Manager::InitPostScript() { // Metrics port setting is used to calculate a URL for prometheus scraping std::string prometheus_url; auto metrics_port = id::find_val("Telemetry::metrics_port")->AsPortVal(); + auto metrics_address = id::find_val("Telemetry::metrics_address")->AsStringVal()->ToStdString(); if ( metrics_port->Port() != 0 ) - prometheus_url = util::fmt("localhost:%u", metrics_port->Port()); + prometheus_url = util::fmt("%s:%u", metrics_address.data(), metrics_port->Port()); if ( ! prometheus_url.empty() ) { CivetCallbacks* callbacks = nullptr; @@ -77,7 +78,8 @@ void Manager::InitPostScript() { // CivetWeb stores a copy of the callbacks, so we're safe to delete the pointer here delete callbacks; } catch ( const CivetException& exc ) { - reporter->FatalError("Failed to setup Prometheus endpoint: %s\n", exc.what()); + reporter->FatalError("Failed to setup Prometheus endpoint: %s. 
Attempted to bind to %s.", exc.what(), + prometheus_url.c_str()); } prometheus_exposer->RegisterCollectable(prometheus_registry); From 753127be6d30e9ff3759aae28ac9fa6f5a1d7312 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Fri, 7 Jun 2024 11:31:34 -0700 Subject: [PATCH 26/89] Suppress a known data race during civetweb shutdown --- ci/tsan_suppressions.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ci/tsan_suppressions.txt b/ci/tsan_suppressions.txt index ded78d13ac..7490adda94 100644 --- a/ci/tsan_suppressions.txt +++ b/ci/tsan_suppressions.txt @@ -42,3 +42,7 @@ race:zeek::threading::InputMessage::Object mutex:zeek::threading::Queue::Put mutex:zeek::threading::Queue::LocksForAllQueues deadlock:zeek::threading::Queue::LocksForAllQueues + +# This only happens at shutdown. It was supposedly fixed in civetweb, but has cropped +# up again. See https://github.com/civetweb/civetweb/issues/861 for details. +race:mg_stop From d60365349522a91f15e2b2f5f83e290f29ef5bd4 Mon Sep 17 00:00:00 2001 From: zeek-bot Date: Sat, 8 Jun 2024 00:11:59 +0000 Subject: [PATCH 27/89] Update doc submodule [nomail] [skip ci] --- doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc b/doc index 39de6cb8c6..5c377d2320 160000 --- a/doc +++ b/doc @@ -1 +1 @@ -Subproject commit 39de6cb8c602a4ed79942e08bd7c97c1eca9c3ab +Subproject commit 5c377d232043cfcaf23df260880ca1613b19a9f4 From f228cf878a8c06f036bef9f7fceb9ed32028e48c Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Mon, 10 Jun 2024 17:29:58 +0200 Subject: [PATCH 28/89] cirrus: Unset CCACHE_BASEDIR for asan/coverage build When CCACHE_BASEDIR is set, ccache will rewrite absolute paths to relative paths in order to allow compilation in different source directories. We do not need this feature on Cirrus (the checkout is always in /zeek) and using absolute paths avoids confusion/normalization needs for the gcov -p results. 
We could consider removing the global CCACHE_BASEDIR, but it'd bust the ccache of every other task, too. --- .cirrus.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.cirrus.yml b/.cirrus.yml index 54458bfd38..e159dbb0ac 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -397,6 +397,8 @@ asan_sanitizer_task: CXXFLAGS: -DZEEK_DICT_DEBUG ZEEK_CI_CONFIGURE_FLAGS: *ASAN_SANITIZER_CONFIG ASAN_OPTIONS: detect_leaks=1:detect_odr_violation=0 + # Use absolute paths for coverage files. + CCACHE_BASEDIR: ubsan_sanitizer_task: container: From 8bf3d3c7fcc88eff035adacfbc929d18a0a254ce Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Mon, 10 Jun 2024 20:10:53 +0200 Subject: [PATCH 29/89] Bump cmake for -fprofile-update=atomic usage --- cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake b/cmake index aeca56db02..f07d6ca3af 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit aeca56db02ef5056ea880ec7fa94463841b4b535 +Subproject commit f07d6ca3af3a7d821d1b9369d24aa91e053396a2 From 20eeb6dbf61e659679173cf91a67002773788dd8 Mon Sep 17 00:00:00 2001 From: Benjamin Bannier Date: Tue, 11 Jun 2024 14:48:35 +0200 Subject: [PATCH 30/89] Drop EOL centos8-stream in CI --- .cirrus.yml | 8 -------- ci/centos-stream-8/Dockerfile | 34 ---------------------------------- 2 files changed, 42 deletions(-) delete mode 100644 ci/centos-stream-8/Dockerfile diff --git a/.cirrus.yml b/.cirrus.yml index e159dbb0ac..2352701c91 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -188,14 +188,6 @@ centosstream9_task: << : *RESOURCES_TEMPLATE << : *CI_TEMPLATE -centosstream8_task: - container: - # Stream 8 EOL: May 31, 2024 - dockerfile: ci/centos-stream-8/Dockerfile - << : *RESOURCES_TEMPLATE - << : *CI_TEMPLATE - << : *SKIP_TASK_ON_PR - debian12_task: container: # Debian 12 (bookworm) EOL: TBD diff --git a/ci/centos-stream-8/Dockerfile b/ci/centos-stream-8/Dockerfile deleted file mode 100644 index d8e7322c11..0000000000 --- a/ci/centos-stream-8/Dockerfile +++ /dev/null @@ -1,34 
+0,0 @@ -FROM quay.io/centos/centos:stream8 - -# A version field to invalidate Cirrus's build cache when needed, as suggested in -# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822 -ENV DOCKERFILE_VERSION 20230801 - -RUN dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -RUN dnf config-manager --set-enabled powertools - -RUN dnf -y install \ - bison \ - ccache \ - cmake \ - diffutils \ - flex \ - gcc \ - gcc-c++ \ - git \ - jq \ - libpcap-devel \ - make \ - openssl \ - openssl-devel \ - procps-ng \ - python38 \ - python38-devel \ - python38-pip\ - sqlite \ - swig \ - which \ - zlib-devel \ - && dnf clean all && rm -rf /var/cache/dnf - -RUN pip3 install websockets junit2html From 9ad77ea9dab06161ea0a3658338295c72191cae4 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Mon, 10 Jun 2024 09:21:28 +0000 Subject: [PATCH 31/89] Update zeekctl submodule --- auxil/zeekctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auxil/zeekctl b/auxil/zeekctl index 614228f93b..0dd71f37ec 160000 --- a/auxil/zeekctl +++ b/auxil/zeekctl @@ -1 +1 @@ -Subproject commit 614228f93bec4a991e3aa50055b70a0644781607 +Subproject commit 0dd71f37ec0120d8440d08451564070aacdda0cc From 9e95ef7f0f2e2b7a2f70820ff0dfcf0713b57b6b Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Mon, 10 Jun 2024 09:26:24 +0000 Subject: [PATCH 32/89] NEWS: Add entry about FileExtractDir --- NEWS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/NEWS b/NEWS index f5ae0c5de3..deedb7b927 100644 --- a/NEWS +++ b/NEWS @@ -51,6 +51,12 @@ Breaking Changes instruments are not. ``Histogram`` instruments don't have the concept of summing. +- Zeekctl now sets `FileExtract::prefix` to `spool/extract_files/` to avoid + deletion of extracted files when stopping worker nodes. To revert to the + previous behavior, set `FileExtractDir` to an empty string in `zeekctl.cfg`. + + If you never enabled Zeek's file extraction functionality, there's no impact. 
+ New Functionality ----------------- From 956e147f708bd865b3215b8471c22e10bce3de84 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 14 Jun 2024 10:55:16 +0200 Subject: [PATCH 33/89] Bump Spicy. --- auxil/spicy | 2 +- src/analyzer/protocol/ldap/ldap.evt | 6 ++-- src/analyzer/protocol/ldap/ldap.spicy | 16 +++++----- src/analyzer/protocol/quic/QUIC.evt | 2 +- src/analyzer/protocol/quic/QUIC.spicy | 30 +++++++++---------- testing/btest/spicy/double-types.zeek | 6 ++-- testing/btest/spicy/export-protocol-enum.zeek | 2 +- testing/btest/spicy/multiple-enum.zeek | 6 ++-- 8 files changed, 35 insertions(+), 35 deletions(-) diff --git a/auxil/spicy b/auxil/spicy index 83bc845b8d..5ff0cfea39 160000 --- a/auxil/spicy +++ b/auxil/spicy @@ -1 +1 @@ -Subproject commit 83bc845b8daf82fa22b783261d4c339627d55c09 +Subproject commit 5ff0cfea39ece44d1ef94f9762926b4bb4138d58 diff --git a/src/analyzer/protocol/ldap/ldap.evt b/src/analyzer/protocol/ldap/ldap.evt index 369a21d331..96baef6f98 100644 --- a/src/analyzer/protocol/ldap/ldap.evt +++ b/src/analyzer/protocol/ldap/ldap.evt @@ -14,9 +14,9 @@ import LDAP; on LDAP::Message -> event LDAP::message($conn, self.messageID, self.opcode, - self.result.code, - self.result.matchedDN, - self.result.diagnosticMessage, + self.result_.code, + self.result_.matchedDN, + self.result_.diagnosticMessage, self.obj, self.arg); diff --git a/src/analyzer/protocol/ldap/ldap.spicy b/src/analyzer/protocol/ldap/ldap.spicy index 7c60d1ec19..8d74b85237 100644 --- a/src/analyzer/protocol/ldap/ldap.spicy +++ b/src/analyzer/protocol/ldap/ldap.spicy @@ -223,7 +223,7 @@ public type Message = unit { var opcode: ProtocolOpcode = ProtocolOpcode::Undef; var applicationBytes: bytes; var unsetResultDefault: Result; - var result: Result& = self.unsetResultDefault; + var result_: Result& = self.unsetResultDefault; var obj: string = ""; var arg: string = ""; var success: bool = False; @@ -328,7 +328,7 @@ type BindRequest = unit(inout message: Message) { type BindResponse 
= unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } # TODO: if we want to parse SASL credentials returned @@ -777,7 +777,7 @@ type SearchResultEntry = unit(inout message: Message) { type SearchResultDone = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; @@ -800,7 +800,7 @@ type ModifyRequest = unit(inout message: Message) { type ModifyResponse = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; @@ -816,7 +816,7 @@ type ModifyResponse = unit(inout message: Message) { type AddResponse = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; @@ -832,7 +832,7 @@ type DelRequest = unit(inout message: Message) { type DelResponse = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; @@ -847,7 +847,7 @@ type DelResponse = unit(inout message: Message) { type ModDNResponse = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; @@ -862,7 +862,7 @@ type ModDNResponse = unit(inout message: Message) { type CompareResponse = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; diff --git a/src/analyzer/protocol/quic/QUIC.evt b/src/analyzer/protocol/quic/QUIC.evt index b81b0084db..a985acf519 100644 --- a/src/analyzer/protocol/quic/QUIC.evt +++ b/src/analyzer/protocol/quic/QUIC.evt @@ -19,6 +19,6 @@ on QUIC::HandshakePacket -> event QUIC::handshake_packet($conn, $is_orig, self.h on QUIC::ZeroRTTPacket -> event QUIC::zero_rtt_packet($conn, $is_orig, self.header.version, self.header.dest_conn_id, self.header.src_conn_id); on QUIC::ConnectionClosePayload -> event QUIC::connection_close_frame($conn, $is_orig, self.header.version, self.header.dest_conn_id, self.header.src_conn_id, - self.error_code.result, self.reason_phrase); + self.error_code.result_, self.reason_phrase); on 
QUIC::UnhandledVersion -> event QUIC::unhandled_version($conn, $is_orig, self.header.version, self.header.dest_conn_id, self.header.src_conn_id); diff --git a/src/analyzer/protocol/quic/QUIC.spicy b/src/analyzer/protocol/quic/QUIC.spicy index fb2f854a3a..bde1650ee2 100644 --- a/src/analyzer/protocol/quic/QUIC.spicy +++ b/src/analyzer/protocol/quic/QUIC.spicy @@ -157,7 +157,7 @@ type FrameType = enum { type VariableLengthInteger = unit { var bytes_to_parse: uint64; - var result: uint64; + var result_: uint64; # Value of the two most significant bits indicates number of bytes # to parse for the variable length integer. @@ -166,11 +166,11 @@ type VariableLengthInteger = unit { # Section 16 and Appendix A : uint8 { self.bytes_to_parse = 2**((0xC0 & $$) >> 6); - self.result = $$ & 0x3F; + self.result_ = $$ & 0x3F; } : uint8[self.bytes_to_parse - 1] if (self.bytes_to_parse > 1) foreach { - self.result = (self.result << 8) | $$; + self.result_ = (self.result_ << 8) | $$; } }; @@ -185,8 +185,8 @@ public type LongHeaderPacketV1 = unit(inout outer: LongHeaderPacket) { outer.encrypted_offset = outer.offset() + self.initial_hdr.length.bytes_to_parse + self.initial_hdr.token_length.bytes_to_parse + - self.initial_hdr.token_length.result; - outer.payload_length = self.initial_hdr.length.result; + self.initial_hdr.token_length.result_; + outer.payload_length = self.initial_hdr.length.result_; } LongPacketTypeV1::ZERO_RTT -> zerortt_hdr : ZeroRTTPacket(outer); @@ -204,8 +204,8 @@ public type LongHeaderPacketV2 = unit(inout outer: LongHeaderPacket) { outer.encrypted_offset = outer.offset() + self.initial_hdr.length.bytes_to_parse + self.initial_hdr.token_length.bytes_to_parse + - self.initial_hdr.token_length.result; - outer.payload_length = self.initial_hdr.length.result; + self.initial_hdr.token_length.result_; + outer.payload_length = self.initial_hdr.length.result_; } LongPacketTypeV2::ZERO_RTT -> zerortt_hdr : ZeroRTTPacket(outer); @@ -281,7 +281,7 @@ public type Frame = 
unit(header: LongHeaderPacket, from_client: bool, crypto_sin FrameType::ACK2 -> b: ACKPayload; FrameType::CRYPTO -> c: CRYPTOPayload(from_client) { # Have the sink re-assemble potentially out-of-order cryptodata - crypto_sink.write(self.c.cryptodata, self.c.offset.result); + crypto_sink.write(self.c.cryptodata, self.c.offset.result_); } FrameType::CONNECTION_CLOSE1 -> : ConnectionClosePayload(header); FrameType::PADDING -> : skip /\x00*/; # eat the padding @@ -295,7 +295,7 @@ public type Frame = unit(header: LongHeaderPacket, from_client: bool, crypto_sin type CRYPTOPayload = unit(from_client: bool) { offset: VariableLengthInteger; length: VariableLengthInteger; - cryptodata: bytes &size=self.length.result; + cryptodata: bytes &size=self.length.result_; }; type ACKPayload = unit { @@ -313,7 +313,7 @@ type ConnectionClosePayload = unit(header: LongHeaderPacket) { -> frame_type: VariableLengthInteger; }; reason_phrase_length: VariableLengthInteger; - reason_phrase: bytes &size=self.reason_phrase_length.result; + reason_phrase: bytes &size=self.reason_phrase_length.result_; }; @@ -326,7 +326,7 @@ type ConnectionClosePayload = unit(header: LongHeaderPacket) { type InitialPacket = unit(header: LongHeaderPacket) { var header: LongHeaderPacket = header; token_length: VariableLengthInteger; - token: bytes &size=self.token_length.result; + token: bytes &size=self.token_length.result_; # 5.4.2. Header Protection Sample # @@ -336,25 +336,25 @@ type InitialPacket = unit(header: LongHeaderPacket) { # # Enforce 4 bytes Packet Number length + 16 bytes sample # ciphertext available. - length: VariableLengthInteger &requires=self.length.result >= 20; + length: VariableLengthInteger &requires=self.length.result_ >= 20; # Consume the remainder of payload. This # includes the packet number field, but we # do not know its length yet. We need the # payload for sampling, however. 
- payload: skip bytes &size=self.length.result; + payload: skip bytes &size=self.length.result_; }; type ZeroRTTPacket = unit(header: LongHeaderPacket) { var header: LongHeaderPacket = header; length: VariableLengthInteger; - payload: skip bytes &size=self.length.result; + payload: skip bytes &size=self.length.result_; }; type HandshakePacket = unit(header: LongHeaderPacket) { var header: LongHeaderPacket = header; length: VariableLengthInteger; - payload: skip bytes &size=self.length.result; + payload: skip bytes &size=self.length.result_; }; diff --git a/testing/btest/spicy/double-types.zeek b/testing/btest/spicy/double-types.zeek index a67b0c5ef5..08fd1f178b 100644 --- a/testing/btest/spicy/double-types.zeek +++ b/testing/btest/spicy/double-types.zeek @@ -32,7 +32,7 @@ protocol analyzer spicy::dtest over TCP: on dtest::Message -> event dtest_message(self.func); -on dtest::Message -> event dtest_result(self.sub.result); +on dtest::Message -> event dtest_result(self.sub.result_); on dtest::Message -> event dtest_result_tuple(dtest::bro_result(self)); @@ -56,11 +56,11 @@ public type Message = unit { }; public type SubMessage = unit { - result: uint8 &convert=RESULT($$); + result_: uint8 &convert=RESULT($$); }; public function bro_result(entry: Message) : tuple { - return (entry.func, entry.sub.result); + return (entry.func, entry.sub.result_); } # @TEST-END-FILE diff --git a/testing/btest/spicy/export-protocol-enum.zeek b/testing/btest/spicy/export-protocol-enum.zeek index a021dccbd5..532d3da8de 100644 --- a/testing/btest/spicy/export-protocol-enum.zeek +++ b/testing/btest/spicy/export-protocol-enum.zeek @@ -41,7 +41,7 @@ import spicy; public type Message = unit { sswitch: uint8; - result: uint8; + result_: uint8; var p_tcp: spicy::Protocol = spicy::Protocol::TCP; var p_udp: spicy::Protocol = spicy::Protocol::UDP; diff --git a/testing/btest/spicy/multiple-enum.zeek b/testing/btest/spicy/multiple-enum.zeek index ff6508d715..e077db70fe 100644 --- 
a/testing/btest/spicy/multiple-enum.zeek +++ b/testing/btest/spicy/multiple-enum.zeek @@ -22,10 +22,10 @@ protocol analyzer spicy::dtest over TCP: parse originator with dtest::Message; on dtest::Message if ( self.sswitch == 83 ) - -> event dtest_one(self.result); + -> event dtest_one(self.result_); on dtest::Message if ( self.sswitch != 83 ) - -> event dtest_two(self.result); + -> event dtest_two(self.result_); # @TEST-END-FILE # @TEST-START-FILE dtest.spicy @@ -38,7 +38,7 @@ public type RESULT = enum { public type Message = unit { sswitch: uint8; - result: uint8 &convert=RESULT($$); + result_: uint8 &convert=RESULT($$); }; # @TEST-END-FILE From 4318d5ab9e65f8e5bcd8b2c7df0b18feb2980e19 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 14 Jun 2024 09:55:39 +0200 Subject: [PATCH 34/89] Spicy: Disallow repeating replacements of the same analyzer. We now reject EVT files that attempt to replace the same built-in analyzer multiple times as doing so would be ill-defined and not very intuitive in what exactly it means. Closes #3783. 
--- src/spicy/manager.cc | 33 ++++++++++++--- .../spicy.replaces-conflicts-2/output | 2 + .../spicy.replaces-conflicts-3/output | 2 + .../Baseline/spicy.replaces-conflicts/output | 2 + testing/btest/spicy/replaces-conflicts.evt | 42 +++++++++++++++++++ 5 files changed, 75 insertions(+), 6 deletions(-) create mode 100644 testing/btest/Baseline/spicy.replaces-conflicts-2/output create mode 100644 testing/btest/Baseline/spicy.replaces-conflicts-3/output create mode 100644 testing/btest/Baseline/spicy.replaces-conflicts/output create mode 100644 testing/btest/spicy/replaces-conflicts.evt diff --git a/src/spicy/manager.cc b/src/spicy/manager.cc index 9ad40b10ad..1a9420e22a 100644 --- a/src/spicy/manager.cc +++ b/src/spicy/manager.cc @@ -897,14 +897,21 @@ void Manager::disableReplacedAnalyzers() { if ( file_mgr->Lookup(replaces, false) || packet_mgr->Lookup(replaces, false) ) reporter->FatalError("cannot replace '%s' analyzer with a protocol analyzer", replaces); - auto tag = analyzer_mgr->GetAnalyzerTag(replaces); - if ( ! tag ) { + auto component = analyzer_mgr->Lookup(replaces, false); + if ( ! 
component ) { SPICY_DEBUG(hilti::rt::fmt("%s is supposed to replace protocol analyzer %s, but that does not exist", info.name_analyzer, replaces)); continue; } + auto tag = component->Tag(); + if ( analyzer_mgr->HasComponentMapping(tag) ) + reporter->FatalError( + "%s: protocol analyzer %s is already mapped to a different analyzer; cannot replace an analyzer " + "multiple times", + info.name_analyzer.c_str(), component->Name().c_str()); + SPICY_DEBUG(hilti::rt::fmt("%s replaces existing protocol analyzer %s", info.name_analyzer, replaces)); info.replaces = tag; analyzer_mgr->DisableAnalyzer(tag); @@ -928,10 +935,17 @@ void Manager::disableReplacedAnalyzers() { continue; } + auto tag = component->Tag(); + if ( file_mgr->HasComponentMapping(tag) ) + reporter->FatalError( + "%s: file analyzer %s is already mapped to a different analyzer; cannot replace an analyzer multiple " + "times", + info.name_analyzer.c_str(), component->Name().c_str()); + SPICY_DEBUG(hilti::rt::fmt("%s replaces existing file analyzer %s", info.name_analyzer, replaces)); - info.replaces = component->Tag(); + info.replaces = tag; component->SetEnabled(false); - file_mgr->AddComponentMapping(component->Tag(), info.tag); + file_mgr->AddComponentMapping(tag, info.tag); } for ( auto& info : _packet_analyzers_by_type ) { @@ -948,10 +962,17 @@ void Manager::disableReplacedAnalyzers() { continue; } + auto tag = component->Tag(); + if ( packet_mgr->HasComponentMapping(tag) ) + reporter->FatalError( + "%s: packet analyzer %s is already mapped to a different analyzer; cannot replace an analyzer multiple " + "times", + info.name_analyzer.c_str(), component->Name().c_str()); + SPICY_DEBUG(hilti::rt::fmt("%s replaces existing packet analyzer %s", info.name_analyzer, replaces)); - info.replaces = component->Tag(); + info.replaces = tag; component->SetEnabled(false); - packet_mgr->AddComponentMapping(component->Tag(), info.tag); + packet_mgr->AddComponentMapping(tag, info.tag); } } diff --git 
a/testing/btest/Baseline/spicy.replaces-conflicts-2/output b/testing/btest/Baseline/spicy.replaces-conflicts-2/output new file mode 100644 index 0000000000..b4f454e6e7 --- /dev/null +++ b/testing/btest/Baseline/spicy.replaces-conflicts-2/output @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +fatal error: spicy::SSH_2: file analyzer MD5 is already mapped to a different analyzer; cannot replace an analyzer multiple times diff --git a/testing/btest/Baseline/spicy.replaces-conflicts-3/output b/testing/btest/Baseline/spicy.replaces-conflicts-3/output new file mode 100644 index 0000000000..0733458438 --- /dev/null +++ b/testing/btest/Baseline/spicy.replaces-conflicts-3/output @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +fatal error: spicy::SSH_2: packet analyzer Ethernet is already mapped to a different analyzer; cannot replace an analyzer multiple times diff --git a/testing/btest/Baseline/spicy.replaces-conflicts/output b/testing/btest/Baseline/spicy.replaces-conflicts/output new file mode 100644 index 0000000000..8cf95b5195 --- /dev/null +++ b/testing/btest/Baseline/spicy.replaces-conflicts/output @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+fatal error: redefinition of protocol analyzer spicy::SSH_1 diff --git a/testing/btest/spicy/replaces-conflicts.evt b/testing/btest/spicy/replaces-conflicts.evt new file mode 100644 index 0000000000..dbca6d637e --- /dev/null +++ b/testing/btest/spicy/replaces-conflicts.evt @@ -0,0 +1,42 @@ +# @TEST-REQUIRES: have-spicy +# +# @TEST-EXEC: spicyz -d -o ssh.hlto ssh.spicy %INPUT +# @TEST-EXEC-FAIL: zeek ssh.hlto >output 2>&1 +# @TEST-EXEC: btest-diff output + +# @TEST-START-FILE ssh.spicy +module SSH; + +import zeek; + +public type Banner = unit {}; +# @TEST-END-FILE + +protocol analyzer spicy::SSH_1 over TCP: + parse with SSH::Banner, + replaces SSH; + +protocol analyzer spicy::SSH_1 over UDP: + parse with SSH::Banner, + replaces SSH; + +# @TEST-START-NEXT + +file analyzer spicy::SSH_1: + parse with SSH::Banner, + replaces MD5; + +file analyzer spicy::SSH_2: + parse with SSH::Banner, + replaces MD5; + +# @TEST-START-NEXT + +packet analyzer spicy::SSH_1: + parse with SSH::Banner, + replaces Ethernet; + +packet analyzer spicy::SSH_2: + parse with SSH::Banner, + replaces Ethernet; + From 59d0f311a542fd5db71c91f6c99ec146ed29962e Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Fri, 24 May 2024 15:04:02 -0700 Subject: [PATCH 35/89] CI: drop Fedora 38, add 40 --- .cirrus.yml | 14 +++++++------- ci/{fedora-38 => fedora-40}/Dockerfile | 6 ++---- 2 files changed, 9 insertions(+), 11 deletions(-) rename ci/{fedora-38 => fedora-40}/Dockerfile (88%) diff --git a/.cirrus.yml b/.cirrus.yml index 2352701c91..ab50a7bc73 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -166,19 +166,19 @@ env: # Linux EOL timelines: https://linuxlifecycle.com/ # Fedora (~13 months): https://fedoraproject.org/wiki/Fedora_Release_Life_Cycle +fedora40_task: + container: + # Fedora 40 EOL: Around May 2025 + dockerfile: ci/fedora-40/Dockerfile + << : *RESOURCES_TEMPLATE + << : *CI_TEMPLATE + fedora39_task: container: # Fedora 39 EOL: Around Nov 2024 dockerfile: ci/fedora-39/Dockerfile << : 
*RESOURCES_TEMPLATE << : *CI_TEMPLATE - -fedora38_task: - container: - # Fedora 38 EOL: Around May 2024 - dockerfile: ci/fedora-38/Dockerfile - << : *RESOURCES_TEMPLATE - << : *CI_TEMPLATE << : *SKIP_TASK_ON_PR centosstream9_task: diff --git a/ci/fedora-38/Dockerfile b/ci/fedora-40/Dockerfile similarity index 88% rename from ci/fedora-38/Dockerfile rename to ci/fedora-40/Dockerfile index 5ed4573ac6..f292e11ece 100644 --- a/ci/fedora-38/Dockerfile +++ b/ci/fedora-40/Dockerfile @@ -1,15 +1,14 @@ -FROM fedora:38 +FROM fedora:40 # A version field to invalidate Cirrus's build cache when needed, as suggested in # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822 -ENV DOCKERFILE_VERSION 20230801 +ENV DOCKERFILE_VERSION 20240617 RUN dnf -y install \ bison \ ccache \ cmake \ diffutils \ - findutils \ flex \ gcc \ gcc-c++ \ @@ -21,7 +20,6 @@ RUN dnf -y install \ openssl \ openssl-devel \ procps-ng \ - python3 \ python3-devel \ python3-pip\ sqlite \ From 5af23757fa0196934621b1afbe50d7fb5b7c67ff Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Tue, 28 May 2024 23:13:22 -0700 Subject: [PATCH 36/89] Bump zeek-3rdparty to pull in sqlite move to 3.46 This avoids a compiler warning/error on Fedora 40. 
--- src/3rdparty | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/3rdparty b/src/3rdparty index 4c5985ca87..4c8fca119a 160000 --- a/src/3rdparty +++ b/src/3rdparty @@ -1 +1 @@ -Subproject commit 4c5985ca8743d33927943a58e2cc2b74d7b05790 +Subproject commit 4c8fca119aa2a847687ac215356a920312278a3e From 003d2d14688586c3a24b7cc4a0c6a7a40eb84425 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Tue, 18 Jun 2024 10:00:38 +0200 Subject: [PATCH 37/89] CMakeLists: Disable -Werror for 3rdparty/sqlite3.c We package vanilla sqlite from upstream and on Fedora 40 with sqlite 3.46 there's the following compiler warning: In function 'sqlite3Strlen30', inlined from 'sqlite3ColumnSetColl' at ../../src/3rdparty/sqlite3.c:122105:10: ../../src/3rdparty/sqlite3.c:35003:28: error: 'strlen' reading 1 or more bytes from a region of size 0 [-Werror=stringop-overread] 35003 | return 0x3fffffff & (int)strlen(z); | ^~~~~~~~~ In function 'sqlite3ColumnSetColl': Disabling -Werror on sqlite3.c seems sensible given we have little control over that code. --- src/CMakeLists.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index bb075c4d35..c7ae4f183c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -45,11 +45,13 @@ if (MSVC) # TODO: This is disabled for now because there a bunch of known # compiler warnings on Windows that we don't have good fixes for. #set(WERROR_FLAG "/WX") + #set(WNOERROR_FLAG "/WX:NO") endif () else () set(SIGN_COMPARE_FLAG "-Wno-sign-compare") if (BUILD_WITH_WERROR) set(WERROR_FLAG "-Werror") + set(WNOERROR_FLAG "-Wno-error") endif () endif () @@ -445,6 +447,10 @@ set(THIRD_PARTY_SRCS $<$:3rdparty/sqlite3.c> 3rdparty/strsep.c) +if (USE_SQLITE AND WNOERROR_FLAG) + set_source_files_properties(3rdparty/sqlite3.c PROPERTIES COMPILE_FLAGS ${WNOERROR_FLAG}) +endif () + # Highwayhash. Highwayhash is a bit special since it has architecture dependent # code... 
set(hhash_dir ${PROJECT_SOURCE_DIR}/auxil/highwayhash/highwayhash) From 751c35b476f5ac4cd56de7c819b0df39ed11999c Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 18 Jun 2024 12:40:37 +0200 Subject: [PATCH 38/89] Spicy: Extend exception hierarchy. We move the current `TypeMismatch` into a new `ParameterMismatch` exception that's derived from a more general `TypeMismatch` now that can also be used for other, non-parameter mismatches. --- src/spicy/runtime-support.h | 81 ++++++++++++++++++++----------------- 1 file changed, 44 insertions(+), 37 deletions(-) diff --git a/src/spicy/runtime-support.h b/src/spicy/runtime-support.h index f2a122b0c9..cae3fa322c 100644 --- a/src/spicy/runtime-support.h +++ b/src/spicy/runtime-support.h @@ -59,15 +59,22 @@ public: }; /** - * Exception thrown by event generation code if there's a type mismatch - * between the Spicy-side value and what the Zeek event expects. + * Exception thrown if there's a type mismatch between Spicy and Zeek side. */ class TypeMismatch : public UsageError { + using UsageError::UsageError; +}; + +/** + * Exception thrown by event generation code if there's a type mismatch between + * a Spicy-side parameter value and what the Zeek event expects. 
+ */ +class ParameterMismatch : public TypeMismatch { public: - TypeMismatch(const std::string_view& msg, std::string_view location = "") - : UsageError(hilti::rt::fmt("Event parameter mismatch, %s", msg)) {} - TypeMismatch(const std::string_view& have, const TypePtr& want, std::string_view location = "") - : TypeMismatch(_fmt(have, want)) {} + ParameterMismatch(const std::string_view& msg, std::string_view location = "") + : TypeMismatch(hilti::rt::fmt("Event parameter mismatch, %s", msg)) {} + ParameterMismatch(const std::string_view& have, const TypePtr& want, std::string_view location = "") + : ParameterMismatch(_fmt(have, want)) {} private: std::string _fmt(const std::string_view& have, const TypePtr& want) { @@ -538,7 +545,7 @@ inline ValPtr to_val(const hilti::rt::DeferredExpression& t, const TypePtr */ inline ValPtr to_val(const std::string& s, const TypePtr& target) { if ( target->Tag() != TYPE_STRING ) - throw TypeMismatch("string", target); + throw ParameterMismatch("string", target); return make_intrusive(s); } @@ -549,7 +556,7 @@ inline ValPtr to_val(const std::string& s, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Bytes& b, const TypePtr& target) { if ( target->Tag() != TYPE_STRING ) - throw TypeMismatch("string", target); + throw ParameterMismatch("string", target); return make_intrusive(b.str()); } @@ -568,7 +575,7 @@ inline ValPtr to_val(hilti::rt::integer::safe i, const TypePtr& target) { if ( target->Tag() == TYPE_INT ) return val_mgr->Int(i); - throw TypeMismatch("uint64", target); + throw ParameterMismatch("uint64", target); } else { if ( target->Tag() == TYPE_INT ) @@ -578,10 +585,10 @@ inline ValPtr to_val(hilti::rt::integer::safe i, const TypePtr& target) { if ( i >= 0 ) return val_mgr->Count(i); else - throw TypeMismatch("negative int64", target); + throw ParameterMismatch("negative int64", target); } - throw TypeMismatch("int64", target); + throw ParameterMismatch("int64", target); } } @@ -599,7 +606,7 @@ ValPtr 
to_val(const hilti::rt::ValueReference& t, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Bool& b, const TypePtr& target) { if ( target->Tag() != TYPE_BOOL ) - throw TypeMismatch("bool", target); + throw ParameterMismatch("bool", target); return val_mgr->Bool(b); } @@ -610,7 +617,7 @@ inline ValPtr to_val(const hilti::rt::Bool& b, const TypePtr& target) { */ inline ValPtr to_val(double r, const TypePtr& target) { if ( target->Tag() != TYPE_DOUBLE ) - throw TypeMismatch("double", target); + throw ParameterMismatch("double", target); return make_intrusive(r); } @@ -621,7 +628,7 @@ inline ValPtr to_val(double r, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Address& d, const TypePtr& target) { if ( target->Tag() != TYPE_ADDR ) - throw TypeMismatch("addr", target); + throw ParameterMismatch("addr", target); auto in_addr = d.asInAddr(); if ( auto v4 = std::get_if(&in_addr) ) @@ -638,7 +645,7 @@ inline ValPtr to_val(const hilti::rt::Address& d, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Port& p, const TypePtr& target) { if ( target->Tag() != TYPE_PORT ) - throw TypeMismatch("port", target); + throw ParameterMismatch("port", target); switch ( p.protocol().value() ) { case hilti::rt::Protocol::TCP: return val_mgr->Port(p.port(), ::TransportProto::TRANSPORT_TCP); @@ -657,7 +664,7 @@ inline ValPtr to_val(const hilti::rt::Port& p, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Interval& i, const TypePtr& target) { if ( target->Tag() != TYPE_INTERVAL ) - throw TypeMismatch("interval", target); + throw ParameterMismatch("interval", target); return make_intrusive(i.seconds()); } @@ -668,7 +675,7 @@ inline ValPtr to_val(const hilti::rt::Interval& i, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Time& t, const TypePtr& target) { if ( target->Tag() != TYPE_TIME ) - throw TypeMismatch("time", target); + throw ParameterMismatch("time", target); return make_intrusive(t.seconds()); } @@ 
-680,7 +687,7 @@ inline ValPtr to_val(const hilti::rt::Time& t, const TypePtr& target) { template inline ValPtr to_val(const hilti::rt::Vector& v, const TypePtr& target) { if ( target->Tag() != TYPE_VECTOR && target->Tag() != TYPE_LIST ) - throw TypeMismatch("expected vector or list", target); + throw ParameterMismatch("expected vector or list", target); auto vt = cast_intrusive(target); auto zv = make_intrusive(vt); @@ -697,17 +704,17 @@ inline ValPtr to_val(const hilti::rt::Vector& v, const TypePtr& target) { template inline ValPtr to_val(const hilti::rt::Map& m, const TypePtr& target) { if constexpr ( hilti::rt::is_tuple::value ) - throw TypeMismatch("internal error: sets with tuples not yet supported in to_val()"); + throw ParameterMismatch("internal error: sets with tuples not yet supported in to_val()"); if ( target->Tag() != TYPE_TABLE ) - throw TypeMismatch("map", target); + throw ParameterMismatch("map", target); auto tt = cast_intrusive(target); if ( tt->IsSet() ) - throw TypeMismatch("map", target); + throw ParameterMismatch("map", target); if ( tt->GetIndexTypes().size() != 1 ) - throw TypeMismatch("map with non-tuple elements", target); + throw ParameterMismatch("map with non-tuple elements", target); auto zv = make_intrusive(tt); @@ -727,20 +734,20 @@ inline ValPtr to_val(const hilti::rt::Map& m, const TypePtr& target) { template inline ValPtr to_val(const hilti::rt::Set& s, const TypePtr& target) { if ( target->Tag() != TYPE_TABLE ) - throw TypeMismatch("set", target); + throw ParameterMismatch("set", target); auto tt = cast_intrusive(target); if ( ! 
tt->IsSet() ) - throw TypeMismatch("set", target); + throw ParameterMismatch("set", target); auto zv = make_intrusive(tt); for ( const auto& i : s ) { if constexpr ( hilti::rt::is_tuple::value ) - throw TypeMismatch("internal error: sets with tuples not yet supported in to_val()"); + throw ParameterMismatch("internal error: sets with tuples not yet supported in to_val()"); else { if ( tt->GetIndexTypes().size() != 1 ) - throw TypeMismatch("set with non-tuple elements", target); + throw ParameterMismatch("set with non-tuple elements", target); auto idx = to_val(i, tt->GetIndexTypes()[0]); zv->Assign(std::move(idx), nullptr); @@ -821,7 +828,7 @@ inline void set_record_field(RecordVal* rval, const IntrusivePtr& rt // Field must be &optional or &default. if ( auto attrs = rtype->FieldDecl(idx)->attrs; ! attrs || ! (attrs->Find(detail::ATTR_DEFAULT) || attrs->Find(detail::ATTR_OPTIONAL)) ) - throw TypeMismatch(hilti::rt::fmt("missing initialization for field '%s'", rtype->FieldName(idx))); + throw ParameterMismatch(hilti::rt::fmt("missing initialization for field '%s'", rtype->FieldName(idx))); } } } @@ -833,12 +840,12 @@ inline void set_record_field(RecordVal* rval, const IntrusivePtr& rt template::value>*> inline ValPtr to_val(const T& t, const TypePtr& target) { if ( target->Tag() != TYPE_RECORD ) - throw TypeMismatch("tuple", target); + throw ParameterMismatch("tuple", target); auto rtype = cast_intrusive(target); if ( std::tuple_size::value != rtype->NumFields() ) - throw TypeMismatch("tuple", target); + throw ParameterMismatch("tuple", target); auto rval = make_intrusive(rtype); size_t idx = 0; @@ -856,12 +863,12 @@ inline ValPtr to_val(const hilti::rt::Bitfield& v, const TypePtr& target) using Bitfield = hilti::rt::Bitfield; if ( target->Tag() != TYPE_RECORD ) - throw TypeMismatch("bitfield", target); + throw ParameterMismatch("bitfield", target); auto rtype = cast_intrusive(target); if ( sizeof...(Ts) - 1 != rtype->NumFields() ) - throw TypeMismatch("bitfield", 
target); + throw ParameterMismatch("bitfield", target); auto rval = make_intrusive(rtype); size_t idx = 0; @@ -887,7 +894,7 @@ constexpr bool is_optional = is_optional_impl::value>*> inline ValPtr to_val(const T& t, const TypePtr& target) { if ( target->Tag() != TYPE_RECORD ) - throw TypeMismatch("struct", target); + throw ParameterMismatch("struct", target); auto rtype = cast_intrusive(target); @@ -898,7 +905,7 @@ inline ValPtr to_val(const T& t, const TypePtr& target) { t.__visit([&](std::string_view name, const auto& val) { if ( idx >= num_fields ) - throw TypeMismatch(hilti::rt::fmt("no matching record field for field '%s'", name)); + throw ParameterMismatch(hilti::rt::fmt("no matching record field for field '%s'", name)); // Special-case: Lift up anonymous bitfields (which always come as std::optionals). if ( name == "" ) { @@ -924,7 +931,7 @@ inline ValPtr to_val(const T& t, const TypePtr& target) { std::string field_name = rtype->FieldName(idx); if ( field_name != name ) - throw TypeMismatch( + throw ParameterMismatch( hilti::rt::fmt("mismatch in field name: expected '%s', found '%s'", name, field_name)); set_record_field(rval.get(), rtype, idx++, val); @@ -934,7 +941,7 @@ inline ValPtr to_val(const T& t, const TypePtr& target) { // We already check above that all Spicy-side fields are mapped so we // can only hit this if there are uninitialized Zeek-side fields left. 
if ( idx != num_fields ) - throw TypeMismatch(hilti::rt::fmt("missing initialization for field '%s'", rtype->FieldName(idx + 1))); + throw ParameterMismatch(hilti::rt::fmt("missing initialization for field '%s'", rtype->FieldName(idx + 1))); return rval; } @@ -959,7 +966,7 @@ inline ValPtr to_val_for_transport_proto(int64_t val, const TypePtr& target) { template::value>*> inline ValPtr to_val(const T& t, const TypePtr& target) { if ( target->Tag() != TYPE_ENUM ) - throw TypeMismatch("enum", target); + throw ParameterMismatch("enum", target); // We'll usually be getting an int64_t for T, but allow other signed ints // as well. @@ -969,7 +976,7 @@ inline ValPtr to_val(const T& t, const TypePtr& target) { // Special case: map enum values to Zeek's semantics. if ( target->GetName() == "transport_proto" ) { if ( ! std::is_same_v ) - throw TypeMismatch(hilti::rt::demangle(typeid(t).name()), target); + throw ParameterMismatch(hilti::rt::demangle(typeid(t).name()), target); return to_val_for_transport_proto(it, target); } From 93dd9d67977f9d75feeca533f9cfafb378e09f4e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 18 Jun 2024 13:00:55 +0200 Subject: [PATCH 39/89] Spicy: Reformat `zeek.spicy` with `spicy-format`. --- scripts/spicy/zeek.spicy | 51 ++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/scripts/spicy/zeek.spicy b/scripts/spicy/zeek.spicy index a6f3b1f344..4af1621a59 100644 --- a/scripts/spicy/zeek.spicy +++ b/scripts/spicy/zeek.spicy @@ -12,13 +12,13 @@ import spicy; ## ## This function has been deprecated and will be removed. Use ``spicy::accept_input`` ## instead, which will have the same effect with Zeek. -public function confirm_protocol() : void &cxxname="zeek::spicy::rt::confirm_protocol"; +public function confirm_protocol(): void &cxxname="zeek::spicy::rt::confirm_protocol"; ## [Deprecated] Triggers a DPD protocol violation for the current connection. 
## ## This function has been deprecated and will be removed. Use ``spicy::decline_input`` ## instead, which will have the same effect with Zeek. -public function reject_protocol(reason: string) : void &cxxname="zeek::spicy::rt::reject_protocol"; +public function reject_protocol(reason: string): void &cxxname="zeek::spicy::rt::reject_protocol"; ## Reports a "weird" to Zeek. This should be used with similar semantics as in ## Zeek: something quite unexpected happening at the protocol level, which however @@ -31,19 +31,19 @@ public function reject_protocol(reason: string) : void &cxxname="zeek::spicy::rt public function weird(id: string, addl: string = "") &cxxname="zeek::spicy::rt::weird"; ## Returns true if we're currently parsing the originator side of a connection. -public function is_orig() : bool &cxxname="zeek::spicy::rt::is_orig"; +public function is_orig(): bool &cxxname="zeek::spicy::rt::is_orig"; ## Returns the current connection's UID. -public function uid() : string &cxxname="zeek::spicy::rt::uid"; +public function uid(): string &cxxname="zeek::spicy::rt::uid"; ## Returns the current connection's 4-tuple ID to make IP address and port information available. -public function conn_id() : tuple &cxxname="zeek::spicy::rt::conn_id"; +public function conn_id(): tuple &cxxname="zeek::spicy::rt::conn_id"; ## Instructs Zeek to flip the directionality of the current connection. -public function flip_roles() : void &cxxname="zeek::spicy::rt::flip_roles"; +public function flip_roles(): void &cxxname="zeek::spicy::rt::flip_roles"; ## Returns the number of packets seen so far on the current side of the current connection. -public function number_packets() : uint64 &cxxname="zeek::spicy::rt::number_packets"; +public function number_packets(): uint64 &cxxname="zeek::spicy::rt::number_packets"; ## Opaque handle to a protocol analyzer. 
public type ProtocolHandle = __library_type("zeek::spicy::rt::ProtocolHandle"); @@ -65,7 +65,7 @@ public type ProtocolHandle = __library_type("zeek::spicy::rt::ProtocolHandle"); ## Note: For backwards compatibility, the analyzer argument can be left unset to add ## a DPD analyzer. This use is deprecated, though; use the single-argument version of ## `protocol_begin` for that instead. -public function protocol_begin(analyzer: optional, protocol: spicy::Protocol = spicy::Protocol::TCP) : void &cxxname="zeek::spicy::rt::protocol_begin"; +public function protocol_begin(analyzer: optional, protocol: spicy::Protocol = spicy::Protocol::TCP): void &cxxname="zeek::spicy::rt::protocol_begin"; ## Adds a Zeek-side DPD child protocol analyzer performing dynamic protocol detection ## on subsequently provided data. @@ -78,7 +78,7 @@ public function protocol_begin(analyzer: optional, protocol: spicy::Prot ## ## protocol: the transport-layer protocol on which to perform protocol detection; ## only TCP is currently supported here -public function protocol_begin(protocol: spicy::Protocol = spicy::Protocol::TCP) : void &cxxname="zeek::spicy::rt::protocol_begin"; +public function protocol_begin(protocol: spicy::Protocol = spicy::Protocol::TCP): void &cxxname="zeek::spicy::rt::protocol_begin"; ## Gets a handle to a Zeek-side child protocol analyzer for the current connection. 
## @@ -98,7 +98,7 @@ public function protocol_begin(protocol: spicy::Protocol = spicy::Protocol::TCP) ## protocol: the transport-layer protocol that the analyser uses; only TCP is ## currently supported here ## -public function protocol_handle_get_or_create(analyzer: string, protocol: spicy::Protocol = spicy::Protocol::TCP) : ProtocolHandle &cxxname="zeek::spicy::rt::protocol_handle_get_or_create"; +public function protocol_handle_get_or_create(analyzer: string, protocol: spicy::Protocol = spicy::Protocol::TCP): ProtocolHandle &cxxname="zeek::spicy::rt::protocol_handle_get_or_create"; ## Forwards protocol data to all previously instantiated Zeek-side child protocol analyzers of a given transport-layer. ## @@ -107,7 +107,7 @@ public function protocol_handle_get_or_create(analyzer: string, protocol: spicy: ## data: chunk of data to forward to child analyzer ## ## protocol: the transport-layer protocol of the children to forward to; only TCP is currently supported here -public function protocol_data_in(is_orig: bool, data: bytes, protocol: spicy::Protocol = spicy::Protocol::TCP) : void &cxxname="zeek::spicy::rt::protocol_data_in"; +public function protocol_data_in(is_orig: bool, data: bytes, protocol: spicy::Protocol = spicy::Protocol::TCP): void &cxxname="zeek::spicy::rt::protocol_data_in"; ## Forwards protocol data to a specific previously instantiated Zeek-side child analyzer. ## @@ -116,7 +116,7 @@ public function protocol_data_in(is_orig: bool, data: bytes, protocol: spicy::Pr ## data: chunk of data to forward to child analyzer ## ## h: handle to the child analyzer to forward data into -public function protocol_data_in(is_orig: bool, data: bytes, h: ProtocolHandle) : void &cxxname="zeek::spicy::rt::protocol_data_in"; +public function protocol_data_in(is_orig: bool, data: bytes, h: ProtocolHandle): void &cxxname="zeek::spicy::rt::protocol_data_in"; ## Signals a gap in input data to all previously instantiated Zeek-side child protocol analyzers. 
## @@ -127,11 +127,11 @@ public function protocol_data_in(is_orig: bool, data: bytes, h: ProtocolHandle) ## len: size of gap ## ## h: optional handle to the child analyzer signal a gap to, else signal to all child analyzers -public function protocol_gap(is_orig: bool, offset: uint64, len: uint64, h: optional = Null) : void &cxxname="zeek::spicy::rt::protocol_gap"; +public function protocol_gap(is_orig: bool, offset: uint64, len: uint64, h: optional = Null): void &cxxname="zeek::spicy::rt::protocol_gap"; ## Signals end-of-data to all previously instantiated Zeek-side child protocol ## analyzers and removes them. -public function protocol_end() : void &cxxname="zeek::spicy::rt::protocol_end"; +public function protocol_end(): void &cxxname="zeek::spicy::rt::protocol_end"; ## Signals end-of-data to the given child analyzer and removes it. ## @@ -147,54 +147,55 @@ public function protocol_handle_close(handle: ProtocolHandle): void &cxxname="ze ## Optionally, a mime type can be provided. It will be passed on to Zeek's file analysis framework. ## Optionally, a file ID can be provided. It will be passed on to Zeek's file analysis framework. ## Returns the Zeek-side file ID of the new file. -public function file_begin(mime_type: optional = Null, fuid: optional = Null) : string &cxxname="zeek::spicy::rt::file_begin"; +public function file_begin(mime_type: optional = Null, fuid: optional = Null): string &cxxname="zeek::spicy::rt::file_begin"; ## Returns the current file's FUID. -public function fuid() : string &cxxname="zeek::spicy::rt::fuid"; +public function fuid(): string &cxxname="zeek::spicy::rt::fuid"; ## Terminates the currently active Zeek-side session, flushing all state. Any ## subsequent activity will start a new session from scratch. This can only be ## called from inside a protocol analyzer. 
-public function terminate_session() : void &cxxname="zeek::spicy::rt::terminate_session"; +public function terminate_session(): void &cxxname="zeek::spicy::rt::terminate_session"; ## Tells Zeek to skip sending any further input data to the current analyzer. ## This is supported for protocol and file analyzers. -public function skip_input() : void &cxxname="zeek::spicy::rt::skip_input"; +public function skip_input(): void &cxxname="zeek::spicy::rt::skip_input"; ## Signals the expected size of a file to Zeek's file analysis. ## ## size: expected size of file ## fid: Zeek-side ID of the file to operate on; if not given, the file started by the most recent file_begin() will be used -public function file_set_size(size: uint64, fid: optional = Null) : void &cxxname="zeek::spicy::rt::file_set_size"; +public function file_set_size(size: uint64, fid: optional = Null): void &cxxname="zeek::spicy::rt::file_set_size"; ## Passes file content on to Zeek's file analysis. ## ## data: chunk of raw data to pass into analysis ## fid: Zeek-side ID of the file to operate on; if not given, the file started by the most recent file_begin() will be used -public function file_data_in(data: bytes, fid: optional = Null) : void &cxxname="zeek::spicy::rt::file_data_in"; +public function file_data_in(data: bytes, fid: optional = Null): void &cxxname="zeek::spicy::rt::file_data_in"; ## Passes file content at a specific offset on to Zeek's file analysis. 
## ## data: chunk of raw data to pass into analysis ## offset: position in file where data starts ## fid: Zeek-side ID of the file to operate on; if not given, the file started by the most recent file_begin() will be used -public function file_data_in_at_offset(data: bytes, offset: uint64, fid: optional = Null) : void &cxxname="zeek::spicy::rt::file_data_in_at_offset"; +public function file_data_in_at_offset(data: bytes, offset: uint64, fid: optional = Null): void &cxxname="zeek::spicy::rt::file_data_in_at_offset"; ## Signals a gap in a file to Zeek's file analysis. ## ## offset: position in file where gap starts ## len: size of gap ## fid: Zeek-side ID of the file to operate on; if not given, the file started by the most recent file_begin() will be used -public function file_gap(offset: uint64, len: uint64, fid: optional = Null) : void &cxxname="zeek::spicy::rt::file_gap"; +public function file_gap(offset: uint64, len: uint64, fid: optional = Null): void &cxxname="zeek::spicy::rt::file_gap"; ## Signals the end of a file to Zeek's file analysis. ## ## fid: Zeek-side ID of the file to operate on; if not given, the file started by the most recent file_begin() will be used -public function file_end(fid: optional = Null) : void &cxxname="zeek::spicy::rt::file_end"; +public function file_end(fid: optional = Null): void &cxxname="zeek::spicy::rt::file_end"; ## Inside a packet analyzer, forwards what data remains after parsing the top-level unit ## on to another analyzer. The index specifies the target, per the current dispatcher table. -public function forward_packet(identifier: uint32) : void &cxxname="zeek::spicy::rt::forward_packet"; +public function forward_packet(identifier: uint32): void &cxxname="zeek::spicy::rt::forward_packet"; ## Gets the network time from Zeek. 
-public function network_time() : time &cxxname="zeek::spicy::rt::network_time"; +public function network_time(): time &cxxname="zeek::spicy::rt::network_time"; + From 5dfff4492c063e180efd76979f4807c2cdc5c06a Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Wed, 12 Jun 2024 14:23:44 +0200 Subject: [PATCH 40/89] ssh: Revert half-duplex robustness This reverts part of commit a0888b7e36308d241f4c62b42715a94d499aab23 due to inhibiting analyzer violations when parsing non SSH traffic when the &restofdata path is entered. @J-Gras reported the analyzer not being disabled when sending HTTP traffic on port 22. This adds the verbose analyzer.log baselines such that future improvements of these scenarios become visible. --- src/analyzer/protocol/ssh/ssh-protocol.pac | 1 - .../analyzer.log | 124 +++++++++++++++++ .../analyzer.log | 128 ++++++++++++++++++ .../protocols/ssh/half-duplex-client.zeek | 2 +- .../protocols/ssh/half-duplex-server.zeek | 2 +- 5 files changed, 254 insertions(+), 3 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-client/analyzer.log create mode 100644 testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-server/analyzer.log diff --git a/src/analyzer/protocol/ssh/ssh-protocol.pac b/src/analyzer/protocol/ssh/ssh-protocol.pac index 3709238405..c152f29b93 100644 --- a/src/analyzer/protocol/ssh/ssh-protocol.pac +++ b/src/analyzer/protocol/ssh/ssh-protocol.pac @@ -37,7 +37,6 @@ type SSH_Key_Exchange(is_orig: bool) = record { key_ex: case $context.connection.get_version() of { SSH1 -> ssh1_msg : SSH1_Key_Exchange(is_orig, packet_length); SSH2 -> ssh2_msg : SSH2_Key_Exchange(is_orig, packet_length); - default -> terminate : bytestring &restofdata &transient; }; } &length = $context.flow.get_kex_length($context.connection.get_version(), packet_length); diff --git a/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-client/analyzer.log 
b/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-client/analyzer.log new file mode 100644 index 0000000000..d96d1809f8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-client/analyzer.log @@ -0,0 +1,124 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path analyzer +#open XXXX-XX-XX-XX-XX-XX +#fields ts cause analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data +#types time string string string string string addr port addr port string string +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH 
C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 
192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 
192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 56875 192.168.2.158 22 Binpac 
exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac 
exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid 
index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: 
SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - 
+XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX 
violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - 
+XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-server/analyzer.log b/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-server/analyzer.log new file mode 100644 index 0000000000..4296d41ecf --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-server/analyzer.log @@ -0,0 +1,128 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path analyzer +#open XXXX-XX-XX-XX-XX-XX +#fields ts cause analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data +#types time string string string string string addr port addr port string string +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 
192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 
192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac 
exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac 
exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index 
for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: 
SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 
- +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - 
+XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX 
violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX 
violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/scripts/base/protocols/ssh/half-duplex-client.zeek b/testing/btest/scripts/base/protocols/ssh/half-duplex-client.zeek index 4657a44107..6e1242f286 100644 --- a/testing/btest/scripts/base/protocols/ssh/half-duplex-client.zeek +++ b/testing/btest/scripts/base/protocols/ssh/half-duplex-client.zeek @@ -2,7 +2,7 @@ # analyzer.log 
output. # @TEST-EXEC: zeek -r $TRACES/ssh/ssh.client-side-half-duplex.pcap %INPUT -# @TEST-EXEC: test ! -e analyzer.log +# @TEST-EXEC: btest-diff analyzer.log # @TEST-EXEC: btest-diff ssh.log # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff .stdout diff --git a/testing/btest/scripts/base/protocols/ssh/half-duplex-server.zeek b/testing/btest/scripts/base/protocols/ssh/half-duplex-server.zeek index 423148950d..232aa251fb 100644 --- a/testing/btest/scripts/base/protocols/ssh/half-duplex-server.zeek +++ b/testing/btest/scripts/base/protocols/ssh/half-duplex-server.zeek @@ -2,7 +2,7 @@ # analyzer.log output. # @TEST-EXEC: zeek -r $TRACES/ssh/ssh.server-side-half-duplex.pcap %INPUT -# @TEST-EXEC: test ! -e analyzer.log +# @TEST-EXEC: btest-diff analyzer.log # @TEST-EXEC: btest-diff ssh.log # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff .stdout From 5c56969ca49645dc013124df1a5a6d9e44c462f0 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Wed, 19 Jun 2024 19:47:54 +0200 Subject: [PATCH 41/89] zeek-testing-private: Update baseline --- testing/external/commit-hash.zeek-testing-private | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/external/commit-hash.zeek-testing-private b/testing/external/commit-hash.zeek-testing-private index 169d80e658..c5f7f949f0 100644 --- a/testing/external/commit-hash.zeek-testing-private +++ b/testing/external/commit-hash.zeek-testing-private @@ -1 +1 @@ -8dd88e9b33da35feaae860b158bc91586ff17136 +1d47e303f6192786e8279481ea7be00b317f033a From 4fc57294f10e949dceb65534e4c3b3c7c318dc97 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Mon, 17 Jun 2024 09:00:37 +0200 Subject: [PATCH 42/89] Spicy: Provide runtime API to access Zeek-side globals. This allows to read Zeek global variables from inside Spicy code. The main challenge here is supporting all of Zeek's data type in a type-safe manner. 
The most straight-forward API is a set of functions `get_<type>(<id>)`, where `<type>` is the Zeek-side type name (e.g., `count`, `string`, `bool`) and `<id>` is the fully scoped name of the Zeek-side global (e.g., `MyModule::Boolean`). These functions then return the corresponding Zeek value, converted into an appropriate Spicy type. Example: Zeek: module Foo; const x: count = 42; const y: string = "xxx"; Spicy: import zeek; assert zeek::get_count("Foo::x") == 42; assert zeek::get_string("Foo::y") == b"xxx"; # returns bytes(!) For container types, the `get_*` function returns an opaque type that can be used to access the containers' values. An additional set of functions `as_<type>()` allows converting opaque values of atomic types to Spicy equivalents. Example: Zeek: module Foo; const s: set[count] = { 1, 2 }; const t: table[count] of string = { [1] = "One", [2] = "Two" } Spicy: # Check set membership. local set_ = zeek::get_set("Foo::s"); assert zeek::set_contains(set_, 1) == True # Look up table element. local table_ = zeek::get_table("Foo::t"); local value = zeek::table_lookup(table_, 1); assert zeek::as_string(value) == b"One" There are also functions for accessing elements of Zeek-side vectors and records. If any of these `zeek::*` conversion functions fails (e.g., due to a global of that name not existing), it will throw an exception. Design considerations: - We support only reading Zeek variables, not writing. This is both to simplify the API, and also conceptually to avoid offering backdoors into Zeek state that could end up with a very tight coupling of Spicy and Zeek code. - We accept that a single access might be relatively slow due to name lookup and data conversion. This is primarily meant for configuration-style data, not for transferring lots of dynamic state over. - In that spirit, we don't support deep-copying complex data types from Zeek over to Spicy.
This is (1) to avoid performance problems when accidentally copying large containers over, potentially even at every access; and (2) to avoid the two sides getting out of sync if one ends up modifying a container without the other being able to see it. --- scripts/spicy/zeek.spicy | 309 ++++++++++++++++ src/spicy/runtime-support.cc | 6 +- src/spicy/runtime-support.h | 339 +++++++++++++++++- .../btest/Baseline/spicy.get-values/output | 6 + testing/btest/spicy/get-values.spicy | 99 +++++ 5 files changed, 750 insertions(+), 9 deletions(-) create mode 100644 testing/btest/Baseline/spicy.get-values/output create mode 100644 testing/btest/spicy/get-values.spicy diff --git a/scripts/spicy/zeek.spicy b/scripts/spicy/zeek.spicy index 4af1621a59..cc24f96117 100644 --- a/scripts/spicy/zeek.spicy +++ b/scripts/spicy/zeek.spicy @@ -199,3 +199,312 @@ public function forward_packet(identifier: uint32): void &cxxname="zeek::spicy:: ## Gets the network time from Zeek. public function network_time(): time &cxxname="zeek::spicy::rt::network_time"; +## Opaque handle for a Zeek-side value. +public type ZeekVal = __library_type("::zeek::ValPtr"); + +## Opaque handle for a Zeek-side record value. +public type ZeekRecord = __library_type("::zeek::spicy::rt::ValRecordPtr"); + +## Opaque handle for a Zeek-side set value. +public type ZeekSet = __library_type("::zeek::spicy::rt::ValSetPtr"); + +## Opaque handle for a Zeek-side table value. +public type ZeekTable = __library_type("::zeek::spicy::rt::ValTablePtr"); + +## Opaque handle for a Zeek-side vector value. +public type ZeekVector = __library_type("::zeek::spicy::rt::ValVectorPtr"); + +## Returns the value of a global Zeek script variable of Zeek type ``addr``. +## Throws an exception if there's no such Zeek of that name, or if it's not of +## the expected type. 
+## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_address(id: string): addr &cxxname="zeek::spicy::rt::get_address"; + +## Returns the value of a global Zeek script variable of Zeek type ``bool``. +## Throws an exception if there's no such Zeek of that name, or if it's not of +## the expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_bool(id: string): bool &cxxname="zeek::spicy::rt::get_bool"; + +## Returns the value of a global Zeek script variable of Zeek type ``count``. +## Throws an exception if there's no such Zeek of that name, or if it's not of +## the expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_count(id: string): uint64 &cxxname="zeek::spicy::rt::get_count"; + +## Returns the value of a global Zeek script variable of Zeek type ``double``. +## Throws an exception if there's no such Zeek of that name, or if it's not of +## the expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_double(id: string): real &cxxname="zeek::spicy::rt::get_double"; + +## Returns the value of a global Zeek script variable of Zeek type ``enum``. +## The value is returned as a string containing the enum's label name, without +## any scope. Throws an exception if there's no such Zeek of that name, or if +## it's not of the expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_enum(id: string): string &cxxname="zeek::spicy::rt::get_enum"; + +## Returns the value of a global Zeek script variable of Zeek type ``int``. +## Throws an exception if there's no such Zeek of that name, or if it's not of +## the expected type. 
+## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_int(id: string): int64 &cxxname="zeek::spicy::rt::get_int"; + +## Returns the value of a global Zeek script variable of Zeek type +## ``interval``. Throws an exception if there's no such Zeek of that name, or +## if it's not of the expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_interval(id: string): interval &cxxname="zeek::spicy::rt::get_interval"; + +## Returns the value of a global Zeek script variable of Zeek type ``port``. +## Throws an exception if there's no such Zeek of that name, or if it's not of +## the expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_port(id: string): port &cxxname="zeek::spicy::rt::get_port"; + +## Returns the value of a global Zeek script variable of Zeek type ``record``. +## The value is returned as an opaque handle to the record, which can be used +## with the ``zeek::record_*()`` functions to access the record's fields. +## Throws an exception if there's no such Zeek of that name, or if it's not of +## the expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_record(id: string): ZeekRecord &cxxname="zeek::spicy::rt::get_record"; + +## Returns the value of a global Zeek script variable of Zeek type ``set``. The +## value is returned as an opaque handle to the set, which can be used with the +## ``zeek::set_*()`` functions to access the set's content. Throws an exception +## if there's no such Zeek of that name, or if it's not of the expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_set(id: string): ZeekSet &cxxname="zeek::spicy::rt::get_set"; + +## Returns the value of a global Zeek script variable of Zeek type ``string``. +## The string's value is returned as a Spicy ``bytes`` value. 
Throws an +## exception if there's no such Zeek variable of that name, or if it's not of the +## expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_string(id: string): bytes &cxxname="zeek::spicy::rt::get_string"; + +## Returns the value of a global Zeek script variable of Zeek type ``subnet``. +## Throws an exception if there's no such Zeek variable of that name, or if it's not of +## the expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_subnet(id: string): network &cxxname="zeek::spicy::rt::get_subnet"; + +## Returns the value of a global Zeek script variable of Zeek type ``table``. +## The value is returned as an opaque handle to the table, which can be used with +## the ``zeek::table_*()`` functions to access the table's content. Throws an +## exception if there's no such Zeek variable of that name, or if it's not of the +## expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_table(id: string): ZeekTable &cxxname="zeek::spicy::rt::get_table"; + +## Returns the value of a global Zeek script variable of Zeek type ``time``. +## Throws an exception if there's no such Zeek variable of that name, or if it's not of +## the expected type. +## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_time(id: string): time &cxxname="zeek::spicy::rt::get_time"; + +## Returns the value of a global Zeek script variable of Zeek type ``vector``. +## The value is returned as an opaque handle to the vector, which can be used +## with the ``zeek::vector_*()`` functions to access the vector's content. +## Throws an exception if there's no such Zeek variable of that name, or if it's not of +## the expected type.
+## +## id: fully-qualified name of the global Zeek variable to retrieve +public function get_vector(id: string): ZeekVector &cxxname="zeek::spicy::rt::get_vector"; + +## Returns an opaque handle to a global Zeek script variable. The handle can be +## used with the ``zeek::as_*()`` functions to access the variable's value. +## Throws an exception if there's no Zeek variable of that name. +public function get_value(id: string): ZeekVal &cxxname="zeek::spicy::rt::get_value"; + +## Returns a Zeek ``addr`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_address(v: ZeekVal): addr &cxxname="zeek::spicy::rt::as_address"; + +## Returns a Zeek ``bool`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_bool(v: ZeekVal): bool &cxxname="zeek::spicy::rt::as_bool"; + +## Returns a Zeek ``count`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_count(v: ZeekVal): uint64 &cxxname="zeek::spicy::rt::as_count"; + +## Returns a Zeek ``double`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_double(v: ZeekVal): real &cxxname="zeek::spicy::rt::as_double"; + +## Returns a Zeek ``enum`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_enum(v: ZeekVal): string &cxxname="zeek::spicy::rt::as_enum"; + +## Returns a Zeek ``int`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_int(v: ZeekVal): int64 &cxxname="zeek::spicy::rt::as_int"; + +## Returns a Zeek ``interval`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type.
+public function as_interval(v: ZeekVal): interval &cxxname="zeek::spicy::rt::as_interval"; + +## Returns a Zeek ``port`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_port(v: ZeekVal): port &cxxname="zeek::spicy::rt::as_port"; + +## Returns a Zeek ``record`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_record(v: ZeekVal): ZeekRecord &cxxname="zeek::spicy::rt::as_record"; + +## Returns a Zeek ``set`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_set(v: ZeekVal): ZeekSet &cxxname="zeek::spicy::rt::as_set"; + +## Returns a Zeek ``string`` value referenced by an opaque handle. The string's +## value is returned as a Spicy ``bytes`` value. Throws an exception if the +## referenced value is not of the expected type. +public function as_string(v: ZeekVal): bytes &cxxname="zeek::spicy::rt::as_string"; + +## Returns a Zeek ``subnet`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_subnet(v: ZeekVal): network &cxxname="zeek::spicy::rt::as_subnet"; + +## Returns a Zeek ``table`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_table(v: ZeekVal): ZeekTable &cxxname="zeek::spicy::rt::as_table"; + +## Returns a Zeek ``time`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type. +public function as_time(v: ZeekVal): time &cxxname="zeek::spicy::rt::as_time"; + +## Returns a Zeek ``vector`` value referenced by an opaque handle. Throws an +## exception if the referenced value is not of the expected type.
+public function as_vector(v: ZeekVal): ZeekVector &cxxname="zeek::spicy::rt::as_vector"; + +## Returns true if a Zeek set contains a given value. Throws an exception if +## the given ID does not exist, or does not have the expected type. +## +## id: fully-qualified name of the global Zeek set to check +## v: value to check for, which must be of the Spicy-side equivalent of the set's key type +public function set_contains(id: string, v: any): bool &cxxname="zeek::spicy::rt::set_contains"; + +## Returns true if a Zeek set contains a given value. Throws an exception if +## the set does not have the expected type. +## +## s: opaque handle to the Zeek set, as returned by other functions +## v: value to check for, which must be of the Spicy-side equivalent of the set's key type +public function set_contains(s: ZeekSet, v: any): bool &cxxname="zeek::spicy::rt::set_contains"; + +## Returns true if a Zeek table contains a given value. Throws an exception if +## the given ID does not exist, or does not have the expected type. +## +## id: fully-qualified name of the global Zeek table to check +## v: value to check for, which must be of the Spicy-side equivalent of the table's key type +public function table_contains(id: string, v: any): bool &cxxname="zeek::spicy::rt::table_contains"; + +## Returns true if a Zeek table contains a given value. Throws an exception if +## the table does not have the expected type. +## +## t: opaque handle to the Zeek table, as returned by other functions +## v: value to check for, which must be of the Spicy-side equivalent of the table's key type +public function table_contains(t: ZeekTable, v: any): bool &cxxname="zeek::spicy::rt::table_contains"; + +## Returns the value associated with a key in a Zeek table. Returns an error +## result if the key does not exist in the table. Throws an exception if the +## given table ID does not exist, or does not have the expected type.
+## +## id: fully-qualified name of the global Zeek table to check +## v: value to lookup, which must be of the Spicy-side equivalent of the table's key type +public function table_lookup(id: string, v: any): optional<ZeekVal> &cxxname="zeek::spicy::rt::table_lookup"; + +## Returns the value associated with a key in a Zeek table. Returns an error +## result if the key does not exist in the table. Throws an exception if the +## table does not have the expected type. +## +## t: opaque handle to the Zeek table, as returned by other functions +## v: value to lookup, which must be of the Spicy-side equivalent of the table's key type +public function table_lookup(t: ZeekTable, v: any): optional<ZeekVal> &cxxname="zeek::spicy::rt::table_lookup"; + +## Returns true if a Zeek record provides a value for a given field. This +## includes fields with `&default` values. Throws an exception if the given ID +## does not exist, or does not have the expected type. +## +## id: fully-qualified name of the global Zeek record to check +## field: name of the field to check +public function record_has_value(id: string, field: string): bool &cxxname="zeek::spicy::rt::record_has_field"; + +## Returns true if a Zeek record provides a value for a given field. +## This includes fields with `&default` values. +## +## r: opaque handle to the Zeek record, as returned by other functions +## field: name of the field to check +public function record_has_value(r: ZeekRecord, field: string): bool &cxxname="zeek::spicy::rt::record_has_field"; + +## Returns true if the type of a Zeek record has a field of a given name. +## Throws an exception if the given ID does not exist, or does not have the +## expected type.
+## +## id: fully-qualified name of the global Zeek record to check +## field: name of the field to check +public function record_has_field(id: string, field: string): bool &cxxname="zeek::spicy::rt::record_has_field"; + +## Returns true if the type of a Zeek record has a field of a given name. +## +## r: opaque handle to the Zeek record, as returned by other functions +## field: name of the field to check +public function record_has_field(r: ZeekRecord, field: string): bool &cxxname="zeek::spicy::rt::record_has_field"; + +## Returns a field's value from a Zeek record. Throws an exception if the given +## ID does not exist, or does not have the expected type; or if there's no such +## field in the record type, or if the field does not have a value. +## +## id: fully-qualified name of the global Zeek record to check +## field: name of the field to retrieve +public function record_field(id: string, field: string): ZeekVal &cxxname="zeek::spicy::rt::record_field"; + +## Returns a field's value from a Zeek record. Throws an exception if the given +## record does not have such a field, or if the field does not have a value. +## +## r: opaque handle to the Zeek record, as returned by other functions +## field: name of the field to retrieve +public function record_field(r: ZeekRecord, field: string): ZeekVal &cxxname="zeek::spicy::rt::record_field"; + +## Returns the value of an index in a Zeek vector. Throws an exception if the +## given ID does not exist, or does not have the expected type; or if the index +## is out of bounds. +## +## id: fully-qualified name of the global Zeek vector to check +## index: index of the element to retrieve +public function vector_index(id: string, index: uint64): ZeekVal &cxxname="zeek::spicy::rt::vector_index"; + +## Returns the value of an index in a Zeek vector. Throws an exception if the +## index is out of bounds. 
+## +## v: opaque handle to the Zeek vector, as returned by other functions +## index: index of the element to retrieve +public function vector_index(v: ZeekVector, index: uint64): ZeekVal &cxxname="zeek::spicy::rt::vector_index"; + +## Returns the size of a Zeek vector. Throws an exception if the given ID does +## not exist, or does not have the expected type. +## +## id: fully-qualified name of the global Zeek vector to check +public function vector_size(id: string): uint64 &cxxname="zeek::spicy::rt::vector_size"; + +## Returns the size of a Zeek vector. +## +## v: opaque handle to the Zeek vector, as returned by other functions +public function vector_size(v: ZeekVector): uint64 &cxxname="zeek::spicy::rt::vector_size"; + diff --git a/src/spicy/runtime-support.cc b/src/spicy/runtime-support.cc index 610e0a0153..f5afd37461 100644 --- a/src/spicy/runtime-support.cc +++ b/src/spicy/runtime-support.cc @@ -133,15 +133,15 @@ TypePtr rt::create_record_type(const std::string& ns, const std::string& id, auto decls = std::make_unique(); for ( const auto& f : fields ) { - auto attrs = make_intrusive(nullptr, true, false); + auto attrs = make_intrusive<::zeek::detail::Attributes>(nullptr, true, false); if ( f.is_optional ) { - auto optional_ = make_intrusive(detail::ATTR_OPTIONAL); + auto optional_ = make_intrusive<::zeek::detail::Attr>(::zeek::detail::ATTR_OPTIONAL); attrs->AddAttr(std::move(optional_)); } if ( f.is_log ) { - auto log_ = make_intrusive(detail::ATTR_LOG); + auto log_ = make_intrusive<::zeek::detail::Attr>(::zeek::detail::ATTR_LOG); attrs->AddAttr(std::move(log_)); } diff --git a/src/spicy/runtime-support.h b/src/spicy/runtime-support.h index cae3fa322c..3c82f3004c 100644 --- a/src/spicy/runtime-support.h +++ b/src/spicy/runtime-support.h @@ -19,7 +19,10 @@ #include #include #include +#include +#include "IntrusivePtr.h" +#include "Type.h" #include "zeek/Desc.h" #include "zeek/Val.h" #include "zeek/spicy/cookie.h" @@ -71,9 +74,9 @@ class TypeMismatch : public 
UsageError { */ class ParameterMismatch : public TypeMismatch { public: - ParameterMismatch(const std::string_view& msg, std::string_view location = "") + ParameterMismatch(std::string_view msg, std::string_view location = "") : TypeMismatch(hilti::rt::fmt("Event parameter mismatch, %s", msg)) {} - ParameterMismatch(const std::string_view& have, const TypePtr& want, std::string_view location = "") + ParameterMismatch(std::string_view have, const TypePtr& want, std::string_view location = "") : ParameterMismatch(_fmt(have, want)) {} private: @@ -97,13 +100,13 @@ public: * Begins registration of a Spicy EVT module. All subsequent, other `register_*()` * function call will be associated with this module for documentation purposes. */ -void register_spicy_module_begin(const std::string& name, const std::string& description); +void register_spicy_module_begin(const std::string& id, const std::string& description); /** * Registers a Spicy protocol analyzer with its EVT meta information with the * plugin's runtime. */ -void register_protocol_analyzer(const std::string& name, hilti::rt::Protocol proto, +void register_protocol_analyzer(const std::string& id, hilti::rt::Protocol proto, const hilti::rt::Vector<::zeek::spicy::rt::PortRange>& ports, const std::string& parser_orig, const std::string& parser_resp, const std::string& replaces, const std::string& linker_scope); @@ -112,7 +115,7 @@ void register_protocol_analyzer(const std::string& name, hilti::rt::Protocol pro * Registers a Spicy file analyzer with its EVT meta information with the * plugin's runtime. */ -void register_file_analyzer(const std::string& name, const hilti::rt::Vector& mime_types, +void register_file_analyzer(const std::string& id, const hilti::rt::Vector& mime_types, const std::string& parser, const std::string& replaces, const std::string& linker_scope); /** Reports a Zeek-side "weird". 
*/ @@ -122,7 +125,7 @@ void weird(const std::string& id, const std::string& addl); * Registers a Spicy packet analyzer with its EVT meta information with the * plugin's runtime. */ -void register_packet_analyzer(const std::string& name, const std::string& parser, const std::string& replaces, +void register_packet_analyzer(const std::string& id, const std::string& parser, const std::string& replaces, const std::string& linker_scope); /** Registers a Spicy-generated type to make it available inside Zeek. */ @@ -991,4 +994,328 @@ inline ValPtr to_val(const T& t, const TypePtr& target) { return target->AsEnumType()->GetEnumVal(bt); } + +/** + * Returns the Zeek value associated with a global Zeek-side ID. Throws if the + * ID does not exist. + */ +inline ValPtr get_value(const std::string& name) { + if ( auto id = zeek::detail::global_scope()->Find(name) ) + return id->GetVal(); + else + throw InvalidValue(util::fmt("no such Zeek variable: '%s'", name.c_str())); +} + +namespace detail { +/** Helper to raise a ``TypeMismatch`` exception. */ +inline auto type_mismatch(const ValPtr& v, const char* expected) { + throw TypeMismatch(util::fmt("type mismatch in Zeek value: expected %s, but got %s", expected, + ::zeek::type_name(v->GetType()->Tag()))); +} + +/** + * Helper to check the type of Zeek value against an expected type tag, raising + * a ``TypeMismatch`` exception on mismatch. + */ +inline auto check_type(const ValPtr& v, ::zeek::TypeTag type_tag, const char* expected) { + if ( v->GetType()->Tag() != type_tag ) + type_mismatch(v, expected); +} + +} // namespace detail + +/** Type for a Zeek record value. */ +using ValRecordPtr = ::zeek::IntrusivePtr<::zeek::RecordVal>; + +/** Type for a Zeek set value. */ +using ValSetPtr = ::zeek::IntrusivePtr<::zeek::TableVal>; + +/** Type for a Zeek table value. */ +using ValTablePtr = ::zeek::IntrusivePtr<::zeek::TableVal>; + +/** Type for a Zeek vector value. 
*/ +using ValVectorPtr = ::zeek::IntrusivePtr<::zeek::VectorVal>; + +/** Converts a Zeek `addr` value to its Spicy equivalent. Throws on error. */ +inline ::hilti::rt::Address as_address(const ValPtr& v) { + detail::check_type(v, TYPE_ADDR, "address"); + return ::hilti::rt::Address(v->AsAddr()); +} + +/** Converts a Zeek `bool` value to its Spicy equivalent. Throws on error. */ +inline ::hilti::rt::Bool as_bool(const ValPtr& v) { + detail::check_type(v, TYPE_BOOL, "bool"); + return ::hilti::rt::Bool(v->AsBool()); +} + +/** Converts a Zeek `count` value to its Spicy equivalent. Throws on error. */ +inline hilti::rt::integer::safe as_count(const ValPtr& v) { + detail::check_type(v, TYPE_COUNT, "count"); + return v->AsCount(); +} + +/** Converts a Zeek `double` value to its Spicy equivalent. Throws on error. */ +inline double as_double(const ValPtr& v) { + detail::check_type(v, TYPE_DOUBLE, "double"); + return v->AsDouble(); +} + +/** + * Converts a Zeek `enum` value to a string containing the (unscoped) label + * name. Throws on error. + */ +inline std::string as_enum(const ValPtr& v) { + detail::check_type(v, TYPE_ENUM, "enum"); + // Zeek returns the name as "::", we just want the enum name. + return hilti::rt::rsplit1(v->GetType()->AsEnumType()->Lookup(v->AsEnum()), "::").second; +} + +/** Converts a Zeek `int` value to its Spicy equivalent. Throws on error. */ +inline hilti::rt::integer::safe as_int(const ValPtr& v) { + detail::check_type(v, TYPE_INT, "int"); + return v->AsInt(); +} + +/** Converts a Zeek `interval` value to its Spicy equivalent. Throws on error. */ +inline ::hilti::rt::Interval as_interval(const ValPtr& v) { + detail::check_type(v, TYPE_INTERVAL, "interval"); + return ::hilti::rt::Interval(v->AsInterval(), hilti::rt::Interval::SecondTag{}); +} + +/** Converts a Zeek `port` value to its Spicy equivalent. Throws on error. 
*/ +inline ::hilti::rt::Port as_port(const ValPtr& v) { + detail::check_type(v, TYPE_PORT, "port"); + auto p = v->AsPortVal(); + // Wrap port number into safe integer to catch any overflows (Zeek returns + // an uint32, while HILTI wants an uint16). + return ::hilti::rt::Port(hilti::rt::integer::safe(p->Port()), p->PortType()); +} + +/** Converts a Zeek `record` value to its Spicy equivalent. Throws on error. */ +inline ValRecordPtr as_record(const ValPtr& v) { + detail::check_type(v, TYPE_RECORD, "record"); + return ::zeek::cast_intrusive<::zeek::RecordVal>(v); +} + +/** Converts a Zeek `set` value to its Spicy equivalent. Throws on error. */ +inline ValSetPtr as_set(const ValPtr& v) { + detail::check_type(v, TYPE_TABLE, "set"); + + if ( ! v->AsTableVal()->GetType()->IsSet() ) + detail::type_mismatch(v, "set"); + + return ::zeek::cast_intrusive<::zeek::TableVal>(v); +} + +/** Converts a Zeek `string` value to its Spicy equivalent. Throws on error. */ +inline hilti::rt::Bytes as_string(const ValPtr& v) { + detail::check_type(v, TYPE_STRING, "string"); + auto str = v->AsString(); + return hilti::rt::Bytes(reinterpret_cast(str->Bytes()), str->Len()); +} + +/** Converts a Zeek `subnet` value to its Spicy equivalent. Throws on error. */ +inline ::hilti::rt::Network as_subnet(const ValPtr& v) { + detail::check_type(v, TYPE_SUBNET, "subnet"); + auto subnet = v->AsSubNet(); + return ::hilti::rt::Network(subnet.Prefix(), subnet.Length()); +} + +/** Converts a Zeek `table` value to its Spicy equivalent. Throws on error. */ +inline ValTablePtr as_table(const ValPtr& v) { + detail::check_type(v, TYPE_TABLE, "table"); + + if ( v->AsTableVal()->GetType()->IsSet() ) + detail::type_mismatch(v, "table"); + + return ::zeek::cast_intrusive<::zeek::TableVal>(v); +} + +/** Converts a Zeek `time` value to its Spicy equivalent. Throws on error. 
*/ +inline ::hilti::rt::Time as_time(const ValPtr& v) { + detail::check_type(v, TYPE_TIME, "time"); + return ::hilti::rt::Time(v->AsTime(), hilti::rt::Time::SecondTag{}); +} + +/** Converts a Zeek `vector` value to its Spicy equivalent. Throws on error. */ +inline ValVectorPtr as_vector(const ValPtr& v) { + detail::check_type(v, TYPE_VECTOR, "vector"); + return ::zeek::cast_intrusive<::zeek::VectorVal>(v); +} + + +/** Retrieves a global Zeek variable of assumed type `addr`. Throws on error. */ +inline hilti::rt::Address get_address(const std::string& name) { return as_address(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `bool`. Throws on error. */ +inline hilti::rt::Bool get_bool(const std::string& name) { return as_bool(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `count`. Throws on error. */ +inline hilti::rt::integer::safe get_count(const std::string& name) { return as_count(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `double`. Throws on error. */ +inline double get_double(const std::string& name) { return as_double(get_value(name)); } + +/** + * Retrieves a global Zeek variable of assumed type `enum` as a string + * containing the (unscoped) label name. Throws on error. + */ +inline std::string get_enum(const std::string& name) { return as_enum(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `int`. Throws on error. */ +inline hilti::rt::integer::safe get_int(const std::string& name) { return as_int(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `interval`. Throws on error. */ +inline hilti::rt::Interval get_interval(const std::string& name) { return as_interval(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `port`. Throws on error. 
*/ +inline hilti::rt::Port get_port(const std::string& name) { return as_port(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `record`. Throws on error. */ +inline ValRecordPtr get_record(const std::string& name) { return as_record(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `set`. Throws on error. */ +inline ValSetPtr get_set(const std::string& name) { return as_set(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `string`. Throws on error. */ +inline hilti::rt::Bytes get_string(const std::string& name) { return as_string(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `subnet`. Throws on error. */ +inline hilti::rt::Network get_subnet(const std::string& name) { return as_subnet(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `table`. Throws on error. */ +inline ValTablePtr get_table(const std::string& name) { return as_table(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `time`. Throws on error. */ +inline hilti::rt::Time get_time(const std::string& name) { return as_time(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `vector`. Throws on error. */ +inline ValVectorPtr get_vector(const std::string& name) { return as_vector(get_value(name)); } + +/** Retrieves the value of Zeek record field. Throws on error. */ +inline ::zeek::ValPtr record_field(const zeek::spicy::rt::ValRecordPtr& v, const std::string& field) { + auto index = v->GetType()->AsRecordType()->FieldOffset(field.c_str()); + if ( index < 0 ) + throw InvalidValue(util::fmt("no such record field: %s", field.c_str())); + + if ( auto x = v->GetFieldOrDefault(index) ) + return x; + else + throw InvalidValue(util::fmt("record field is not set: %s", field.c_str())); +} + +/** Retrieves the value of Zeek record field. Throws on error. 
*/ +inline ::zeek::ValPtr record_field(const std::string& name, const std::string& index) { + return record_field(get_record(name), index); +} + +/** Check if a Zeek record has a field's value set. Throws on errors. */ +inline hilti::rt::Bool record_has_value(const zeek::spicy::rt::ValRecordPtr& v, const std::string& field) { + auto index = v->GetType()->AsRecordType()->FieldOffset(field.c_str()); + if ( index < 0 ) + throw InvalidValue(util::fmt("no such field in record type: %s", field.c_str())); + + return v->HasField(index); +} + +/** Checks if a Zeek record has a field's value set. Throws on errors. */ +inline hilti::rt::Bool record_has_value(const std::string& name, const std::string& index) { + return record_has_value(get_record(name), index); +} + +/** Check if a Zeek record type has a field of a give name. Throws on errors. */ +inline hilti::rt::Bool record_has_field(const zeek::spicy::rt::ValRecordPtr& v, const std::string& field) { + return v->GetType()->AsRecordType()->FieldOffset(field.c_str()) >= 0; +} + +/** Check if a Zeek record type has a field of a give name. Throws on errors. */ +inline hilti::rt::Bool record_has_field(const std::string& name, const std::string& index) { + return record_has_value(get_record(name), index); +} + +/** Checks if a Zeek set contains a given element. Throws on errors. */ +template +::hilti::rt::Bool set_contains(const ValSetPtr& v, const T& key) { + auto index = v->GetType()->AsTableType()->GetIndexTypes()[0]; + return (v->Find(to_val(key, index)) != nullptr); +} + +/** Checks if a Zeek set contains a given element. Throws on errors. */ +template +::hilti::rt::Bool set_contains(const std::string& name, const T& key) { + return set_contains(get_set(name), key); +} + +/** Checks if a Zeek table contains a given element. Throws on errors. 
*/ +template +::hilti::rt::Bool table_contains(const ValTablePtr& v, const T& key) { + auto index = v->GetType()->AsTableType()->GetIndexTypes()[0]; + return (v->Find(to_val(key, index)) != nullptr); +} + +/** Check if a Zeek table contains a given element. Throws on errors. */ +template +::hilti::rt::Bool table_contains(const std::string& name, const T& key) { + return table_contains(get_table(name), key); +} + +/** + * Retrieves a value from a Zeek table. Returns an error value if the key does + * not exist. Throws on other errors. + */ +template +std::optional<::zeek::ValPtr> table_lookup(const zeek::spicy::rt::ValTablePtr& v, const T& key) { + auto index = v->GetType()->AsTableType()->GetIndexTypes()[0]; + if ( auto x = v->FindOrDefault(to_val(key, index)) ) + return x; + else + return {}; +} + +/** + * Retrieves a value from a Zeek table. Returns an error value if the key does + * not exist. Throws on other errors. + */ +template +std::optional<::zeek::ValPtr> table_lookup(const std::string& name, const T& key) { + return table_lookup(get_table(name), key); +} + +/** Returns a Zeek vector element. Throws on errors. */ +inline ::zeek::ValPtr vector_index(const zeek::spicy::rt::ValVectorPtr& v, + const hilti::rt::integer::safe& index) { + if ( index >= v->Size() ) + throw InvalidValue(util::fmt("vector index out of bounds: %" PRIu64, index.Ref())); + + return v->ValAt(index); +} + +/** Returns a Zeek vector element. Throws on errors. */ +inline ::zeek::ValPtr vector_index(const std::string& name, const hilti::rt::integer::safe& index) { + return vector_index(get_vector(name), index); +} + +/** Returns the size of a Zeek vector. Throws on errors. */ +inline hilti::rt::integer::safe vector_size(const zeek::spicy::rt::ValVectorPtr& v) { return v->Size(); } + +/** Returns the size of a Zeek vector. Throws on errors. 
*/ +inline hilti::rt::integer::safe vector_size(const std::string& name) { return vector_size(get_vector(name)); } + } // namespace zeek::spicy::rt + +namespace hilti::rt::detail::adl { +// Stringification for opaque type handles. +inline std::string to_string(const zeek::ValPtr& v, detail::adl::tag /* unused */) { return ""; } + +inline std::string to_string(const zeek::spicy::rt::ValRecordPtr& v, detail::adl::tag /* unused */) { + return ""; +} + +inline std::string to_string(const zeek::spicy::rt::ValTablePtr& v, detail::adl::tag /* unused */) { + return ""; +} + +inline std::string to_string(const zeek::spicy::rt::ValVectorPtr& v, detail::adl::tag /* unused */) { + return ""; +} +} // namespace hilti::rt::detail::adl diff --git a/testing/btest/Baseline/spicy.get-values/output b/testing/btest/Baseline/spicy.get-values/output new file mode 100644 index 0000000000..4e01755636 --- /dev/null +++ b/testing/btest/Baseline/spicy.get-values/output @@ -0,0 +1,6 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. + + + + + diff --git a/testing/btest/spicy/get-values.spicy b/testing/btest/spicy/get-values.spicy new file mode 100644 index 0000000000..e0e5ab6170 --- /dev/null +++ b/testing/btest/spicy/get-values.spicy @@ -0,0 +1,99 @@ +# @TEST-REQUIRES: have-spicy +# +# @TEST-EXEC: spicyz -d -o test.hlto %INPUT +# @TEST-EXEC: zeek globals.zeek test.hlto Spicy::enable_print=T >output +# @TEST-EXEC: btest-diff output +# +# @TEST-DOC: Test access to Zeek-side globals. 
+ +module Foo; + +import zeek; + +assert zeek::get_address("Bar::address_") == 1.2.3.4; +assert zeek::get_bool("Bar::bool_") == True; +assert zeek::get_count("Bar::count_") == 42; +assert zeek::get_double("Bar::double_") == 42.0; +assert zeek::get_enum("Bar::enum_") == "Foo"; +assert zeek::get_int("Bar::int_") == 42; +assert zeek::get_interval("Bar::interval_") == interval(42); +assert zeek::get_port("Bar::port_") == 42/tcp; +assert zeek::get_string("Bar::string_") == b"xxx"; +assert zeek::get_subnet("Bar::subnet_") == 1.2.3.4/16; +assert zeek::get_time("Bar::time_") == time(42.0); + +assert zeek::as_address(zeek::get_value("Bar::address_")) == 1.2.3.4; +assert zeek::as_bool(zeek::get_value("Bar::bool_")) == True; +assert zeek::as_count(zeek::get_value("Bar::count_")) == 42; +assert zeek::as_double(zeek::get_value("Bar::double_")) == 42.0; +assert zeek::as_enum(zeek::get_value("Bar::enum_")) == "Foo"; +assert zeek::as_int(zeek::get_value("Bar::int_")) == 42; +assert zeek::as_interval(zeek::get_value("Bar::interval_")) == interval(42); +assert zeek::as_port(zeek::get_value("Bar::port_")) == 42/tcp; +assert zeek::as_string(zeek::get_value("Bar::string_")) == b"xxx"; +assert zeek::as_subnet(zeek::get_value("Bar::subnet_")) == 1.2.3.4/16; +assert zeek::as_time(zeek::get_value("Bar::time_")) == time(42.0); + +assert zeek::as_string(zeek::record_field("Bar::record_", "x")) == b"foo"; +assert zeek::as_int(zeek::record_field("Bar::record_", "y")) == 42; +assert zeek::as_int(zeek::record_field(zeek::get_record("Bar::record_"), "y")) == 42; +assert zeek::record_has_value("Bar::record_", "x"); +assert zeek::record_has_value(zeek::get_record("Bar::record_"), "y"); +assert zeek::record_has_value("Bar::record_", "y"); +assert ! zeek::record_has_value("Bar::record_", "z"); +assert zeek::record_has_field("Bar::record_", "x"); +assert ! 
zeek::record_has_field("Bar::record_", "z"); +assert-exception zeek::record_field("Bar::record_", "z"); # not set + +assert zeek::set_contains("Bar::set_", "foo"); +assert ! zeek::set_contains("Bar::set_", "xxx"); +assert zeek::set_contains(zeek::get_set("Bar::set_"), "foo"); + +assert zeek::table_contains("Bar::table_", "foo"); +assert ! zeek::table_contains("Bar::table_", "xxx"); +assert zeek::table_contains(zeek::get_table("Bar::table_"), "foo"); +assert zeek::as_string(*zeek::table_lookup("Bar::table_", "foo")) == b"bar"; +assert zeek::as_string(*zeek::table_lookup(zeek::get_table("Bar::table_"), "foo")) == b"bar"; +assert ! zeek::table_lookup("Bar::table_", "does-not-exist"); + +assert zeek::as_count(zeek::vector_index("Bar::vector_", 2)) == 2; +assert zeek::as_count(zeek::vector_index(zeek::get_vector("Bar::vector_"), 2)) == 2; + +assert-exception zeek::get_bool("Bar::does_not_exist"); +assert-exception zeek::get_bool("Bar::string_"); + +# Test stringifcation. +print zeek::get_value("Bar::bool_"); +print zeek::get_record("Bar::record_"); +print zeek::get_set("Bar::set_"); +print zeek::get_table("Bar::table_"); +print zeek::get_vector("Bar::vector_"); + +# @TEST-START-FILE globals.zeek +module Bar; + +type Record: record { + x: string; + y: int &default=42; + z: bool &optional; +}; + +type Enum: enum { Foo, Bar }; + +const address_: addr = 1.2.3.4; +const bool_: bool = T; +const count_: count = 42; +const double_: double = 42.0; +const enum_: Enum = Foo; +const int_: int = 42; +const interval_: interval = 42sec; +const port_: port = 42/tcp; +const record_: Record = [$x="foo"]; +const set_: set[string] = set("foo", "bar"); +const string_: string = "xxx"; +const subnet_: subnet = 1.2.3.4/16; +const table_: table[string] of string = table(["foo"] = "bar"); +const time_: time = double_to_time(42.0); +const vector_: vector of count = vector(0, 1, 2); + +# @TEST-END-FILE From 98760a068394c358061295878f7421ebab45cb0f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: 
Wed, 19 Jun 2024 11:43:36 +0200 Subject: [PATCH 43/89] Bump Spicy and documentation submodules. --- auxil/spicy | 2 +- doc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/auxil/spicy b/auxil/spicy index 5ff0cfea39..588c79a8c4 160000 --- a/auxil/spicy +++ b/auxil/spicy @@ -1 +1 @@ -Subproject commit 5ff0cfea39ece44d1ef94f9762926b4bb4138d58 +Subproject commit 588c79a8c4d3cb7e7e8265a9e85d59cb4bd5f972 diff --git a/doc b/doc index 5c377d2320..44651a4526 160000 --- a/doc +++ b/doc @@ -1 +1 @@ -Subproject commit 5c377d232043cfcaf23df260880ca1613b19a9f4 +Subproject commit 44651a45261613b14dbd44e0ea8376346a689bd8 From ad543a4803cc1df97288876c0a354bfdc05c7069 Mon Sep 17 00:00:00 2001 From: Michael Dopheide Date: Thu, 20 Jun 2024 17:05:03 -0500 Subject: [PATCH 44/89] Fixes build error of OpenVPN spicy plugin --- src/spicy/runtime-support.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spicy/runtime-support.h b/src/spicy/runtime-support.h index 3c82f3004c..9b148bb4aa 100644 --- a/src/spicy/runtime-support.h +++ b/src/spicy/runtime-support.h @@ -21,8 +21,8 @@ #include #include -#include "IntrusivePtr.h" -#include "Type.h" +#include "zeek/IntrusivePtr.h" +#include "zeek/Type.h" #include "zeek/Desc.h" #include "zeek/Val.h" #include "zeek/spicy/cookie.h" From 4b719ef45a3a727c006840b45cdc825779d87986 Mon Sep 17 00:00:00 2001 From: Vern Paxson Date: Fri, 10 May 2024 20:22:04 -0700 Subject: [PATCH 45/89] script optimization for record operations sourced (in part) from other records --- src/Expr.cc | 13 +- src/Expr.h | 8 +- src/script_opt/CSE.cc | 1 + src/script_opt/Expr.cc | 250 +++++++++++++++++- src/script_opt/Expr.h | 92 +++++++ src/script_opt/ProfileFunc.cc | 3 +- src/script_opt/Reduce.cc | 26 +- src/script_opt/ScriptOpt.cc | 2 +- src/script_opt/Stmt.cc | 206 ++++++++++++++- src/script_opt/UseDefs.cc | 7 + src/script_opt/ZAM/Compile.h | 5 +- src/script_opt/ZAM/Expr.cc | 157 ++++++++++- src/script_opt/ZAM/IterInfo.h | 4 +- 
src/script_opt/ZAM/OPs/ZAM.op | 143 +++++++++- src/script_opt/ZAM/Stmt.cc | 4 +- src/script_opt/ZAM/ZInst.h | 26 +- .../language.record-chain-assign/output | 3 + .../btest/Baseline/opt.opt-no-files/.stderr | 2 +- .../btest/Baseline/opt.opt-no-func/.stderr | 2 +- .../btest/Baseline/opt.opt-no-func2/.stderr | 2 +- .../btest/language/record-chain-assign.zeek | 47 ++++ 21 files changed, 953 insertions(+), 50 deletions(-) create mode 100644 testing/btest/Baseline/language.record-chain-assign/output create mode 100644 testing/btest/language/record-chain-assign.zeek diff --git a/src/Expr.cc b/src/Expr.cc index 7231e888dc..abbb07a3d4 100644 --- a/src/Expr.cc +++ b/src/Expr.cc @@ -99,11 +99,14 @@ const char* expr_name(ExprTag t) { "vec+=", "[]=", "$=", - "from_any_vec_coerce ", + "$=$", + "$+=$", + "[=+$]", + "from_any_vec_coerce", "any[]", "ZAM-builtin()", - "nop", + "nop", // don't add after this, it's used to compute NUM_EXPRS }; if ( int(t) >= NUM_EXPRS ) { @@ -2916,7 +2919,8 @@ RecordConstructorExpr::RecordConstructorExpr(ListExprPtr constructor_list) Error("bad type in record constructor", constructor_error_expr); } -RecordConstructorExpr::RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list) +RecordConstructorExpr::RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list, + bool check_mandatory_fields) : Expr(EXPR_RECORD_CONSTRUCTOR), op(std::move(constructor_list)) { if ( IsError() ) return; @@ -2957,6 +2961,9 @@ RecordConstructorExpr::RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr if ( IsError() ) return; + if ( ! 
check_mandatory_fields ) + return; + auto n = known_rt->NumFields(); for ( i = 0; i < n; ++i ) if ( fields_seen.count(i) == 0 ) { diff --git a/src/Expr.h b/src/Expr.h index 0cab292c92..73929a2114 100644 --- a/src/Expr.h +++ b/src/Expr.h @@ -103,6 +103,9 @@ enum ExprTag : int { EXPR_APPEND_TO, EXPR_INDEX_ASSIGN, EXPR_FIELD_LHS_ASSIGN, + EXPR_REC_ASSIGN_FIELDS, + EXPR_REC_ADD_FIELDS, + EXPR_REC_CONSTRUCT_WITH_REC, EXPR_FROM_ANY_VEC_COERCE, EXPR_ANY_INDEX, EXPR_SCRIPT_OPT_BUILTIN, @@ -1166,7 +1169,10 @@ public: explicit RecordConstructorExpr(ListExprPtr constructor_list); // This form is used to construct records of a known (ultimate) type. - explicit RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list); + // The flag allows skipping of checking for mandatory fields, for + // script optimization that may elide them. + explicit RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list, + bool check_mandatory_fields = true); ListExprPtr Op() const { return op; } const auto& Map() const { return map; } diff --git a/src/script_opt/CSE.cc b/src/script_opt/CSE.cc index 9830399ffe..6f122468af 100644 --- a/src/script_opt/CSE.cc +++ b/src/script_opt/CSE.cc @@ -133,6 +133,7 @@ TraversalCode CSE_ValidityChecker::PreExpr(const Expr* e) { case EXPR_RECORD_COERCE: case EXPR_RECORD_CONSTRUCTOR: + case EXPR_REC_CONSTRUCT_WITH_REC: // Note, record coercion behaves like constructors in terms of // potentially executing &default functions. 
In either case, // the type of the expression reflects the type we want to analyze diff --git a/src/script_opt/Expr.cc b/src/script_opt/Expr.cc index 8e3d1fcb9a..fbda0031fa 100644 --- a/src/script_opt/Expr.cc +++ b/src/script_opt/Expr.cc @@ -1783,7 +1783,7 @@ ExprPtr RecordConstructorExpr::Duplicate() { if ( map ) { auto rt = cast_intrusive(type); - return SetSucc(new RecordConstructorExpr(rt, op_l)); + return SetSucc(new RecordConstructorExpr(rt, op_l, false)); } else return SetSucc(new RecordConstructorExpr(op_l)); @@ -1807,6 +1807,11 @@ bool RecordConstructorExpr::HasReducedOps(Reducer* c) const { } ExprPtr RecordConstructorExpr::Reduce(Reducer* c, StmtPtr& red_stmt) { + if ( ConstructFromRecordExpr::FindMostCommonRecordSource(op) ) { + auto cfr = with_location_of(make_intrusive(this), this); + return cfr->Reduce(c, red_stmt); + } + red_stmt = ReduceToSingletons(c); if ( c->Optimizing() ) @@ -2846,6 +2851,249 @@ void FieldLHSAssignExpr::ExprDescribe(ODesc* d) const { op2->Describe(d); } +// Helper functions. +// This first one mines out of a given statement in an assignment chain the +// variable that occurs as a LHS target, so 'x' for "x$foo = y$bar". +static NameExprPtr get_RFU_LHS_var(const Stmt* s) { + auto s_e = s->AsExprStmt()->StmtExpr(); + auto var = s_e->GetOp1()->GetOp1()->GetOp1(); + ASSERT(var->Tag() == EXPR_NAME); + return cast_intrusive(var); +} + +// This one mines out the RHS, so 'y' for "x$foo = y$bar", or for +// "x$foo = x$foo + y$bar" (which is what "x$foo += y$bar" is at this point). 
+static NameExprPtr get_RFU_RHS_var(const Stmt* s) { + auto s_e = s->AsExprStmt()->StmtExpr(); + auto rhs = s_e->GetOp2(); + + ExprPtr var; + if ( rhs->Tag() == EXPR_FIELD ) + var = rhs->GetOp1(); + else + var = rhs->GetOp2()->GetOp1(); + + ASSERT(var->Tag() == EXPR_NAME); + return cast_intrusive(var); +} + +RecordFieldUpdatesExpr::RecordFieldUpdatesExpr(ExprTag t, const std::vector& stmts, + std::set& stmt_pool) + : BinaryExpr(t, get_RFU_LHS_var(stmts[0]), get_RFU_RHS_var(stmts[0])) { + // Build up the LHS map (record fields we're assigning/adding) and RHS map + // (record fields from which we're assigning). + for ( auto s : stmts ) { + auto s_e = s->AsExprStmt()->StmtExpr(); + auto lhs = s_e->GetOp1()->GetOp1(); + auto lhs_field = lhs->AsFieldExpr()->Field(); + + auto rhs = s_e->GetOp2(); + if ( rhs->Tag() != EXPR_FIELD ) + // It's "x$foo = x$foo + y$bar". + rhs = rhs->GetOp2(); + + auto rhs_field = rhs->AsFieldExpr()->Field(); + + lhs_map.push_back(lhs_field); + rhs_map.push_back(rhs_field); + + // Consistency check that the statement is indeed in the pool, + // before we remove it. 
+ ASSERT(stmt_pool.count(s) > 0); + stmt_pool.erase(s); + } +} + +RecordFieldUpdatesExpr::RecordFieldUpdatesExpr(ExprTag t, ExprPtr e1, ExprPtr e2, std::vector _lhs_map, + std::vector _rhs_map) + : BinaryExpr(t, std::move(e1), std::move(e2)) { + lhs_map = std::move(_lhs_map); + rhs_map = std::move(_rhs_map); +} + +ValPtr RecordFieldUpdatesExpr::Fold(Val* v1, Val* v2) const { + auto rv1 = v1->AsRecordVal(); + auto rv2 = v2->AsRecordVal(); + + for ( size_t i = 0; i < lhs_map.size(); ++i ) + FoldField(rv1, rv2, i); + + return nullptr; +} + +bool RecordFieldUpdatesExpr::IsReduced(Reducer* c) const { return HasReducedOps(c); } + +void RecordFieldUpdatesExpr::ExprDescribe(ODesc* d) const { + op1->Describe(d); + d->Add(expr_name(tag)); + op2->Describe(d); +} + +ExprPtr RecordFieldUpdatesExpr::Reduce(Reducer* c, StmtPtr& red_stmt) { + if ( c->Optimizing() ) { + op1 = c->UpdateExpr(op1); + op2 = c->UpdateExpr(op2); + } + + red_stmt = nullptr; + + if ( ! op1->IsSingleton(c) ) + op1 = op1->ReduceToSingleton(c, red_stmt); + + StmtPtr red2_stmt; + if ( ! op2->IsSingleton(c) ) + op2 = op2->ReduceToSingleton(c, red2_stmt); + + red_stmt = MergeStmts(red_stmt, std::move(red2_stmt)); + + return ThisPtr(); +} + +ExprPtr AssignRecordFieldsExpr::Duplicate() { + auto e1 = op1->Duplicate(); + auto e2 = op2->Duplicate(); + return SetSucc(new AssignRecordFieldsExpr(e1, e2, lhs_map, rhs_map)); +} + +void AssignRecordFieldsExpr::FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const { + rv1->Assign(lhs_map[i], rv2->GetField(rhs_map[i])); +} + +ConstructFromRecordExpr::ConstructFromRecordExpr(const RecordConstructorExpr* orig) + : AssignRecordFieldsExpr(nullptr, nullptr, {}, {}) { + tag = EXPR_REC_CONSTRUCT_WITH_REC; + SetType(orig->GetType()); + + // Arguments used in original and final constructor. 
+ auto& orig_args = orig->Op()->Exprs(); + // The one we'll build up below: + auto args = with_location_of(make_intrusive(), orig); + + auto src_id = FindMostCommonRecordSource(orig->Op()); + auto& map = orig->Map(); + + for ( size_t i = 0; i < orig_args.size(); ++i ) { + auto e = orig_args[i]; + auto src = FindRecordSource(e); + if ( src && src->GetOp1()->AsNameExpr()->IdPtr() == src_id ) { + // "map" might be nil if we're optimize [$x = foo$bar]. + lhs_map.push_back(map ? (*map)[i] : i); + rhs_map.push_back(src->Field()); + } + else + args->Append({NewRef{}, e}); + } + + auto rt = cast_intrusive(orig->GetType()); + op1 = with_location_of(make_intrusive(std::move(rt), std::move(args), false), orig); + op2 = with_location_of(make_intrusive(std::move(src_id)), orig); +} + +IDPtr ConstructFromRecordExpr::FindMostCommonRecordSource(const ListExprPtr& exprs) { + // Maps identifiers to how often they appear in the constructor's + // arguments as a field reference. Used to find the most common. + std::unordered_map id_cnt; + + for ( auto e : exprs->Exprs() ) { + auto src = FindRecordSource(e); + if ( src ) { + auto id = src->GetOp1()->AsNameExpr()->IdPtr(); + ++id_cnt[id] = 1; +#if 0 + auto ic = id_cnt.find(id); + if ( ic == id_cnt.end() ) + id_cnt[id] = 1; + else + ++ic->second; +#endif + } + } + + if ( id_cnt.empty() ) + return nullptr; + + // Return the most common. + auto max_entry = std::max_element(id_cnt.begin(), id_cnt.end(), + [](const std::pair& p1, const std::pair& p2) { + return p1.second < p2.second; + }); + return max_entry->first; +} + +FieldExprPtr ConstructFromRecordExpr::FindRecordSource(const Expr* const_e) { + // The following cast just saves us from having to define a "const" version + // of AsFieldAssignExprPtr(). 
+ auto e = const_cast(const_e); + const auto fa = e->AsFieldAssignExprPtr(); + auto fa_rhs = e->GetOp1(); + + if ( fa_rhs->Tag() != EXPR_FIELD ) + return nullptr; + + auto rhs_rec = fa_rhs->GetOp1(); + if ( rhs_rec->Tag() != EXPR_NAME ) + return nullptr; + + return cast_intrusive(fa_rhs); +} + +ExprPtr ConstructFromRecordExpr::Duplicate() { + auto e1 = op1->Duplicate(); + auto e2 = op2->Duplicate(); + return SetSucc(new ConstructFromRecordExpr(e1, e2, lhs_map, rhs_map)); +} + +bool ConstructFromRecordExpr::IsReduced(Reducer* c) const { return op1->HasReducedOps(c) && op2->IsReduced(c); } + +bool ConstructFromRecordExpr::HasReducedOps(Reducer* c) const { return IsReduced(c); } + +ExprPtr ConstructFromRecordExpr::Reduce(Reducer* c, StmtPtr& red_stmt) { + if ( c->Optimizing() ) { + op1 = c->UpdateExpr(op1); + op2 = c->UpdateExpr(op2); + } + + red_stmt = nullptr; + + if ( ! op1->HasReducedOps(c) ) + red_stmt = op1->ReduceToSingletons(c); + + StmtPtr red2_stmt; + if ( ! op2->IsSingleton(c) ) + op2 = op2->ReduceToSingleton(c, red2_stmt); + + red_stmt = MergeStmts(red_stmt, std::move(red2_stmt)); + + if ( c->Optimizing() ) + return ThisPtr(); + else + return AssignToTemporary(c, red_stmt); +} + +ExprPtr AddRecordFieldsExpr::Duplicate() { + auto e1 = op1->Duplicate(); + auto e2 = op2->Duplicate(); + return SetSucc(new AddRecordFieldsExpr(e1, e2, lhs_map, rhs_map)); +} + +void AddRecordFieldsExpr::FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const { + // The goal here is correctness, not efficiency, since normally this + // expression only exists temporarily before being compiled to ZAM. + // Doing it this way saves us from having to switch on the type of the '+' + // operands. 
+ auto lhs_val = rv1->GetField(lhs_map[i]); + auto rhs_val = rv2->GetField(rhs_map[i]); + + auto lhs_const = make_intrusive(lhs_val); + auto rhs_const = make_intrusive(rhs_val); + + auto add_expr = make_intrusive(lhs_const, rhs_const); + auto sum = add_expr->Eval(nullptr); + ASSERT(sum); + + rv1->Assign(lhs_map[i], sum); +} + CoerceToAnyExpr::CoerceToAnyExpr(ExprPtr arg_op) : UnaryExpr(EXPR_TO_ANY_COERCE, std::move(arg_op)) { type = base_type(TYPE_ANY); } diff --git a/src/script_opt/Expr.h b/src/script_opt/Expr.h index 3d7bc382c0..31ebeaf2b7 100644 --- a/src/script_opt/Expr.h +++ b/src/script_opt/Expr.h @@ -104,6 +104,98 @@ protected: int field; }; +// Base class for updating a number of record fields from fields in +// another record. +class RecordFieldUpdatesExpr : public BinaryExpr { +public: + const auto& LHSMap() const { return lhs_map; } + const auto& RHSMap() const { return rhs_map; } + + // Only needed if we're transforming-but-not-compiling. + ValPtr Fold(Val* v1, Val* v2) const override; + + bool IsPure() const override { return false; } + bool IsReduced(Reducer* c) const override; + ExprPtr Reduce(Reducer* c, StmtPtr& red_stmt) override; + +protected: + RecordFieldUpdatesExpr(ExprTag t, const std::vector& stmts, std::set& stmt_pool); + RecordFieldUpdatesExpr(ExprTag t, ExprPtr e1, ExprPtr e2, std::vector _lhs_map, std::vector _rhs_map); + + // Apply the operation for the given index 'i' from rv2 to rv1. + // Does not return a value since we're modifying rv1 in-place. + virtual void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const = 0; + + void ExprDescribe(ODesc* d) const override; + + std::vector lhs_map; + std::vector rhs_map; +}; + +// Assign a bunch of record fields en masse from fields in another record. 
+class AssignRecordFieldsExpr : public RecordFieldUpdatesExpr { +public: + AssignRecordFieldsExpr(const std::vector& stmts, std::set& stmt_pool) + : RecordFieldUpdatesExpr(EXPR_REC_ASSIGN_FIELDS, stmts, stmt_pool) {} + + ExprPtr Duplicate() override; + +protected: + // Used for duplicating. + AssignRecordFieldsExpr(ExprPtr e1, ExprPtr e2, std::vector _lhs_map, std::vector _rhs_map) + : RecordFieldUpdatesExpr(EXPR_REC_ASSIGN_FIELDS, e1, e2, _lhs_map, _rhs_map) {} + + void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const override; +}; + +// Construct a record with some of the fields taken directly from another +// record. After full construction, the first operand is the base constructor +// (a subset of the original) and the second is the source record being used +// for some of the initialization. +using FieldExprPtr = IntrusivePtr; +class ConstructFromRecordExpr : public AssignRecordFieldsExpr { +public: + ConstructFromRecordExpr(const RecordConstructorExpr* orig); + + // Helper function that finds the most common source value. + // Returns its identifier, or nil if there is no "$field = x$y" + // to leverage. + static IDPtr FindMostCommonRecordSource(const ListExprPtr& exprs); + + ExprPtr Duplicate() override; + + bool IsReduced(Reducer* c) const override; + bool HasReducedOps(Reducer* c) const override; + ExprPtr Reduce(Reducer* c, StmtPtr& red_stmt) override; + +protected: + ConstructFromRecordExpr(ExprPtr e1, ExprPtr e2, std::vector _lhs_map, std::vector _rhs_map) + : AssignRecordFieldsExpr(e1, e2, _lhs_map, _rhs_map) { + tag = EXPR_REC_CONSTRUCT_WITH_REC; + } + + // Helper function that for a given "$field = x$y" returns the + // "x$y" node, or nil if that's not the nature of the expression. + static FieldExprPtr FindRecordSource(const Expr* e); +}; + +// Add en masse fields from one record to fields in another record. 
+// We could add additional such expressions for other common operations +// like "x$foo -= y$bar", but in practice these are quite rare. +class AddRecordFieldsExpr : public RecordFieldUpdatesExpr { +public: + AddRecordFieldsExpr(const std::vector& stmts, std::set& stmt_pool) + : RecordFieldUpdatesExpr(EXPR_REC_ADD_FIELDS, stmts, stmt_pool) {} + + ExprPtr Duplicate() override; + +protected: + AddRecordFieldsExpr(ExprPtr e1, ExprPtr e2, std::vector _lhs_map, std::vector _rhs_map) + : RecordFieldUpdatesExpr(EXPR_REC_ADD_FIELDS, e1, e2, _lhs_map, _rhs_map) {} + + void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const override; +}; + // ... and for conversion from a "vector of any" type. class CoerceFromAnyVecExpr : public UnaryExpr { public: diff --git a/src/script_opt/ProfileFunc.cc b/src/script_opt/ProfileFunc.cc index b10fbda940..a62e436774 100644 --- a/src/script_opt/ProfileFunc.cc +++ b/src/script_opt/ProfileFunc.cc @@ -442,7 +442,8 @@ TraversalCode ProfileFunc::PreExpr(const Expr* e) { return TC_ABORTSTMT; } - case EXPR_RECORD_CONSTRUCTOR: CheckRecordConstructor(e->GetType()); break; + case EXPR_RECORD_CONSTRUCTOR: + case EXPR_REC_CONSTRUCT_WITH_REC: CheckRecordConstructor(e->GetType()); break; case EXPR_SET_CONSTRUCTOR: { auto sc = static_cast(e); diff --git a/src/script_opt/Reduce.cc b/src/script_opt/Reduce.cc index 801353efce..344f8ec07b 100644 --- a/src/script_opt/Reduce.cc +++ b/src/script_opt/Reduce.cc @@ -57,7 +57,7 @@ static bool same_op(const Expr* op1, const Expr* op2, bool check_defs) { return def_1 == def_2 && def_1 != NO_DEF; } - else if ( op1->Tag() == EXPR_CONST ) { + if ( op1->Tag() == EXPR_CONST ) { auto op1_c = op1->AsConstExpr(); auto op2_c = op2->AsConstExpr(); @@ -67,7 +67,7 @@ static bool same_op(const Expr* op1, const Expr* op2, bool check_defs) { return same_val(op1_v, op2_v); } - else if ( op1->Tag() == EXPR_LIST ) { + if ( op1->Tag() == EXPR_LIST ) { auto op1_l = op1->AsListExpr()->Exprs(); auto op2_l = 
op2->AsListExpr()->Exprs(); @@ -81,8 +81,22 @@ static bool same_op(const Expr* op1, const Expr* op2, bool check_defs) { return true; } - reporter->InternalError("bad singleton tag"); - return false; + // We only get here if dealing with non-reduced operands. + auto subop1_1 = op1->GetOp1(); + auto subop1_2 = op2->GetOp1(); + ASSERT(subop1_1 && subop1_2); + + if ( ! same_expr(subop1_1, subop1_2) ) + return false; + + auto subop2_1 = op1->GetOp2(); + auto subop2_2 = op2->GetOp2(); + if ( subop2_1 && ! same_expr(subop2_1, subop2_2) ) + return false; + + auto subop3_1 = op1->GetOp3(); + auto subop3_2 = op2->GetOp3(); + return ! subop3_1 || same_expr(subop3_1, subop3_2); } static bool same_op(const ExprPtr& op1, const ExprPtr& op2, bool check_defs) { @@ -107,6 +121,7 @@ static bool same_expr(const Expr* e1, const Expr* e2, bool check_defs) { case EXPR_CLONE: case EXPR_RECORD_CONSTRUCTOR: + case EXPR_REC_CONSTRUCT_WITH_REC: case EXPR_TABLE_CONSTRUCTOR: case EXPR_SET_CONSTRUCTOR: case EXPR_VECTOR_CONSTRUCTOR: @@ -473,7 +488,8 @@ bool Reducer::ExprValid(const ID* id, const Expr* e1, const Expr* e2) const { has_side_effects = true; } - else if ( e1->Tag() == EXPR_RECORD_CONSTRUCTOR || e1->Tag() == EXPR_RECORD_COERCE ) + else if ( e1->Tag() == EXPR_RECORD_CONSTRUCTOR || e1->Tag() == EXPR_REC_CONSTRUCT_WITH_REC || + e1->Tag() == EXPR_RECORD_COERCE ) has_side_effects = pfs->HasSideEffects(SideEffectsOp::CONSTRUCTION, e1->GetType()); e1_se = ExprSideEffects(has_side_effects); diff --git a/src/script_opt/ScriptOpt.cc b/src/script_opt/ScriptOpt.cc index 08d156b692..c41e6cf429 100644 --- a/src/script_opt/ScriptOpt.cc +++ b/src/script_opt/ScriptOpt.cc @@ -592,7 +592,7 @@ void analyze_scripts(bool no_unused_warnings) { func.SetShouldNotAnalyze(); if ( ! 
have_one_to_do ) - reporter->FatalError("no matching functions/files for C++ compilation"); + reporter->FatalError("no matching functions/files for script optimization"); if ( CPP_init_hook ) { (*CPP_init_hook)(); diff --git a/src/script_opt/Stmt.cc b/src/script_opt/Stmt.cc index 6a237a07be..7bea5a42c5 100644 --- a/src/script_opt/Stmt.cc +++ b/src/script_opt/Stmt.cc @@ -143,8 +143,13 @@ StmtPtr ExprStmt::DoReduce(Reducer* c) { // it has a non-void type it'll generate an // assignment to a temporary. red_e_stmt = e->ReduceToSingletons(c); - else + else { e = e->Reduce(c, red_e_stmt); + // It's possible that 'e' has gone away because it was a call + // to an inlined function that doesn't have a return value. + if ( ! e ) + return red_e_stmt; + } if ( red_e_stmt ) { auto s = make_intrusive(red_e_stmt, ThisPtr()); @@ -735,11 +740,210 @@ StmtPtr StmtList::DoReduce(Reducer* c) { return ThisPtr(); } +static unsigned int find_rec_assignment_chain(const std::vector& stmts, unsigned int i) { + const NameExpr* targ_rec = nullptr; + std::set fields_seen; + + for ( ; i < stmts.size(); ++i ) { + const auto& s = stmts[i]; + + // We're looking for either "x$a = y$b" or "x$a = x$a + y$b". + if ( s->Tag() != STMT_EXPR ) + // No way it's an assignment. + return i; + + auto se = s->AsExprStmt()->StmtExpr(); + if ( se->Tag() != EXPR_ASSIGN ) + return i; + + // The LHS of an assignment starts with a RefExpr. + auto lhs_ref = se->GetOp1(); + ASSERT(lhs_ref->Tag() == EXPR_REF); + + auto lhs = lhs_ref->GetOp1(); + if ( lhs->Tag() != EXPR_FIELD ) + // Not of the form "x$a = ...". + return i; + + auto lhs_field = lhs->AsFieldExpr()->Field(); + if ( fields_seen.count(lhs_field) > 0 ) + // Earlier in this chain we've already seen "x$a", so end the + // chain at this repeated use because it's no longer a simple + // block of field assignments. 
+ return i; + + fields_seen.insert(lhs_field); + + auto lhs_rec = lhs->GetOp1(); + if ( lhs_rec->Tag() != EXPR_NAME ) + // Not a simple field reference, e.g. "x$y$a". + return i; + + auto lhs_rec_n = lhs_rec->AsNameExpr(); + + if ( targ_rec ) { + if ( lhs_rec_n->Id() != targ_rec->Id() ) + // It's no longer "x$..." but some new variable "z$...". + return i; + } + else + targ_rec = lhs_rec_n; + } + + return i; +} + +using OpChain = std::map>; + +static void update_assignment_chains(const StmtPtr& s, OpChain& assign_chains, OpChain& add_chains) { + auto se = s->AsExprStmt()->StmtExpr(); + ASSERT(se->Tag() == EXPR_ASSIGN); + + // The first GetOp1() here accesses the EXPR_ASSIGN's first operand, + // which is a RefExpr; the second gets its operand, which we've guaranteed + // in find_rec_assignment_chain is a FieldExpr. + auto lhs_fe = se->GetOp1()->GetOp1()->AsFieldExpr(); + auto lhs_id = lhs_fe->GetOp1()->AsNameExpr()->Id(); + auto rhs = se->GetOp2(); + const FieldExpr* f; + OpChain* c; + + // Check whether RHS is either "y$b" or "x$a + y$b". + + if ( rhs->Tag() == EXPR_ADD ) { + auto rhs_op1 = rhs->GetOp1(); // need to see that it's "x$a" + + if ( rhs_op1->Tag() != EXPR_FIELD ) + return; + + auto rhs1_fe = rhs_op1->AsFieldExpr(); + auto rhs_op1_rec = rhs1_fe->GetOp1(); + if ( rhs_op1_rec->Tag() != EXPR_NAME || rhs_op1_rec->AsNameExpr()->Id() != lhs_id || + rhs1_fe->Field() != lhs_fe->Field() ) + return; + + auto rhs_op2 = rhs->GetOp2(); // need to see that it's "y$b" + if ( rhs_op2->Tag() != EXPR_FIELD ) + return; + + if ( ! IsArithmetic(rhs_op2->GetType()->Tag()) ) + // Avoid esoteric forms of adding. + return; + + f = rhs_op2->AsFieldExpr(); + c = &add_chains; + } + + else if ( rhs->Tag() == EXPR_FIELD ) { + f = rhs->AsFieldExpr(); + c = &assign_chains; + } + + else + // Not a RHS we know how to leverage. + return; + + auto f_rec = f->GetOp1(); + if ( f_rec->Tag() != EXPR_NAME ) + // Not a simple RHS, instead something like "y$z$b". 
+ return; + + // If we get here, it's a keeper, record the associated statement. + auto id = f_rec->AsNameExpr()->Id(); + (*c)[id].push_back(s.get()); +#if 0 + auto cf = c->find(id); + if ( cf == c->end() ) + (*c)[id] = std::vector{s.get()}; + else + cf->second.push_back(s.get()); +#endif +} + +static StmtPtr transform_chain(const OpChain& c, ExprTag t, std::set& chain_stmts) { + IntrusivePtr sl; + + for ( auto& id_stmts : c ) { + auto orig_s = id_stmts.second; + + if ( ! sl ) + // Now that we have a statement, create our list and associate + // its location with the statement. + sl = with_location_of(make_intrusive(), orig_s[0]); + + ExprPtr e; + if ( t == EXPR_ASSIGN ) + e = make_intrusive(orig_s, chain_stmts); + else if ( t == EXPR_ADD ) + e = make_intrusive(orig_s, chain_stmts); + else + reporter->InternalError("inconsistency transforming assignment chain"); + + e->SetLocationInfo(sl->GetLocationInfo()); + auto es = with_location_of(make_intrusive(std::move(e)), sl); + sl->Stmts().emplace_back(std::move(es)); + } + + return sl; +} + +static bool simplify_chain(const std::vector& stmts, unsigned int start, unsigned int end, + std::vector& f_stmts) { + OpChain assign_chains; + OpChain add_chains; + std::set chain_stmts; + + for ( auto i = start; i <= end; ++i ) { + auto& s = stmts[i]; + chain_stmts.insert(s.get()); + update_assignment_chains(s, assign_chains, add_chains); + } + + // An add-chain of any size is a win. For an assign-chain to be a win, + // it needs to have at least two elements, because a single "x$a = y$b" + // can be expressed using one ZAM instructino (but "x$a += y$b" cannot). + if ( add_chains.empty() ) { + bool have_useful_assign_chain = false; + for ( auto& ac : assign_chains ) + if ( ac.second.size() > 1 ) { + have_useful_assign_chain = true; + break; + } + + if ( ! have_useful_assign_chain ) + // No gains available. 
+ return false; + } + + auto as_c = transform_chain(assign_chains, EXPR_ASSIGN, chain_stmts); + auto ad_c = transform_chain(add_chains, EXPR_ADD, chain_stmts); + + ASSERT(as_c || ad_c); + + if ( as_c ) + f_stmts.push_back(as_c); + if ( ad_c ) + f_stmts.push_back(ad_c); + + // At this point, chain_stmts has only the remainders that weren't removed. + for ( auto s : stmts ) + if ( chain_stmts.count(s.get()) > 0 ) + f_stmts.push_back(s); + + return true; +} + bool StmtList::ReduceStmt(unsigned int& s_i, std::vector& f_stmts, Reducer* c) { bool did_change = false; auto& stmt_i = stmts[s_i]; auto old_stmt = stmt_i; + auto chain_end = find_rec_assignment_chain(stmts, s_i); + if ( chain_end > s_i && simplify_chain(stmts, s_i, chain_end - 1, f_stmts) ) { + s_i = chain_end - 1; + return true; + } + auto stmt = stmt_i->Reduce(c); if ( stmt != old_stmt ) diff --git a/src/script_opt/UseDefs.cc b/src/script_opt/UseDefs.cc index 2ff5013c90..e0edc9a85b 100644 --- a/src/script_opt/UseDefs.cc +++ b/src/script_opt/UseDefs.cc @@ -464,6 +464,13 @@ UDs UseDefs::ExprUDs(const Expr* e) { break; } + case EXPR_REC_CONSTRUCT_WITH_REC: { + auto constructor_UDs = ExprUDs(e->GetOp1().get()); + AddInExprUDs(uds, e->GetOp2().get()); + uds = UD_Union(uds, constructor_UDs); + break; + } + case EXPR_TABLE_CONSTRUCTOR: { auto t = static_cast(e); AddInExprUDs(uds, t->GetOp1().get()); diff --git a/src/script_opt/ZAM/Compile.h b/src/script_opt/ZAM/Compile.h index 84e455418c..50d2c1ce1c 100644 --- a/src/script_opt/ZAM/Compile.h +++ b/src/script_opt/ZAM/Compile.h @@ -189,6 +189,7 @@ private: const ZAMStmt CompileAddToExpr(const AddToExpr* e); const ZAMStmt CompileRemoveFromExpr(const RemoveFromExpr* e); const ZAMStmt CompileAssignExpr(const AssignExpr* e); + const ZAMStmt CompileRecFieldUpdates(const RecordFieldUpdatesExpr* e); const ZAMStmt CompileZAMBuiltin(const NameExpr* lhs, const ScriptOptBuiltinExpr* zbi); const ZAMStmt CompileAssignToIndex(const NameExpr* lhs, const IndexExpr* rhs); const ZAMStmt 
CompileFieldLHSAssignExpr(const FieldLHSAssignExpr* e); @@ -244,7 +245,9 @@ private: const ZAMStmt ConstructTable(const NameExpr* n, const Expr* e); const ZAMStmt ConstructSet(const NameExpr* n, const Expr* e); - const ZAMStmt ConstructRecord(const NameExpr* n, const Expr* e); + const ZAMStmt ConstructRecord(const NameExpr* n, const Expr* e) { return ConstructRecord(n, e, false); } + const ZAMStmt ConstructRecordFromRecord(const NameExpr* n, const Expr* e) { return ConstructRecord(n, e, true); } + const ZAMStmt ConstructRecord(const NameExpr* n, const Expr* e, bool is_from_rec); const ZAMStmt ConstructVector(const NameExpr* n, const Expr* e); const ZAMStmt ArithCoerce(const NameExpr* n, const Expr* e); diff --git a/src/script_opt/ZAM/Expr.cc b/src/script_opt/ZAM/Expr.cc index f1e660efb7..4670033811 100644 --- a/src/script_opt/ZAM/Expr.cc +++ b/src/script_opt/ZAM/Expr.cc @@ -26,6 +26,9 @@ const ZAMStmt ZAMCompiler::CompileExpr(const Expr* e) { case EXPR_ASSIGN: return CompileAssignExpr(static_cast(e)); + case EXPR_REC_ASSIGN_FIELDS: + case EXPR_REC_ADD_FIELDS: return CompileRecFieldUpdates(static_cast(e)); + case EXPR_INDEX_ASSIGN: { auto iae = static_cast(e); auto t = iae->GetOp1()->GetType()->Tag(); @@ -281,6 +284,77 @@ const ZAMStmt ZAMCompiler::CompileAssignExpr(const AssignExpr* e) { #include "ZAM-GenExprsDefsV.h" } +const ZAMStmt ZAMCompiler::CompileRecFieldUpdates(const RecordFieldUpdatesExpr* e) { + auto rhs = e->GetOp2()->AsNameExpr(); + + auto& rhs_map = e->RHSMap(); + + auto aux = new ZInstAux(0); + aux->map = e->LHSMap(); + aux->rhs_map = rhs_map; + + // Used to track the different types present, so we can see whether + // we can use a homogeneous operator or need a mixed one. Won't be + // needed if we're doing assignments, but handy if we're doing adds. 
+ std::set field_tags; + + size_t num_managed = 0; + + for ( auto i : rhs_map ) { + auto rt = rhs->GetType()->AsRecordType(); + auto rt_ft_i = rt->GetFieldType(i); + field_tags.insert(rt_ft_i->Tag()); + + if ( ZVal::IsManagedType(rt_ft_i) ) { + aux->is_managed.push_back(true); + ++num_managed; + } + else + // This will only be needed if is_managed winds up being true, + // but it's harmless to build it up in any case. + aux->is_managed.push_back(false); + + // The following is only needed for non-homogeneous "add"s, but + // likewise it's harmless to build it anyway. + aux->types.push_back(rt_ft_i); + } + + bool homogeneous = field_tags.size() == 1; + // Here we leverage the fact that C++ "+=" works identically for + // signed and unsigned int's. + if ( ! homogeneous && field_tags.size() == 2 && field_tags.count(TYPE_INT) > 0 && field_tags.count(TYPE_COUNT) > 0 ) + homogeneous = true; + + ZOp op; + + if ( e->Tag() == EXPR_REC_ASSIGN_FIELDS ) { + if ( num_managed == rhs_map.size() ) + // This operand allows for a simpler implementation. + op = OP_REC_ASSIGN_FIELDS_ALL_MANAGED_VV; + else if ( num_managed > 0 ) + op = OP_REC_ASSIGN_FIELDS_MANAGED_VV; + else + op = OP_REC_ASSIGN_FIELDS_VV; + } + + else if ( homogeneous ) { + if ( field_tags.count(TYPE_DOUBLE) > 0 ) + op = OP_REC_ADD_DOUBLE_FIELDS_VV; + else + // Here we leverage that += will work for both signed/unsigned. 
+ op = OP_REC_ADD_INT_FIELDS_VV; + } + + else + op = OP_REC_ADD_FIELDS_VV; + + auto lhs = e->GetOp1()->AsNameExpr(); + auto z = GenInst(op, lhs, rhs); + z.aux = aux; + + return AddInst(z); +} + const ZAMStmt ZAMCompiler::CompileZAMBuiltin(const NameExpr* lhs, const ScriptOptBuiltinExpr* zbi) { auto op1 = zbi->GetOp1(); auto op2 = zbi->GetOp2(); @@ -1253,10 +1327,11 @@ const ZAMStmt ZAMCompiler::ConstructSet(const NameExpr* n, const Expr* e) { return AddInst(z); } -const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e) { - ASSERT(e->Tag() == EXPR_RECORD_CONSTRUCTOR); - auto rc = static_cast(e); - auto rt = e->GetType()->AsRecordType(); +const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e, bool is_from_rec) { + auto rec_e = is_from_rec ? e->GetOp1().get() : e; + ASSERT(rec_e->Tag() == EXPR_RECORD_CONSTRUCTOR); + auto rc = static_cast(rec_e); + auto rt = rec_e->GetType()->AsRecordType(); auto aux = InternalBuildVals(rc->Op().get()); @@ -1266,7 +1341,7 @@ const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e) { // constructor. aux->zvec.resize(rt->NumFields()); - if ( pfs->HasSideEffects(SideEffectsOp::CONSTRUCTION, e->GetType()) ) + if ( pfs->HasSideEffects(SideEffectsOp::CONSTRUCTION, rec_e->GetType()) ) aux->can_change_non_locals = true; ZOp op; @@ -1331,33 +1406,89 @@ const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e) { else op = OP_CONSTRUCT_DIRECT_RECORD_V; - ZInstI z = network_time_index >= 0 ? GenInst(op, n, network_time_index) : GenInst(op, n); + ZInstI z; + + if ( is_from_rec ) { + // Map non-from-rec operand to the from-rec equivalent. 
+ switch ( op ) { + case OP_CONSTRUCT_KNOWN_RECORD_WITH_NT_VV: op = OP_CONSTRUCT_KNOWN_RECORD_WITH_NT_FROM_VVV; break; + + case OP_CONSTRUCT_KNOWN_RECORD_V: op = OP_CONSTRUCT_KNOWN_RECORD_FROM_VV; break; + + case OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_AND_NT_VV: + op = OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_AND_NT_FROM_VVV; + break; + + case OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_V: + op = OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_FROM_VV; + break; + + // Note, no case for OP_CONSTRUCT_DIRECT_RECORD_V - shouldn't + // happen given how we construct ConstructFromRecordExpr's. + default: reporter->InternalError("bad op in ZAMCompiler::ConstructRecord"); + } + + auto cfr = static_cast(e); + auto from_n = cfr->GetOp2()->AsNameExpr(); + if ( network_time_index >= 0 ) + z = GenInst(op, n, from_n, network_time_index); + else + z = GenInst(op, n, from_n); + + aux->lhs_map = cfr->LHSMap(); + aux->rhs_map = cfr->RHSMap(); + + for ( auto i : aux->lhs_map ) { + auto& field_t = rt->GetFieldType(i); + aux->is_managed.push_back(ZVal::IsManagedType(field_t)); + } + } + + else + z = network_time_index >= 0 ? GenInst(op, n, network_time_index) : GenInst(op, n); z.aux = aux; - z.t = e->GetType(); + z.t = rec_e->GetType(); auto inst = AddInst(z); // If one of the initialization values is an unspecified vector (which // in general we can't know until run-time) then we'll need to // "concretize" it. We first see whether this is a possibility, since - // it usually isn't, by counting up how many of the record fields are - // vectors. - std::vector vector_fields; // holds indices of the vector fields + // it usually isn't, by counting up how many of the initialized record + // fields are vectors. + + // First just gather up the types of all the fields, and their location + // in the target. + std::vector> init_field_types; + for ( int i = 0; i < z.aux->n; ++i ) { auto field_ind = map ? 
(*map)[i] : i; auto& field_t = rt->GetFieldType(field_ind); - if ( field_t->Tag() == TYPE_VECTOR && field_t->Yield()->Tag() != TYPE_ANY ) - vector_fields.push_back(field_ind); + init_field_types.emplace_back(field_t, field_ind); } + if ( is_from_rec ) + // Need to also check the source record. + for ( auto i : aux->lhs_map ) { + auto& field_t = rt->GetFieldType(i); + init_field_types.emplace_back(field_t, i); + } + + // Now spin through to find the vector fields. + + std::vector vector_fields; // holds indices of the vector fields + for ( auto& ft : init_field_types ) + if ( ft.first->Tag() == TYPE_VECTOR && ft.first->Yield()->Tag() != TYPE_ANY ) + vector_fields.push_back(ft.second); + if ( vector_fields.empty() ) // Common case of no vector fields, we're done. return inst; // Need to add a separate instruction for concretizing the fields. z = GenInst(OP_CONCRETIZE_VECTOR_FIELDS_V, n); - z.t = e->GetType(); + z.t = rec_e->GetType(); int nf = static_cast(vector_fields.size()); z.aux = new ZInstAux(nf); z.aux->elems_has_slots = false; // we're storing field offsets, not slots diff --git a/src/script_opt/ZAM/IterInfo.h b/src/script_opt/ZAM/IterInfo.h index 5a66680cb3..be81e00e4b 100644 --- a/src/script_opt/ZAM/IterInfo.h +++ b/src/script_opt/ZAM/IterInfo.h @@ -51,9 +51,9 @@ public: if ( lv < 0 ) continue; auto& var = frame[lv]; - if ( aux->lvt_is_managed[i] ) + if ( aux->is_managed[i] ) ZVal::DeleteManagedType(var); - auto& t = aux->loop_var_types[i]; + auto& t = aux->types[i]; var = ZVal(ind_lv_p, t); } diff --git a/src/script_opt/ZAM/OPs/ZAM.op b/src/script_opt/ZAM/OPs/ZAM.op index a44927af6c..6b18b4156e 100644 --- a/src/script_opt/ZAM/OPs/ZAM.op +++ b/src/script_opt/ZAM/OPs/ZAM.op @@ -1230,6 +1230,8 @@ eval ConstructTableOrSetPre() direct-unary-op Record-Constructor ConstructRecord +direct-unary-op Rec-Construct-With-Rec ConstructRecordFromRecord + macro ConstructRecordPost() auto& r = frame[z.v1].record_val; Unref(r); @@ -1245,29 +1247,150 @@ type V eval auto 
init_vals = z.aux->ToZValVecWithMap(frame); ConstructRecordPost() +macro AssignFromRec() + /* The following is defined below, for use by Rec-Assign-Fields */ + SetUpRecFieldOps(lhs_map) + auto is_managed = aux->is_managed; + for ( size_t i = 0U; i < n; ++i ) + { + auto rhs_i = rhs->RawField(rhs_map[i]); + if ( is_managed[i] ) + zeek::Ref(rhs_i.ManagedVal()); + init_vals[lhs_map[i]] = rhs_i; + } + +op Construct-Known-Record-From +type VV +eval auto init_vals = z.aux->ToZValVecWithMap(frame); + AssignFromRec() + ConstructRecordPost() + +macro DoNetworkTimeInit(slot) + init_vals[slot] = ZVal(run_state::network_time); + op Construct-Known-Record-With-NT type VV eval auto init_vals = z.aux->ToZValVecWithMap(frame); - ASSERT(! init_vals[z.v2]); - init_vals[z.v2] = ZVal(run_state::network_time); + DoNetworkTimeInit(z.v2) ConstructRecordPost() +op Construct-Known-Record-With-NT-From +type VVV +eval auto init_vals = z.aux->ToZValVecWithMap(frame); + DoNetworkTimeInit(z.v3) + AssignFromRec() + ConstructRecordPost() + +macro GenInits() + auto init_vals = z.aux->ToZValVecWithMap(frame); + for ( auto& fi : *z.aux->field_inits ) + init_vals[fi.first] = fi.second->Generate(); + op Construct-Known-Record-With-Inits type V -eval auto init_vals = z.aux->ToZValVecWithMap(frame); - for ( auto& fi : *z.aux->field_inits ) - init_vals[fi.first] = fi.second->Generate(); +eval GenInits() + ConstructRecordPost() + +op Construct-Known-Record-With-Inits-From +type VV +eval GenInits() + AssignFromRec() ConstructRecordPost() op Construct-Known-Record-With-Inits-And-NT type VV -eval auto init_vals = z.aux->ToZValVecWithMap(frame); - for ( auto& fi : *z.aux->field_inits ) - init_vals[fi.first] = fi.second->Generate(); - ASSERT(! 
init_vals[z.v2]); - init_vals[z.v2] = ZVal(run_state::network_time); +eval GenInits() + DoNetworkTimeInit(z.v2) ConstructRecordPost() +op Construct-Known-Record-With-Inits-And-NT-From +type VVV +eval GenInits() + DoNetworkTimeInit(z.v3) + AssignFromRec() + ConstructRecordPost() + +macro SetUpRecFieldOps(which_lhs_map) + auto lhs = frame[z.v1].record_val; + auto rhs = frame[z.v2].record_val; + auto aux = z.aux; + auto& lhs_map = aux->which_lhs_map; + auto& rhs_map = aux->rhs_map; + auto n = rhs_map.size(); + +op Rec-Assign-Fields +op1-read +type VV +eval SetUpRecFieldOps(map) + for ( size_t i = 0U; i < n; ++i ) + lhs->RawOptField(lhs_map[i]) = rhs->RawField(rhs_map[i]); + +macro DoManagedRecAssign() + auto is_managed = aux->is_managed; + for ( size_t i = 0U; i < n; ++i ) + if ( is_managed[i] ) + { + auto& lhs_i = lhs->RawOptField(lhs_map[i]); + auto rhs_i = rhs->RawField(rhs_map[i]); + zeek::Ref(rhs_i.ManagedVal()); + if ( lhs_i ) + ZVal::DeleteManagedType(*lhs_i); + lhs_i = rhs_i; + } + else + lhs->RawOptField(lhs_map[i]) = rhs->RawField(rhs_map[i]); +op Rec-Assign-Fields-Managed +op1-read +type VV +eval SetUpRecFieldOps(map) + DoManagedRecAssign() + +op Rec-Assign-Fields-All-Managed +op1-read +type VV +eval SetUpRecFieldOps(map) + for ( size_t i = 0U; i < n; ++i ) + { + auto& lhs_i = lhs->RawOptField(lhs_map[i]); + auto rhs_i = rhs->RawField(rhs_map[i]); + zeek::Ref(rhs_i.ManagedVal()); + if ( lhs_i ) + ZVal::DeleteManagedType(*lhs_i); + lhs_i = rhs_i; + } + +op Rec-Add-Int-Fields +op1-read +type VV +eval SetUpRecFieldOps(map) + for ( size_t i = 0U; i < n; ++i ) + lhs->RawField(lhs_map[i]).int_val += rhs->RawField(rhs_map[i]).int_val; + +op Rec-Add-Double-Fields +op1-read +type VV +eval SetUpRecFieldOps(map) + for ( size_t i = 0U; i < n; ++i ) + lhs->RawField(lhs_map[i]).double_val += rhs->RawField(rhs_map[i]).double_val; + +op Rec-Add-Fields +op1-read +type VV +eval SetUpRecFieldOps(map) + auto& types = aux->types; + for ( size_t i = 0U; i < n; ++i ) + { + auto& 
lhs_i = lhs->RawField(lhs_map[i]); + auto rhs_i = rhs->RawField(rhs_map[i]); + auto tag = types[i]->Tag(); + if ( tag == TYPE_INT ) + lhs_i.int_val += rhs_i.int_val; + else if ( tag == TYPE_COUNT ) + lhs_i.uint_val += rhs_i.uint_val; + else + lhs_i.double_val += rhs_i.double_val; + } + # Special instruction for concretizing vectors that are fields in a # newly-constructed record. "aux" holds which fields in the record to # inspect. diff --git a/src/script_opt/ZAM/Stmt.cc b/src/script_opt/ZAM/Stmt.cc index 5e71bdc05e..a8372ba332 100644 --- a/src/script_opt/ZAM/Stmt.cc +++ b/src/script_opt/ZAM/Stmt.cc @@ -711,8 +711,8 @@ const ZAMStmt ZAMCompiler::LoopOverTable(const ForStmt* f, const NameExpr* val) int slot = id->IsBlank() ? -1 : FrameSlot(id); aux->loop_vars.push_back(slot); auto& t = id->GetType(); - aux->loop_var_types.push_back(t); - aux->lvt_is_managed.push_back(ZVal::IsManagedType(t)); + aux->types.push_back(t); + aux->is_managed.push_back(ZVal::IsManagedType(t)); } bool no_loop_vars = (num_unused == loop_vars->length()); diff --git a/src/script_opt/ZAM/ZInst.h b/src/script_opt/ZAM/ZInst.h index 8bde9bc571..a0d56e9c73 100644 --- a/src/script_opt/ZAM/ZInst.h +++ b/src/script_opt/ZAM/ZInst.h @@ -484,20 +484,34 @@ public: // store here. bool can_change_non_locals = false; - // The following is used for constructing records, to map elements in - // slots/constants/types to record field offsets. + // The following is used for constructing records or in record chain + // operations, to map elements in slots/constants/types to record field + // offsets. std::vector map; + // The following is used when we need two maps, a LHS one (done with + // the above) and a RHS one. + std::vector rhs_map; + + // ... and the following when we need *three* (for constructing certain + // types of records). 
We could hack it in by adding onto "map" but + // this is cleaner, and we're not really concerned with the size of + // ZAM auxiliary information as it's not that commonly used, and doesn't + // grow during execution. + std::vector lhs_map; + + // For operations that need to track types corresponding to other vectors. + std::vector types; + + // For operations that mix managed and unmanaged assignments. + std::vector is_managed; + ///// The following four apply to looping over the elements of tables. // Frame slots of iteration variables, such as "[v1, v2, v3] in aggr". // A negative value means "skip assignment". std::vector loop_vars; - // Their types and whether they're managed. - std::vector loop_var_types; - std::vector lvt_is_managed; - // Type associated with the "value" entry, for "k, value in aggr" // iteration. TypePtr value_var_type; diff --git a/testing/btest/Baseline/language.record-chain-assign/output b/testing/btest/Baseline/language.record-chain-assign/output new file mode 100644 index 0000000000..996852b152 --- /dev/null +++ b/testing/btest/Baseline/language.record-chain-assign/output @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +[a=-42.3, b=-12, c=3, d=3, e=-10012, f=1003.0, g=tail] +[a=-84.6, b=-24, c=1006, d=1006, e=-20024, f=-9039.3, g=intervening] diff --git a/testing/btest/Baseline/opt.opt-no-files/.stderr b/testing/btest/Baseline/opt.opt-no-files/.stderr index ba158c1e5e..3fa6d06e85 100644 --- a/testing/btest/Baseline/opt.opt-no-files/.stderr +++ b/testing/btest/Baseline/opt.opt-no-files/.stderr @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-fatal error: no matching functions/files for C++ compilation +fatal error: no matching functions/files for script optimization diff --git a/testing/btest/Baseline/opt.opt-no-func/.stderr b/testing/btest/Baseline/opt.opt-no-func/.stderr index ba158c1e5e..3fa6d06e85 100644 --- a/testing/btest/Baseline/opt.opt-no-func/.stderr +++ b/testing/btest/Baseline/opt.opt-no-func/.stderr @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -fatal error: no matching functions/files for C++ compilation +fatal error: no matching functions/files for script optimization diff --git a/testing/btest/Baseline/opt.opt-no-func2/.stderr b/testing/btest/Baseline/opt.opt-no-func2/.stderr index ba158c1e5e..3fa6d06e85 100644 --- a/testing/btest/Baseline/opt.opt-no-func2/.stderr +++ b/testing/btest/Baseline/opt.opt-no-func2/.stderr @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -fatal error: no matching functions/files for C++ compilation +fatal error: no matching functions/files for script optimization diff --git a/testing/btest/language/record-chain-assign.zeek b/testing/btest/language/record-chain-assign.zeek new file mode 100644 index 0000000000..acccacc7cc --- /dev/null +++ b/testing/btest/language/record-chain-assign.zeek @@ -0,0 +1,47 @@ +# @TEST-DOC: Test for correct ZAM optimization of record "chains". 
+# +# @TEST-EXEC: zeek -b -O ZAM %INPUT >output +# @TEST-EXEC: btest-diff output + +type R: record { + a: count; + b: int; + c: double; +}; + +type Rev_R: record { + a: double; + b: int; + c: count; + + d: count; + e: int; + f: double; + + g: string; +}; + +global r1 = R($a = 3, $b = -12, $c = -42.3); +global r2 = R($a = 1003, $b = -10012, $c = -10042.3); + +global r3: Rev_R; + +r3$a = r1$c; +r3$b = r1$b; +r3$c = r1$a; +r3$d = r1$a; +r3$e = r2$b; +r3$f = r2$a; +r3$g = "tail"; + +print r3; + +r3$a += r1$c; +r3$b += r1$b; +r3$g = "intervening"; +r3$c += r2$a; +r3$d += r2$a; +r3$e += r2$b; +r3$f += r2$c; + +print r3; From 4b26dfa71571439fffb42b430773b4028cf26c47 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Mon, 24 Jun 2024 11:25:21 +0200 Subject: [PATCH 46/89] zeek-testing-private: Update baseline, after merge --- testing/external/commit-hash.zeek-testing-private | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/external/commit-hash.zeek-testing-private b/testing/external/commit-hash.zeek-testing-private index c5f7f949f0..b41c8fda5c 100644 --- a/testing/external/commit-hash.zeek-testing-private +++ b/testing/external/commit-hash.zeek-testing-private @@ -1 +1 @@ -1d47e303f6192786e8279481ea7be00b317f033a +3df94cb39ab9c0079e82a7f2cd5edb561c2ec07b From f0dad976e64847715104d41757506238926813f1 Mon Sep 17 00:00:00 2001 From: Benjamin Bannier Date: Tue, 25 Jun 2024 11:36:14 +0200 Subject: [PATCH 47/89] Bump auxil/spicy to latest development snapshot --- auxil/spicy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auxil/spicy b/auxil/spicy index 588c79a8c4..b8299609eb 160000 --- a/auxil/spicy +++ b/auxil/spicy @@ -1 +1 @@ -Subproject commit 588c79a8c4d3cb7e7e8265a9e85d59cb4bd5f972 +Subproject commit b8299609eb579722d1ba9b261f00660f5cc84e10 From 5248f608060d234fdf584b05dc49def2a4ca5136 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Tue, 25 Jun 2024 17:16:16 +0200 Subject: [PATCH 48/89] coverage/lcov_html: Allow missing coveralls 
token This is a fixup for 0cd023b83919fca7bfac55e75a41f724e820fd26 which currently causes ASAN coverage builds to fail for non-master branches when due to a missing COVERALLS_REPO_TOKEN. Instead of bailing out for non-master branches, pass `--dry-run` to the coveralls-lcov invocation to test more of the script. --- testing/coverage/lcov_html.sh | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/testing/coverage/lcov_html.sh b/testing/coverage/lcov_html.sh index 18898048b4..f94c70184f 100755 --- a/testing/coverage/lcov_html.sh +++ b/testing/coverage/lcov_html.sh @@ -63,15 +63,14 @@ while (("$#")); do fi ;; --coveralls) - if [ ${#2} -eq 0 ]; then - echo "ERROR: Coveralls repo token must be passed with --coveralls argument." - echo - usage - fi - HTML_REPORT=0 - COVERALLS_REPO_TOKEN=$2 - shift 2 + if [ ${#2} -eq 0 ]; then + echo "WARN: No coveralls token, running coveralls-lcov --dry-run." + shift 1 + else + COVERALLS_REPO_TOKEN=$2 + shift 2 + fi ;; --help) usage @@ -131,19 +130,19 @@ if [ $HTML_REPORT -eq 1 ]; then echo -n "Creating HTML files... " verify_run "genhtml --ignore-errors empty -o $COVERAGE_HTML_DIR $COVERAGE_FILE" else - if [ "${CIRRUS_BRANCH}" != "master" ]; then - echo "Coverage upload skipped for non-master branches" - exit 0 - fi - # The data we send to coveralls has a lot of duplicate files in it because of the # zeek symlink in the src directory. Run a script that cleans that up. echo -n "Cleaning coverage data for Coveralls..." COVERAGE_FILE_CLEAN="${COVERAGE_FILE}.clean" verify_run "testing/coverage/coverage_cleanup.py ${COVERAGE_FILE} > ${COVERAGE_FILE_CLEAN} 2>&1" - echo -n "Reporting to Coveralls..." - coveralls_cmd="coveralls-lcov -t ${COVERALLS_REPO_TOKEN}" + if [ "${CIRRUS_BRANCH}" == "master" ] && [ -n "${COVERALLS_REPO_TOKEN}" ]; then + echo -n "Reporting to Coveralls..." 
+ coveralls_cmd="coveralls-lcov -t ${COVERALLS_REPO_TOKEN}" + else + echo "Reporting to Coveralls in --dry-run mode" + coveralls_cmd="coveralls-lcov --dry-run" + fi # If we're being called by Cirrus, add some additional information to the output. if [ -n "${CIRRUS_BUILD_ID}" ]; then From fcca8670d3cf98cf2bd71172d0d8993470cc4f40 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Thu, 27 Jun 2024 14:03:12 +0200 Subject: [PATCH 49/89] iosource/Manager: Honor manage_lifetime and dont_count for short-lived IO sources If an IO source is registered and becomes dry at runtime, the IO manager would not honor its manage_lifetime or dont_count attribute during collection, resulting in memory leaks. This probably hasn't mattered so far as there's no IO sources registered in-tree at runtime using manage_lifetime=true. --- src/iosource/Manager.cc | 19 +++++++++++++++++-- src/iosource/Manager.h | 28 ++++++++++++++++++++++------ 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/src/iosource/Manager.cc b/src/iosource/Manager.cc index fcb4a1e961..952bb13704 100644 --- a/src/iosource/Manager.cc +++ b/src/iosource/Manager.cc @@ -104,6 +104,22 @@ void Manager::Wakeup(std::string_view where) { wakeup->Ping(where); } +void Manager::ReapSource(Source* src) { + auto* iosource = src->src; + assert(! iosource->IsOpen()); + + DBG_LOG(DBG_MAINLOOP, "Reaping %s", src->src->Tag()); + iosource->Done(); + + if ( src->manage_lifetime ) + delete iosource; + + if ( src->dont_count ) + dont_counts--; + + delete src; +} + void Manager::FindReadySources(ReadySources* ready) { ready->clear(); @@ -111,8 +127,7 @@ void Manager::FindReadySources(ReadySources* ready) { // remove at most one each time. for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i ) if ( ! 
(*i)->src->IsOpen() ) { - (*i)->src->Done(); - delete *i; + ReapSource(*i); sources.erase(i); break; } diff --git a/src/iosource/Manager.h b/src/iosource/Manager.h index c533afb982..48f8814b2b 100644 --- a/src/iosource/Manager.h +++ b/src/iosource/Manager.h @@ -143,6 +143,15 @@ public: void Wakeup(std::string_view where); private: + /** + * Internal data structure for managing registered IOSources. + */ + struct Source { + IOSource* src = nullptr; + bool dont_count = false; + bool manage_lifetime = false; + }; + /** * Calls the appropriate poll method to gather a set of IOSources that are * ready for processing. @@ -170,6 +179,19 @@ private: void RemoveAll(); + /** + * Reap a closed IO source. + * + * Reaping involves calling IOSource::Done() on the underlying IOSource, + * freeing it if Source.manage_lifetime is \c true, updating \c dont_counts + * and freeing \a src, making it invalid. + * + * The caller ensures \a src is removed from Manager.sources. + * + * @param src The source to reap. + */ + void ReapSource(Source* src); + class WakeupHandler final : public IOSource { public: WakeupHandler(); @@ -192,12 +214,6 @@ private: zeek::detail::Flare flare; }; - struct Source { - IOSource* src = nullptr; - bool dont_count = false; - bool manage_lifetime = false; - }; - using SourceList = std::vector; SourceList sources; From 0451a4038c84980afddd243d1343b27dadb9011f Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Tue, 18 Jun 2024 12:38:53 +0200 Subject: [PATCH 50/89] iosource/Manager: Do not manage lifetime of pkt_src Now that dry sources are properly reaped and freed, an offline packet source would be deleted once dry, resulting in GetPktSrc() returning a wild pointer. Don't manage the packet source lifetime and instead free it during Manager destruction. 
--- src/iosource/Manager.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/iosource/Manager.cc b/src/iosource/Manager.cc index 952bb13704..a7821e1a5f 100644 --- a/src/iosource/Manager.cc +++ b/src/iosource/Manager.cc @@ -76,6 +76,9 @@ Manager::~Manager() { pkt_dumpers.clear(); + // Was registered without lifetime management. + delete pkt_src; + #ifndef _MSC_VER // There's a bug here with builds on Windows that causes an assertion with debug builds // related to libkqueue returning a zero for the file descriptor. The assert happens @@ -357,7 +360,7 @@ void Manager::Register(IOSource* src, bool dont_count, bool manage_lifetime) { void Manager::Register(PktSrc* src) { pkt_src = src; - Register(src, false); + Register(src, false, false); // Once we know if the source is live or not, adapt the // poll_interval accordingly. From b3118d2a4847914bb4c7caf84b674dca97b6d853 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Tue, 18 Jun 2024 09:41:08 +0200 Subject: [PATCH 51/89] threading/MsgThread: Decouple IO source and thread lifetimes MsgThread acting as an IO source can result in the situation where the threading manager's heartbeat timer deletes a finished MsgThread instance, but at the same time this thread is in the list of ready IO sources the main loop is currently processing. Fix this by decoupling the lifetime of the IO source part and properly registering as lifetime managed IO sources with the IO manager. Fixes #3682 --- src/threading/MsgThread.cc | 73 ++++++++++++++++++++++++++++++++------ src/threading/MsgThread.h | 11 +++--- 2 files changed, 68 insertions(+), 16 deletions(-) diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index a1d2e5e3da..022a8ce2b4 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -165,6 +165,50 @@ bool ReporterMessage::Process() { return true; } +// This is the IO source used by MsgThread. +// +// The lifetime of the IO source is decoupled from +// the thread. 
The thread may be terminated prior +// to the IO source being properly unregistered and +// removed by the IO manager. +class IOSource : public iosource::IOSource { +public: + explicit IOSource(MsgThread* thread) : thread(thread) { + if ( ! iosource_mgr->RegisterFd(flare.FD(), this) ) + reporter->InternalError("Failed to register MsgThread FD with iosource_mgr"); + + SetClosed(false); + } + + ~IOSource() override { + if ( ! iosource_mgr->UnregisterFd(flare.FD(), this) ) + reporter->InternalError("Failed to unregister MsgThread FD from iosource_mgr"); + } + + void Process() override { + flare.Extinguish(); + + if ( thread ) + thread->Process(); + } + + const char* Tag() override { return thread ? thread->Name() : ""; } + + double GetNextTimeout() override { return -1; } + + + void Fire() { flare.Fire(); }; + + void Close() { + thread = nullptr; + SetClosed(true); + } + +private: + MsgThread* thread = nullptr; + zeek::detail::Flare flare; +}; + } // namespace detail ////// Methods. @@ -181,16 +225,20 @@ MsgThread::MsgThread() : BasicThread(), queue_in(this, nullptr), queue_out(nullp failed = false; thread_mgr->AddMsgThread(this); - if ( ! iosource_mgr->RegisterFd(flare.FD(), this) ) - reporter->FatalError("Failed to register MsgThread fd with iosource_mgr"); + io_source = new detail::IOSource(this); - SetClosed(false); + // Register IOSource as non-counting lifetime managed IO source. + iosource_mgr->Register(io_source, true); } MsgThread::~MsgThread() { - // Unregister this thread from the iosource manager so it doesn't wake - // up the main poll anymore. - iosource_mgr->UnregisterFd(flare.FD(), this); + // Unregister this thread from the IO source so we don't + // get Process() callbacks anymore. The IO source itself + // is life-time managed by the IO manager. 
+ if ( io_source ) { + io_source->Close(); + io_source = nullptr; + } } void MsgThread::OnSignalStop() { @@ -253,7 +301,13 @@ void MsgThread::OnWaitForStop() { } void MsgThread::OnKill() { - SetClosed(true); + // Ensure the IO source is closed and won't call Process() on this + // thread anymore. The thread got killed, so the threading manager will + // remove it forcefully soon. + if ( io_source ) { + io_source->Close(); + io_source = nullptr; + } // Send a message to unblock the reader if its currently waiting for // input. This is just an optimization to make it terminate more @@ -345,7 +399,8 @@ void MsgThread::SendOut(BasicOutputMessage* msg, bool force) { ++cnt_sent_out; - flare.Fire(); + if ( io_source ) + io_source->Fire(); } void MsgThread::SendEvent(const char* name, const int num_vals, Value** vals) { @@ -418,8 +473,6 @@ void MsgThread::GetStats(Stats* stats) { } void MsgThread::Process() { - flare.Extinguish(); - while ( HasOut() ) { Message* msg = RetrieveOut(); assert(msg); diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 55b8f0ba1d..259e64b11f 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -26,6 +26,7 @@ class HeartbeatMessage; class FinishMessage; class FinishedMessage; class KillMeMessage; +class IOSource; } // namespace detail @@ -40,7 +41,7 @@ class KillMeMessage; * that happens, the thread stops accepting any new messages, finishes * processes all remaining ones still in the queue, and then exits. */ -class MsgThread : public BasicThread, public iosource::IOSource { +class MsgThread : public BasicThread { public: /** * Constructor. It automatically registers the thread with the @@ -209,11 +210,9 @@ public: void GetStats(Stats* stats); /** - * Overridden from iosource::IOSource. + * Process() forwarded to from detail::IOSource. 
*/ - void Process() override; - const char* Tag() override { return Name(); } - double GetNextTimeout() override { return -1; } + void Process(); protected: friend class Manager; @@ -362,7 +361,7 @@ private: bool child_sent_finish; // Child thread asked to be finished. bool failed; // Set to true when a command failed. - zeek::detail::Flare flare; + detail::IOSource* io_source = nullptr; // IO source registered with the IO manager. }; /** From 739a8ac5090b2b2299c2d4c89ac329a1907f89e4 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Thu, 27 Jun 2024 14:14:39 +0200 Subject: [PATCH 52/89] iosource/Manager: Reap dry sources while computing timeout Avoids looping over the sources vector twice and should result in the same behavior. --- src/iosource/Manager.cc | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/iosource/Manager.cc b/src/iosource/Manager.cc index a7821e1a5f..bbef07a922 100644 --- a/src/iosource/Manager.cc +++ b/src/iosource/Manager.cc @@ -126,20 +126,6 @@ void Manager::ReapSource(Source* src) { void Manager::FindReadySources(ReadySources* ready) { ready->clear(); - // Remove sources which have gone dry. For simplicity, we only - // remove at most one each time. - for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i ) - if ( ! (*i)->src->IsOpen() ) { - ReapSource(*i); - sources.erase(i); - break; - } - - // If there aren't any sources and exit_only_after_terminate is false, just - // return an empty set of sources. We want the main loop to end. - if ( Size() == 0 && (! BifConst::exit_only_after_terminate || run_state::terminating) ) - return; - double timeout = -1; IOSource* timeout_src = nullptr; bool time_to_poll = false; @@ -151,7 +137,8 @@ void Manager::FindReadySources(ReadySources* ready) { } // Find the source with the next timeout value. 
- for ( auto src : sources ) { + for ( auto i = sources.begin(); i != sources.end(); /* noop */ ) { + auto* src = *i; auto iosource = src->src; if ( iosource->IsOpen() ) { double next = iosource->GetNextTimeout(); @@ -179,7 +166,19 @@ void Manager::FindReadySources(ReadySources* ready) { ready->push_back({pkt_src, -1, 0}); } } + ++i; } + else { + ReapSource(src); + i = sources.erase(i); + } + } + + // If there aren't any sources and exit_only_after_terminate is false, just + // return an empty set of sources. We want the main loop to end. + if ( Size() == 0 && (! BifConst::exit_only_after_terminate || run_state::terminating) ) { + ready->clear(); + return; } DBG_LOG(DBG_MAINLOOP, "timeout: %f ready size: %zu time_to_poll: %d\n", timeout, ready->size(), time_to_poll); From f050d96503b9d551543b0503e32c51de5c500aa8 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Tue, 2 Jul 2024 12:12:15 +0200 Subject: [PATCH 53/89] threading/Manager: Warn if threads are added after termination The core.file-analyzer-violation test showed that it's possible to create new threads (log writers) when Zeek is in the process of terminating. This can result in the IO manager's deconstructor deleting IO sources for threads that are still running. This is sort of a scripting issue, so for now log a reporter warning when it happens to have a bit of a bread-crumb what might be going on. In the future it might make sense to plug APIs with zeek_is_terminating(). 
--- src/threading/Manager.cc | 9 +++++++++ src/threading/Manager.h | 1 + .../Baseline/core.file-analyzer-violation/.stderr | 2 ++ .../core.file-analyzer-violation/files.log | 1 + testing/btest/core/file-analyzer-violation.zeek | 14 ++++++++++++++ 5 files changed, 27 insertions(+) create mode 100644 testing/btest/Baseline/core.file-analyzer-violation/.stderr diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 9bcdc925f4..5620a0bf80 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -28,6 +28,7 @@ Manager::Manager() { did_process = true; next_beat = 0; terminating = false; + terminated = false; } Manager::~Manager() { @@ -61,10 +62,18 @@ void Manager::Terminate() { all_threads.clear(); msg_threads.clear(); terminating = false; + terminated = true; } void Manager::AddThread(BasicThread* thread) { DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name()); + + // This can happen when log writers or other threads are + // created during the shutdown phase and results in unclean + // shutdowns. + if ( terminated ) + reporter->Warning("Thread %s added after threading manager terminated", thread->Name()); + all_threads.push_back(thread); if ( ! heartbeat_timer_running ) diff --git a/src/threading/Manager.h b/src/threading/Manager.h index 875e35290a..b075e6a70d 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -146,6 +146,7 @@ private: bool did_process; // True if the last Process() found some work to do. double next_beat; // Timestamp when the next heartbeat will be sent. bool terminating; // True if we are in Terminate(). + bool terminated; // True if Terminate() finished. 
msg_stats_list stats; diff --git a/testing/btest/Baseline/core.file-analyzer-violation/.stderr b/testing/btest/Baseline/core.file-analyzer-violation/.stderr new file mode 100644 index 0000000000..e3f6131b1d --- /dev/null +++ b/testing/btest/Baseline/core.file-analyzer-violation/.stderr @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +received termination signal diff --git a/testing/btest/Baseline/core.file-analyzer-violation/files.log b/testing/btest/Baseline/core.file-analyzer-violation/files.log index abef87d6a6..3b26732673 100644 --- a/testing/btest/Baseline/core.file-analyzer-violation/files.log +++ b/testing/btest/Baseline/core.file-analyzer-violation/files.log @@ -8,3 +8,4 @@ #fields ts fuid uid id.orig_h id.orig_p id.resp_h id.resp_p source depth analyzers mime_type filename duration local_orig is_orig seen_bytes total_bytes missing_bytes overflow_bytes timedout parent_fuid #types time string string addr port addr port string count set[string] string string interval bool bool count count count count bool string XXXXXXXXXX.XXXXXX FKPuH630Tmj6UQUMP7 - - - - - ./myfile.exe 0 PE application/x-dosexec - 0.000000 - - 64 - 0 0 F - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/core/file-analyzer-violation.zeek b/testing/btest/core/file-analyzer-violation.zeek index 6d73d2bfb6..70757bc8d2 100644 --- a/testing/btest/core/file-analyzer-violation.zeek +++ b/testing/btest/core/file-analyzer-violation.zeek @@ -1,20 +1,34 @@ # @TEST-DOC: Verify analyzer_violation_info is raised for an invalid PE file. 
# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff .stderr # @TEST-EXEC: btest-diff .stdout # @TEST-EXEC: btest-diff files.log @load base/frameworks/files @load base/files/pe +redef exit_only_after_terminate = T; + event analyzer_violation_info(tag: AllAnalyzers::Tag, info: AnalyzerViolationInfo) { print tag, info$reason, info$f$id, cat(info$f$info$analyzers); + terminate(); + } + +event force_terminate() + { + if ( zeek_is_terminating() ) + return; + + Reporter::error("force_terminate called - timeout?"); + terminate(); } event zeek_init() { local source: string = "./myfile.exe"; Input::add_analysis([$source=source, $name=source]); + schedule 10sec { force_terminate() }; } # This file triggers a binpac exception for PE that is reported through From c2dd3dfad070cfe658b9a01eff1d049e624a1fd4 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Tue, 2 Jul 2024 19:42:10 +0200 Subject: [PATCH 54/89] Bump cmake submodule [nomail] --- CHANGES | 4 ++++ VERSION | 2 +- cmake | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 747464bfbe..b70d52dbe8 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,7 @@ +7.0.0-dev.414 | 2024-07-02 19:42:10 +0200 + + * Bump cmake submodule [nomail] (Arne Welzel, Corelight) + 7.0.0-dev.413 | 2024-07-02 14:41:27 +0200 * threading/Manager: Warn if threads are added after termination (Arne Welzel, Corelight) diff --git a/VERSION b/VERSION index d44b431827..e432e78074 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -7.0.0-dev.413 +7.0.0-dev.414 diff --git a/cmake b/cmake index 2df3b8e82a..d996924f5c 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 2df3b8e82a843b7b8187963d259d32a9fb42b873 +Subproject commit d996924f5c94231290ec6991397d04df2adef6c3 From a29f862f9539c5ed47eb0b6e7fd9787eae44217c Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Fri, 21 Jun 2024 17:45:59 -0700 Subject: [PATCH 55/89] Document the field_escape_pattern in the to_json() BiF This argument, and its corresponding use in 
Val.cc's BuildJSON(), were never explained. --- src/Val.cc | 23 +++++++++++++++++++++-- src/Val.h | 14 ++++++++++++++ src/zeek.bif | 6 ++++++ 3 files changed, 41 insertions(+), 2 deletions(-) diff --git a/src/Val.cc b/src/Val.cc index f97601236a..2df395eb03 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -329,8 +329,27 @@ TableValPtr Val::GetRecordFields() { return rt->GetRecordFieldsVal(rv); } -// This is a static method in this file to avoid including rapidjson's headers in Val.h because -// they're huge. +// A predicate to identify those types we render as a string in JSON. +static bool IsQuotedJSONType(const TypePtr& t) { + if ( t == nullptr ) + return false; + + switch ( t->Tag() ) { + case TYPE_ADDR: + case TYPE_ENUM: + case TYPE_FILE: + case TYPE_FUNC: + case TYPE_INTERVAL: + case TYPE_PATTERN: + case TYPE_STRING: + case TYPE_SUBNET: + case TYPE_OPAQUE: return true; + default: return false; + } +} + +// This is a static method in this file to avoid including rapidjson's headers +// in Val.h, because they're huge. static void BuildJSON(json::detail::NullDoubleWriter& writer, Val* val, bool only_loggable = false, RE_Matcher* re = nullptr, const string& key = "") { if ( ! key.empty() ) diff --git a/src/Val.h b/src/Val.h index c8392bbcfd..a74f68f8d6 100644 --- a/src/Val.h +++ b/src/Val.h @@ -236,6 +236,20 @@ public: TableValPtr GetRecordFields(); + /** + * Renders the Val into JSON string representation. For record values + * contained anywhere in the Val, two arguments control the JSON result + * (they have no effect on other types): + * + * @param only_loggable If true, skips any fields that don't have the &log + * attribute. + * + * @param re The regular expression matcher, if given, is used to strip the + * first match on any record field name in the resulting output. See the + * to_json() BiF for context. + * + * @return JSON data representing the Val. 
+ */ StringValPtr ToJSON(bool only_loggable = false, RE_Matcher* re = nullptr); template diff --git a/src/zeek.bif b/src/zeek.bif index 2d3d73fcda..eccc7f0292 100644 --- a/src/zeek.bif +++ b/src/zeek.bif @@ -5061,6 +5061,12 @@ function anonymize_addr%(a: addr, cl: IPAddrAnonymizationClass%): addr ## only_loggable: If the v value is a record this will only cause ## fields with the &log attribute to be included in the JSON. ## +## field_escape_pattern: If the v value is a record, the given pattern is +## matched against the field names of its type, and +## the first match, if any, is stripped from the +## rendered name. The default pattern strips a leading +## underscore. +## ## returns: a JSON formatted string. ## ## .. zeek:see:: fmt cat cat_sep string_cat print_raw from_json From df645e9bb2bd6019d61dd00ebf01d9666b14a493 Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Mon, 24 Jun 2024 21:59:59 -0700 Subject: [PATCH 56/89] Support map-based definition of ports in from_json() The from_json() BiF and its underlying code in Val.cc currently expect ports expressed as a string ('80/tcp' etc). Zeek's own serialization via ToJSON() renders them as an object ('{"port":80, "proto":"tcp"}'). This adds support for the latter format to from_json(), so serialized values can be read back. --- src/Val.cc | 62 +++++++++++++------ .../btest/Baseline/bifs.from_json-6/.stderr | 3 +- .../btest/Baseline/bifs.from_json-6/.stdout | 2 + testing/btest/bifs/from_json.zeek | 6 +- 4 files changed, 51 insertions(+), 22 deletions(-) diff --git a/src/Val.cc b/src/Val.cc index 2df395eb03..3cb497ebe6 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -943,28 +943,50 @@ static std::variant BuildVal(const rapidjson::Value& j, con } case TYPE_PORT: { - if ( ! j.IsString() ) - return mismatch_err(); - - int port = 0; - if ( j.GetStringLength() > 0 && j.GetStringLength() < 10 ) { - char* slash; - errno = 0; - port = strtol(j.GetString(), &slash, 10); - if ( ! 
errno ) { - ++slash; - if ( util::streq(slash, "tcp") ) - return val_mgr->Port(port, TRANSPORT_TCP); - else if ( util::streq(slash, "udp") ) - return val_mgr->Port(port, TRANSPORT_UDP); - else if ( util::streq(slash, "icmp") ) - return val_mgr->Port(port, TRANSPORT_ICMP); - else if ( util::streq(slash, "unknown") ) - return val_mgr->Port(port, TRANSPORT_UNKNOWN); + if ( j.IsString() ) { + int port = 0; + if ( j.GetStringLength() > 0 && j.GetStringLength() < 10 ) { + char* slash; + errno = 0; + port = strtol(j.GetString(), &slash, 10); + if ( ! errno ) { + ++slash; + if ( util::streq(slash, "tcp") ) + return val_mgr->Port(port, TRANSPORT_TCP); + else if ( util::streq(slash, "udp") ) + return val_mgr->Port(port, TRANSPORT_UDP); + else if ( util::streq(slash, "icmp") ) + return val_mgr->Port(port, TRANSPORT_ICMP); + else if ( util::streq(slash, "unknown") ) + return val_mgr->Port(port, TRANSPORT_UNKNOWN); + } } - } - return "wrong port format, must be /[0-9]{1,5}\\/(tcp|udp|icmp|unknown)/"; + return "wrong port format, string must be /[0-9]{1,5}\\/(tcp|udp|icmp|unknown)/"; + } + else if ( j.IsObject() ) { + if ( ! j.HasMember("port") || ! j.HasMember("proto") ) + return "wrong port format, object must have 'port' and 'proto' members"; + if ( ! j["port"].IsNumber() ) + return "wrong port format, port must be a number"; + if ( ! 
j["proto"].IsString() ) + return "wrong port format, protocol must be a string"; + + std::string proto{j["proto"].GetString()}; + + if ( proto == "tcp" ) + return val_mgr->Port(j["port"].GetInt(), TRANSPORT_TCP); + if ( proto == "udp" ) + return val_mgr->Port(j["port"].GetInt(), TRANSPORT_UDP); + if ( proto == "icmp" ) + return val_mgr->Port(j["port"].GetInt(), TRANSPORT_ICMP); + if ( proto == "unknown" ) + return val_mgr->Port(j["port"].GetInt(), TRANSPORT_UNKNOWN); + + return "wrong port format, invalid protocol string"; + } + else + return "wrong port format, must be string or object"; } case TYPE_PATTERN: { diff --git a/testing/btest/Baseline/bifs.from_json-6/.stderr b/testing/btest/Baseline/bifs.from_json-6/.stderr index bafb1a49e9..0b278db5ae 100644 --- a/testing/btest/Baseline/bifs.from_json-6/.stderr +++ b/testing/btest/Baseline/bifs.from_json-6/.stderr @@ -1,2 +1,3 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -error in <...>/from_json.zeek, line 5: wrong port format, must be <...>/(tcp|udp|icmp|unknown)/ (from_json("80", to_any_coerce port_t, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 8: wrong port format, string must be <...>/(tcp|udp|icmp|unknown)/ (from_json("80", to_any_coerce port_t, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 9: wrong port format, object must have 'port' and 'proto' members (from_json({}, to_any_coerce port_t, from_json_default_key_mapper)) diff --git a/testing/btest/Baseline/bifs.from_json-6/.stdout b/testing/btest/Baseline/bifs.from_json-6/.stdout index aee95c8a8e..a4da3aa3e4 100644 --- a/testing/btest/Baseline/bifs.from_json-6/.stdout +++ b/testing/btest/Baseline/bifs.from_json-6/.stdout @@ -1,2 +1,4 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+[v=80/tcp, valid=T] +[v=, valid=F] [v=, valid=F] diff --git a/testing/btest/bifs/from_json.zeek b/testing/btest/bifs/from_json.zeek index 9351c3b9ca..1348933123 100644 --- a/testing/btest/bifs/from_json.zeek +++ b/testing/btest/bifs/from_json.zeek @@ -73,10 +73,14 @@ event zeek_init() @TEST-START-NEXT type port_t: port; -# wrong port format +# additional & incorrect port formats event zeek_init() { + # Ports can also be given as objects: + print from_json("{\"port\":80,\"proto\":\"tcp\"}", port_t); + # These are violations: print from_json("\"80\"", port_t); + print from_json("{}", port_t); } @TEST-START-NEXT From 92c1098e97431147ac0f7e08a818a6cb4b3d5308 Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Fri, 28 Jun 2024 15:24:52 -0700 Subject: [PATCH 57/89] Support table deserialization in from_json() This allows additional data roundtripping through JSON since to_json() already supports tables. There are some subtleties around the formatting of strings in JSON object keys, for which this adds a bit of helper infrastructure. This also expands the language.table test to verify the roundtrips, and adapts bif.from_json to include a table in the test record. --- src/Val.cc | 83 +++++++++---- .../btest/Baseline/bifs.from_json-5/.stderr | 2 +- testing/btest/Baseline/bifs.from_json/.stdout | 2 + testing/btest/Baseline/language.table/out | 8 ++ testing/btest/bifs/from_json.zeek | 3 +- testing/btest/language/table.zeek | 114 ++++++++++++------ 6 files changed, 148 insertions(+), 64 deletions(-) diff --git a/src/Val.cc b/src/Val.cc index 3cb497ebe6..cbc883347c 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -330,7 +330,7 @@ TableValPtr Val::GetRecordFields() { } // A predicate to identify those types we render as a string in JSON. 
-static bool IsQuotedJSONType(const TypePtr& t) { +static bool UsesJSONStringType(const TypePtr& t) { if ( t == nullptr ) return false; @@ -440,8 +440,10 @@ static void BuildJSON(json::detail::NullDoubleWriter& writer, Val* val, bool onl BuildJSON(key_writer, entry_key, only_loggable, re); string key_str = buffer.GetString(); - if ( key_str.length() >= 2 && key_str[0] == '"' && key_str[key_str.length() - 1] == '"' ) - // Strip quotes. + // Strip the quotes for any type we render as a string. This + // makes the JSON object's keys look more natural, yielding + // '{ "foo": ... }', not '{ "\"foo\"": ... }', for such types. + if ( UsesJSONStringType(entry_key->GetType()) ) key_str = key_str.substr(1, key_str.length() - 2); BuildJSON(writer, entry->GetVal().get(), only_loggable, re, key_str); @@ -1064,34 +1066,69 @@ static std::variant BuildVal(const rapidjson::Value& j, con } case TYPE_TABLE: { - if ( ! j.IsArray() ) - return mismatch_err(); - - if ( ! t->IsSet() ) - return util::fmt("tables are not supported"); - - auto tt = t->AsSetType(); - auto tl = tt->GetIndices(); + auto tt = t->AsTableType(); // The table vs set type does not matter below auto tv = make_intrusive(IntrusivePtr{NewRef{}, tt}); + auto tl = tt->GetIndices(); - for ( const auto& item : j.GetArray() ) { - std::variant v; + if ( t->IsSet() ) { + if ( ! j.IsArray() ) + return mismatch_err(); - if ( tl->GetTypes().size() == 1 ) - v = BuildVal(item, tl->GetPureType(), key_func); - else - v = BuildVal(item, tl, key_func); + for ( const auto& item : j.GetArray() ) { + std::variant v; - if ( ! get_if(&v) ) - return v; + if ( tl->GetTypes().size() == 1 ) + v = BuildVal(item, tl->GetPureType(), key_func); + else + v = BuildVal(item, tl, key_func); - if ( ! std::get(v) ) - continue; + if ( ! get_if(&v) ) + return v; + if ( ! std::get(v) ) + continue; - tv->Assign(std::move(std::get(v)), nullptr); + tv->Assign(std::move(std::get(v)), nullptr); + } + + return tv; } + else { + if ( ! 
j.IsObject() ) + return mismatch_err(); - return tv; + for ( auto it = j.MemberBegin(); it != j.MemberEnd(); ++it ) { + rapidjson::Document idxstr; + idxstr.Parse(it->name.GetString(), it->name.GetStringLength()); + + std::variant idx; + + if ( tl->GetTypes().size() > 1 ) + idx = BuildVal(idxstr, tl, key_func); + else if ( UsesJSONStringType(tl->GetPureType()) ) + // Parse this with the quotes the string came with. This + // mirrors the quote-stripping in BuildJSON(). + idx = BuildVal(it->name, tl->GetPureType(), key_func); + else + // Parse the string's content, not the full JSON string. + idx = BuildVal(idxstr, tl->GetPureType(), key_func); + + if ( ! get_if(&idx) ) + return idx; + if ( ! std::get(idx) ) + continue; + + auto v = BuildVal(it->value, tt->Yield(), key_func); + + if ( ! get_if(&v) ) + return v; + if ( ! std::get(v) ) + continue; + + tv->Assign(std::move(std::get(idx)), std::move(std::get(v))); + } + + return tv; + } } case TYPE_RECORD: { diff --git a/testing/btest/Baseline/bifs.from_json-5/.stderr b/testing/btest/Baseline/bifs.from_json-5/.stderr index 93cbb432cf..6bdd60e118 100644 --- a/testing/btest/Baseline/bifs.from_json-5/.stderr +++ b/testing/btest/Baseline/bifs.from_json-5/.stderr @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-error in <...>/from_json.zeek, line 4: tables are not supported (from_json([], to_any_coerce table_string_of_string, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 4: cannot convert JSON type 'array' to Zeek type 'table' (from_json([], to_any_coerce table_string_of_string, from_json_default_key_mapper)) diff --git a/testing/btest/Baseline/bifs.from_json/.stdout b/testing/btest/Baseline/bifs.from_json/.stdout index 24f35f7b9b..584cb2ca60 100644 --- a/testing/btest/Baseline/bifs.from_json/.stdout +++ b/testing/btest/Baseline/bifs.from_json/.stdout @@ -5,4 +5,6 @@ aa:bb::/32, }, se={ [192.168.0.1, 80/tcp] , [2001:db8::1, 8080/udp] +}, tbl={ +[192.168.0.1, 80/tcp] = foo }], valid=T] diff --git a/testing/btest/Baseline/language.table/out b/testing/btest/Baseline/language.table/out index 358a2f70a6..64105b72ca 100644 --- a/testing/btest/Baseline/language.table/out +++ b/testing/btest/Baseline/language.table/out @@ -53,21 +53,29 @@ table index non-membership (PASS) table index lookup (PASS) table index reduced size (PASS) table index iteration (PASS) +table index JSON roundtrip success (PASS) +table index JSON roundtrip correct (PASS) vector index size (PASS) vector index membership (PASS) vector index non-membership (PASS) vector index lookup (PASS) vector index reduced size (PASS) vector index iteration (PASS) +vector index JSON roundtrip success (PASS) +vector index JSON roundtrip (PASS) set index size (PASS) set index membership (PASS) set index non-membership (PASS) set index lookup (PASS) set index reduced size (PASS) set index iteration (PASS) +set index JSON roundtrip success (PASS) +set index JSON roundtrip (PASS) pattern index size (PASS) pattern index membership (PASS) pattern index non-membership (PASS) pattern index lookup (PASS) pattern index reduced size (PASS) pattern index iteration (PASS) +pattern index JSON roundtrip success (PASS) +pattern index JSON roundtrip (PASS) diff --git a/testing/btest/bifs/from_json.zeek 
b/testing/btest/bifs/from_json.zeek index 1348933123..31d3438e7c 100644 --- a/testing/btest/bifs/from_json.zeek +++ b/testing/btest/bifs/from_json.zeek @@ -29,11 +29,12 @@ type Foo: record { re: pattern; su: subnet_set; se: set[addr, port]; + tbl: table[addr, port] of string; }; event zeek_init() { - local json = "{\"hello\":\"world\",\"t\":true,\"f\":false,\"se\":[[\"192.168.0.1\", \"80/tcp\"], [\"2001:db8::1\", \"8080/udp\"]],\"n\":null,\"i\":123,\"pi\":3.1416,\"a\":[\"1\",\"2\",\"3\",\"4\"],\"su\":[\"[aa:bb::0]/32\",\"192.168.0.0/16\"],\"c1\":\"A::Blue\",\"p\":\"1500/tcp\",\"it\":5000,\"ad\":\"127.0.0.1\",\"s\":\"[::1/128]\",\"re\":\"/a/\",\"ti\":1681652265.042767}"; + local json = "{\"hello\":\"world\",\"t\":true,\"f\":false,\"se\":[[\"192.168.0.1\", \"80/tcp\"], [\"2001:db8::1\", \"8080/udp\"]],\"n\":null,\"i\":123,\"pi\":3.1416,\"a\":[\"1\",\"2\",\"3\",\"4\"],\"su\":[\"[aa:bb::0]/32\",\"192.168.0.0/16\"],\"c1\":\"A::Blue\",\"p\":\"1500/tcp\",\"it\":5000,\"ad\":\"127.0.0.1\",\"s\":\"[::1/128]\",\"re\":\"/a/\",\"ti\":1681652265.042767,\"tbl\":{\"[\\\"192.168.0.1\\\",\\\"80/tcp\\\"]\":\"foo\"}}"; print from_json(json, Foo); } diff --git a/testing/btest/language/table.zeek b/testing/btest/language/table.zeek index db95f43d41..c4f73a3328 100644 --- a/testing/btest/language/table.zeek +++ b/testing/btest/language/table.zeek @@ -162,94 +162,130 @@ function basic_functionality() test_case( "!in operator", [cid, T] !in t11 ); } -function complex_index_types() +type tss_table: table[table[string] of string] of string; + +function complex_index_type_table() { # Initialization - local t1: table[table[string] of string] of string = { + local t: tss_table = { [table(["k1"] = "v1")] = "res1" }; # Adding a member - t1[table(["k2"] = "v2")] = "res2"; + t[table(["k2"] = "v2")] = "res2"; # Various checks, including membership test and lookup - test_case( "table index size", |t1| == 2 ); - test_case( "table index membership", table(["k2"] = "v2") in t1 ); - test_case( "table 
index non-membership", table(["k2"] = "v3") !in t1 ); - test_case( "table index lookup", t1[table(["k2"] = "v2")] == "res2" ); + test_case( "table index size", |t| == 2 ); + test_case( "table index membership", table(["k2"] = "v2") in t ); + test_case( "table index non-membership", table(["k2"] = "v3") !in t ); + test_case( "table index lookup", t[table(["k2"] = "v2")] == "res2" ); # Member deletion - delete t1[table(["k1"] = "v1")]; - test_case( "table index reduced size", |t1| == 1 ); + delete t[table(["k1"] = "v1")]; + test_case( "table index reduced size", |t| == 1 ); # Iteration - for ( ti in t1 ) + for ( ti in t ) { test_case( "table index iteration", to_json(ti) == to_json(table(["k2"] = "v2")) ); break; } - # As above, for other index types - local t2: table[vector of string] of string = { + # JSON serialize/unserialize + local fjr = from_json(to_json(t), tss_table); + test_case( "table index JSON roundtrip success", fjr$valid ); + test_case( "table index JSON roundtrip correct", to_json(t) == to_json(fjr$v) ); +} + +type vs_table: table[vector of string] of string; + +function complex_index_type_vector() +{ + local t: vs_table = { [vector("v1", "v2")] = "res1" }; - t2[vector("v3", "v4")] = "res2"; - test_case( "vector index size", |t2| == 2 ); - test_case( "vector index membership", vector("v3", "v4") in t2 ); - test_case( "vector index non-membership", vector("v4", "v5") !in t2 ); - test_case( "vector index lookup", t2[vector("v3", "v4")] == "res2" ); + t[vector("v3", "v4")] = "res2"; + test_case( "vector index size", |t| == 2 ); + test_case( "vector index membership", vector("v3", "v4") in t ); + test_case( "vector index non-membership", vector("v4", "v5") !in t ); + test_case( "vector index lookup", t[vector("v3", "v4")] == "res2" ); - delete t2[vector("v1", "v2")]; - test_case( "vector index reduced size", |t2| == 1 ); + delete t[vector("v1", "v2")]; + test_case( "vector index reduced size", |t| == 1 ); - for ( vi in t2 ) + for ( vi in t ) { test_case( 
"vector index iteration", to_json(vi) == to_json(vector("v3", "v4")) ); break; } - local t3: table[set[string]] of string = { + local fjr = from_json(to_json(t), vs_table); + test_case( "vector index JSON roundtrip success", fjr$valid ); + test_case( "vector index JSON roundtrip", to_json(t) == to_json(fjr$v) ); +} + +type ss_table: table[set[string]] of string; + +function complex_index_type_set() +{ + local t: ss_table = { [set("s1", "s2")] = "res1" }; - t3[set("s3", "s4")] = "res2"; - test_case( "set index size", |t3| == 2 ); - test_case( "set index membership", set("s3", "s4") in t3 ); - test_case( "set index non-membership", set("s4", "s5") !in t3 ); - test_case( "set index lookup", t3[set("s3", "s4")] == "res2" ); + t[set("s3", "s4")] = "res2"; + test_case( "set index size", |t| == 2 ); + test_case( "set index membership", set("s3", "s4") in t ); + test_case( "set index non-membership", set("s4", "s5") !in t ); + test_case( "set index lookup", t[set("s3", "s4")] == "res2" ); - delete t3[set("s1", "s2")]; - test_case( "set index reduced size", |t3| == 1 ); + delete t[set("s1", "s2")]; + test_case( "set index reduced size", |t| == 1 ); - for ( si in t3 ) + for ( si in t ) { test_case( "set index iteration", to_json(si) == to_json(set("s3", "s4")) ); break; } - local t4: table[pattern] of string = { + local fjr = from_json(to_json(t), ss_table); + test_case( "set index JSON roundtrip success", fjr$valid ); + test_case( "set index JSON roundtrip", to_json(t) == to_json(fjr$v) ); +} + +type tp_table: table[pattern] of string; + +function complex_index_type_pattern() +{ + local t: tp_table = { [/pat1/] = "res1" }; - t4[/pat2/] = "res2"; - test_case( "pattern index size", |t4| == 2 ); - test_case( "pattern index membership", /pat2/ in t4 ); - test_case( "pattern index non-membership", /pat3/ !in t4 ); - test_case( "pattern index lookup", t4[/pat2/] == "res2" ); + t[/pat2/] = "res2"; + test_case( "pattern index size", |t| == 2 ); + test_case( "pattern index 
membership", /pat2/ in t ); + test_case( "pattern index non-membership", /pat3/ !in t ); + test_case( "pattern index lookup", t[/pat2/] == "res2" ); - delete t4[/pat1/]; - test_case( "pattern index reduced size", |t4| == 1 ); + delete t[/pat1/]; + test_case( "pattern index reduced size", |t| == 1 ); - for ( pi in t4 ) + for ( pi in t ) { test_case( "pattern index iteration", to_json(pi) == to_json(/pat2/) ); break; } + + local fjr = from_json(to_json(t), tp_table); + test_case( "pattern index JSON roundtrip success", fjr$valid ); + test_case( "pattern index JSON roundtrip", to_json(t) == to_json(fjr$v) ); } event zeek_init() { basic_functionality(); - complex_index_types(); + complex_index_type_table(); + complex_index_type_vector(); + complex_index_type_set(); + complex_index_type_pattern(); } From 0179a5e75caa9cea42e0607c93afefffbadd7ef6 Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Fri, 28 Jun 2024 17:27:47 -0700 Subject: [PATCH 58/89] Support JSON roundtripping via to_json()/from_json() for patterns This needed a small tweak in the deserialization, since each roundtrip would otherwise pad the prior pattern with an extra /^?(...)$?/. This expands the language.set test to also verify serializing/unserializing for sets, similarly to tables in the previous commit. --- src/Val.cc | 8 +- testing/btest/Baseline/language.set/out | 8 ++ testing/btest/language/set.zeek | 105 ++++++++++++++++-------- 3 files changed, 86 insertions(+), 35 deletions(-) diff --git a/src/Val.cc b/src/Val.cc index cbc883347c..bf86360b70 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -996,11 +996,17 @@ static std::variant BuildVal(const rapidjson::Value& j, con return mismatch_err(); std::string candidate(j.GetString(), j.GetStringLength()); + // Remove any surrounding '/'s, not needed when creating an RE_matcher. 
if ( candidate.size() > 2 && candidate.front() == candidate.back() && candidate.back() == '/' ) { - // Remove the '/'s candidate.erase(0, 1); candidate.erase(candidate.size() - 1); } + // Remove any surrounding "^?(" and ")$?", automatically added below. + if ( candidate.size() > 6 && candidate.substr(0, 3) == "^?(" && + candidate.substr(candidate.size() - 3, 3) == ")$?" ) { + candidate.erase(0, 3); + candidate.erase(candidate.size() - 3); + } auto re = std::make_unique(candidate.c_str()); if ( ! re->Compile() ) diff --git a/testing/btest/Baseline/language.set/out b/testing/btest/Baseline/language.set/out index 51d2a72711..c53cae332c 100644 --- a/testing/btest/Baseline/language.set/out +++ b/testing/btest/Baseline/language.set/out @@ -76,18 +76,26 @@ table index membership (PASS) table index non-membership (PASS) table index reduced size (PASS) table index iteration (PASS) +table index JSON roundtrip success (PASS) +table index JSON roundtrip correct (PASS) vector index size (PASS) vector index membership (PASS) vector index non-membership (PASS) vector index reduced size (PASS) vector index iteration (PASS) +vector index JSON roundtrip success (PASS) +vector index JSON roundtrip correct (PASS) set index size (PASS) set index membership (PASS) set index non-membership (PASS) set index reduced size (PASS) set index iteration (PASS) +set index JSON roundtrip success (PASS) +set index JSON roundtrip correct (PASS) pattern index size (PASS) pattern index membership (PASS) pattern index non-membership (PASS) pattern index reduced size (PASS) pattern index iteration (PASS) +pattern index JSON roundtrip success (PASS) +pattern index JSON roundtrip correct (PASS) diff --git a/testing/btest/language/set.zeek b/testing/btest/language/set.zeek index 110856a7b4..24e2ce402c 100644 --- a/testing/btest/language/set.zeek +++ b/testing/btest/language/set.zeek @@ -186,82 +186,119 @@ function basic_functionality() test_case( "magnitude", |a_and_b| == |a_or_b|); } -function 
complex_index_types() +type tss_set: set[table[string] of string]; + +function complex_index_type_table() { # Initialization - local s1: set[table[string] of string] = { table(["k1"] = "v1") }; + local s: tss_set = { table(["k1"] = "v1") }; # Adding a member - add s1[table(["k2"] = "v2")]; + add s[table(["k2"] = "v2")]; # Various checks, including membership test - test_case( "table index size", |s1| == 2 ); - test_case( "table index membership", table(["k2"] = "v2") in s1 ); - test_case( "table index non-membership", table(["k2"] = "v3") !in s1 ); + test_case( "table index size", |s| == 2 ); + test_case( "table index membership", table(["k2"] = "v2") in s ); + test_case( "table index non-membership", table(["k2"] = "v3") !in s ); # Member deletion - delete s1[table(["k1"] = "v1")]; - test_case( "table index reduced size", |s1| == 1 ); + delete s[table(["k1"] = "v1")]; + test_case( "table index reduced size", |s| == 1 ); # Iteration - for ( ti in s1 ) + for ( ti in s ) { test_case( "table index iteration", to_json(ti) == to_json(table(["k2"] = "v2")) ); break; } + # JSON serialize/unserialize + local fjr = from_json(to_json(s), tss_set); + test_case( "table index JSON roundtrip success", fjr$valid ); + test_case( "table index JSON roundtrip correct", to_json(s) == to_json(fjr$v) ); +} + +type vs_set: set[vector of string]; + +function complex_index_type_vector() +{ # As above, for other index types - local s2: set[vector of string] = { vector("v1", "v2") }; + local s: vs_set = { vector("v1", "v2") }; - add s2[vector("v3", "v4")]; - test_case( "vector index size", |s2| == 2 ); - test_case( "vector index membership", vector("v3", "v4") in s2 ); - test_case( "vector index non-membership", vector("v4", "v5") !in s2 ); + add s[vector("v3", "v4")]; + test_case( "vector index size", |s| == 2 ); + test_case( "vector index membership", vector("v3", "v4") in s ); + test_case( "vector index non-membership", vector("v4", "v5") !in s ); - delete s2[vector("v1", "v2")]; - 
test_case( "vector index reduced size", |s2| == 1 ); + delete s[vector("v1", "v2")]; + test_case( "vector index reduced size", |s| == 1 ); - for ( vi in s2 ) + for ( vi in s ) { test_case( "vector index iteration", to_json(vi) == to_json(vector("v3", "v4")) ); break; } - local s3: set[set[string]] = { set("s1", "s2") }; + local fjr = from_json(to_json(s), vs_set); + test_case( "vector index JSON roundtrip success", fjr$valid ); + test_case( "vector index JSON roundtrip correct", to_json(s) == to_json(fjr$v) ); +} - add s3[set("s3", "s4")]; - test_case( "set index size", |s3| == 2 ); - test_case( "set index membership", set("s3", "s4") in s3 ); - test_case( "set index non-membership", set("s4", "s5") !in s3 ); +type ss_set: set[set[string]]; - delete s3[set("s1", "s2")]; - test_case( "set index reduced size", |s3| == 1 ); +function complex_index_type_set() +{ + local s: ss_set = { set("s1", "s2") }; - for ( si in s3 ) + add s[set("s3", "s4")]; + test_case( "set index size", |s| == 2 ); + test_case( "set index membership", set("s3", "s4") in s ); + test_case( "set index non-membership", set("s4", "s5") !in s ); + + delete s[set("s1", "s2")]; + test_case( "set index reduced size", |s| == 1 ); + + for ( si in s ) { test_case( "set index iteration", to_json(si) == to_json(set("s3", "s4")) ); break; } - local s4: set[pattern] = { /pat1/ }; + local fjr = from_json(to_json(s), ss_set); + test_case( "set index JSON roundtrip success", fjr$valid ); + test_case( "set index JSON roundtrip correct", to_json(s) == to_json(fjr$v) ); +} - add s4[/pat2/]; - test_case( "pattern index size", |s4| == 2 ); - test_case( "pattern index membership", /pat2/ in s4 ); - test_case( "pattern index non-membership", /pat3/ !in s4 ); +type p_set: set[pattern]; - delete s4[/pat1/]; - test_case( "pattern index reduced size", |s4| == 1 ); +function complex_index_type_pattern() +{ + local s: p_set = { /pat1/ }; - for ( pi in s4 ) + add s[/pat2/]; + test_case( "pattern index size", |s| == 2 ); + 
test_case( "pattern index membership", /pat2/ in s ); + test_case( "pattern index non-membership", /pat3/ !in s ); + + delete s[/pat1/]; + test_case( "pattern index reduced size", |s| == 1 ); + + for ( pi in s ) { test_case( "pattern index iteration", to_json(pi) == to_json(/pat2/) ); break; } + + local fjr = from_json(to_json(s), p_set); + test_case( "pattern index JSON roundtrip success", fjr$valid ); + test_case( "pattern index JSON roundtrip correct", to_json(s) == to_json(fjr$v) ); } event zeek_init() { basic_functionality(); - complex_index_types(); + complex_index_type_table(); + complex_index_type_vector(); + complex_index_type_set(); + complex_index_type_pattern(); } From 5f8b6986a2670e432abcfeb7cf64eba22d8de38a Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Mon, 1 Jul 2024 11:54:08 -0700 Subject: [PATCH 59/89] Update NEWS file to cover JSON enhancements --- NEWS | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/NEWS b/NEWS index deedb7b927..5531f3a378 100644 --- a/NEWS +++ b/NEWS @@ -107,6 +107,11 @@ New Functionality Use ``Analyzer::get_tag()`` if you need to obtain an analyzer's tag from its name (such as "HTTP"). +- The ``from_json()`` function now supports ingesting JSON representations of + tables as produced by the ``to_json()`` function. It now also supports reading + the object-based representation of ports that ``to_json()`` generates for that + Zeek type. + Changed Functionality --------------------- From a98ec6b08b8c79ea97e3e7a3a244fb2c8cc589e7 Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Mon, 24 Jun 2024 22:06:58 -0700 Subject: [PATCH 60/89] Provide a script-layer equivalent to Supervisor::__init_cluster(). If the script layer is able to access the current node's config via Supervisor::node(), it can handle populating Cluster::nodes. That code is much more straightforward than an equivalent in-core implementation (especially with the upcoming change to the cluster table's implementation). 
This introduces base/frameworks/cluster/supervisor.zeek and Cluster::Supervisor::__init_cluster_nodes() for that purpose. The @load of the Supervisor API in cluster/main.zeek isn't technically necessary since we already load it explicitly even in init-bare.zeek, but being explicit seems better. --- .../base/frameworks/cluster/supervisor.zeek | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 scripts/base/frameworks/cluster/supervisor.zeek diff --git a/scripts/base/frameworks/cluster/supervisor.zeek b/scripts/base/frameworks/cluster/supervisor.zeek new file mode 100644 index 0000000000..4e6ec51fff --- /dev/null +++ b/scripts/base/frameworks/cluster/supervisor.zeek @@ -0,0 +1,57 @@ +##! Cluster-related functionality specific to running under the Supervisor +##! framework. + +@load base/frameworks/supervisor/api + +module Cluster::Supervisor; + +export { + ## Populates the current node's :zeek:id:`Cluster::nodes` table from the + ## supervisor's node configuration in :zeek:id:`Supervisor::NodeConfig`. + ## + ## Returns: true if initialization completed, false otherwise. 
+ global __init_cluster_nodes: function(): bool; +} + +function __init_cluster_nodes(): bool + { + local config = Supervisor::node(); + + if ( |config$cluster| == 0 ) + return F; + + local rolemap: table[Supervisor::ClusterRole] of Cluster::NodeType = { + [Supervisor::LOGGER] = Cluster::LOGGER, + [Supervisor::MANAGER] = Cluster::MANAGER, + [Supervisor::PROXY] = Cluster::PROXY, + [Supervisor::WORKER] = Cluster::WORKER, + }; + + local manager_name = ""; + local cnode: Cluster::Node; + local typ: Cluster::NodeType = Cluster::NONE; + + for ( node_name, endp in config$cluster ) + { + if ( endp$role == Supervisor::MANAGER ) + manager_name = node_name; + } + + for ( node_name, endp in config$cluster ) + { + if ( endp$role in rolemap ) + typ = rolemap[endp$role]; + + cnode = [$node_type=typ, $ip=endp$host, $p=endp$p]; +@pragma push ignore-deprecations + if ( endp?$interface ) + cnode$interface = endp$interface; +@pragma pop ignore-deprecations + if ( |manager_name| > 0 && cnode$node_type != Cluster::MANAGER ) + cnode$manager = manager_name; + + Cluster::nodes[node_name] = cnode; + } + + return T; + } From 737b1a20138e3720b5b5e33eef69377e35ff46db Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Mon, 24 Jun 2024 22:19:29 -0700 Subject: [PATCH 61/89] Remove the Supervisor's internal ClusterEndpoint struct. This eliminates one place in which we currently need to mirror changes to the script-land Cluster::Node record. Instead of keeping an exact in-core equivalent, the Supervisor now treats the data structure as opaque, and stores the whole cluster table as a JSON string. We may replace the script-layer Supervisor::ClusterEndpoint in the future, using Cluster::Node directly. But that's a more invasive change that will affect how people invoke Supervisor::create() and similars. Relying on JSON for serialization has the side-effect of removing the Supervisor's earlier quirk of using 0/tcp, not 0/unknown, to indicate unused ports in the Supervisor::ClusterEndpoint record. 
--- NEWS | 4 + scripts/base/frameworks/cluster/__load__.zeek | 21 ++- .../frameworks/management/agent/main.zeek | 7 +- src/supervisor/Supervisor.cc | 150 +++--------------- src/supervisor/Supervisor.h | 40 +---- src/supervisor/supervisor.bif | 9 -- .../coverage.init-default/missing_loads | 1 + 7 files changed, 51 insertions(+), 181 deletions(-) diff --git a/NEWS b/NEWS index 5531f3a378..d757ddff05 100644 --- a/NEWS +++ b/NEWS @@ -151,6 +151,10 @@ Changed Functionality it aligns with the same requirement for traditional analyzers and enables customizing file handles for protocol-specific semantics. +- The Supervisor's API now returns NodeConfig records with a cluster table whose + ClusterEndpoints have a port value of 0/unknown, rather than 0/tcp, to + indicate that the node in question has no listening port. + Removed Functionality --------------------- diff --git a/scripts/base/frameworks/cluster/__load__.zeek b/scripts/base/frameworks/cluster/__load__.zeek index 47918e7d0d..a854302636 100644 --- a/scripts/base/frameworks/cluster/__load__.zeek +++ b/scripts/base/frameworks/cluster/__load__.zeek @@ -14,14 +14,21 @@ redef Broker::log_topic = Cluster::rr_log_topic; # Add a cluster prefix. @prefixes += cluster -# If this script isn't found anywhere, the cluster bombs out. -# Loading the cluster framework requires that a script by this name exists -# somewhere in the ZEEKPATH. The only thing in the file should be the -# cluster definition in the :zeek:id:`Cluster::nodes` variable. +@if ( Supervisor::is_supervised() ) +# When running a supervised cluster, populate Cluster::nodes from the node table +# the Supervisor provides to new Zeek nodes. The management framework configures +# the cluster this way. +@load ./supervisor +@if ( Cluster::Supervisor::__init_cluster_nodes() && Cluster::get_node_count(Cluster::LOGGER) > 0 ) +redef Cluster::manager_is_logger = F; +@endif +@endif -@if ( ! 
Supervisor::__init_cluster() ) -# When running a supervised cluster, Cluster::nodes is instead populated -# from the internal C++-layer directly via the above BIF. +@if ( |Cluster::nodes| == 0 ) +# Fall back to loading a cluster topology from cluster-layout.zeek. If Zeek +# cannot find this script in your ZEEKPATH, it will exit. The script should only +# contain the cluster definition in the :zeek:id:`Cluster::nodes` variable. +# The zeekctl tool manages this file for you. @load cluster-layout @endif diff --git a/scripts/policy/frameworks/management/agent/main.zeek b/scripts/policy/frameworks/management/agent/main.zeek index 6397313eab..7dbe963cf9 100644 --- a/scripts/policy/frameworks/management/agent/main.zeek +++ b/scripts/policy/frameworks/management/agent/main.zeek @@ -625,10 +625,9 @@ function get_nodes_request_finish(areq: Management::Request::Request) if ( node in g_nodes ) cns$state = g_nodes[node]$state; - # The supervisor's responses use 0/tcp (not 0/unknown) - # when indicating an unused port because its internal - # serialization always assumes TCP. - if ( sns$node$cluster[node]$p != 0/tcp ) + # The supervisor's responses use 0/unknown to indicate + # unused ports. (Prior to Zeek 7 this used to be 0/tcp.) 
+ if ( sns$node$cluster[node]$p != 0/unknown ) cns$p = sns$node$cluster[node]$p; } else diff --git a/src/supervisor/Supervisor.cc b/src/supervisor/Supervisor.cc index a176f033e3..0585ec6467 100644 --- a/src/supervisor/Supervisor.cc +++ b/src/supervisor/Supervisor.cc @@ -18,6 +18,8 @@ #define RAPIDJSON_HAS_STDSTRING 1 #include +#include +#include extern "C" { #include "zeek/3rdparty/setsignal.h" @@ -1243,34 +1245,9 @@ Supervisor::NodeConfig Supervisor::NodeConfig::FromRecord(const RecordVal* node) rval.env[name] = v->GetVal()->AsStringVal()->ToStdString(); } - auto cluster_table_val = node->GetField("cluster")->AsTableVal(); - auto cluster_table = cluster_table_val->AsTable(); - - for ( const auto& cte : *cluster_table ) { - auto k = cte.GetHashKey(); - auto* v = cte.value; - - auto key = cluster_table_val->RecreateIndex(*k); - auto name = key->Idx(0)->AsStringVal()->ToStdString(); - auto rv = v->GetVal()->AsRecordVal(); - - Supervisor::ClusterEndpoint ep; - ep.role = static_cast(rv->GetFieldAs("role")); - ep.host = rv->GetFieldAs("host").AsString(); - ep.port = rv->GetFieldAs("p")->Port(); - - const auto& iface = rv->GetField("interface"); - - if ( iface ) - ep.interface = iface->AsStringVal()->ToStdString(); - - const auto& pcap_file = rv->GetField("pcap_file"); - - if ( pcap_file ) - ep.pcap_file = pcap_file->AsStringVal()->ToStdString(); - - rval.cluster.emplace(name, std::move(ep)); - } + auto cluster_table_val = node->GetField("cluster"); + auto re = std::make_unique("^_"); + rval.cluster = cluster_table_val->ToJSON(false, re.get())->ToStdString(); return rval; } @@ -1319,26 +1296,10 @@ Supervisor::NodeConfig Supervisor::NodeConfig::FromJSON(std::string_view json) { auto& cluster = j["cluster"]; - for ( auto it = cluster.MemberBegin(); it != cluster.MemberEnd(); ++it ) { - Supervisor::ClusterEndpoint ep; - - auto key = it->name.GetString(); - auto& val = it->value; - - auto& role_str = val["role"]; - ep.role = role_str_to_enum(role_str.GetString()); - - 
ep.host = val["host"].GetString(); - ep.port = val["p"]["port"].GetInt(); - - if ( auto it = val.FindMember("interface"); it != val.MemberEnd() ) - ep.interface = it->value.GetString(); - - if ( auto it = val.FindMember("pcap_file"); it != val.MemberEnd() ) - ep.pcap_file = it->value.GetString(); - - rval.cluster.emplace(key, std::move(ep)); - } + rapidjson::StringBuffer sb; + rapidjson::Writer writer(sb); + cluster.Accept(writer); + rval.cluster = sb.GetString(); return rval; } @@ -1349,7 +1310,7 @@ std::string Supervisor::NodeConfig::ToJSON() const { } RecordValPtr Supervisor::NodeConfig::ToRecord() const { - const auto& rt = BifType::Record::Supervisor::NodeConfig; + const auto& rt = id::find_type("Supervisor::NodeConfig"); auto rval = make_intrusive(rt); rval->AssignField("name", name); @@ -1401,27 +1362,18 @@ RecordValPtr Supervisor::NodeConfig::ToRecord() const { } auto tt = rt->GetFieldType("cluster"); - auto cluster_val = make_intrusive(std::move(tt)); - rval->AssignField("cluster", cluster_val); - - for ( const auto& e : cluster ) { - auto& name = e.first; - auto& ep = e.second; - auto key = make_intrusive(name); - const auto& ept = BifType::Record::Supervisor::ClusterEndpoint; - auto val = make_intrusive(ept); - - val->AssignField("role", BifType::Enum::Supervisor::ClusterRole->GetEnumVal(ep.role)); - val->AssignField("host", make_intrusive(ep.host)); - val->AssignField("p", val_mgr->Port(ep.port, TRANSPORT_TCP)); - - if ( ep.interface ) - val->AssignField("interface", *ep.interface); - - if ( ep.pcap_file ) - val->AssignField("pcap_file", *ep.pcap_file); - - cluster_val->Assign(std::move(key), std::move(val)); + auto json_res = detail::ValFromJSON(cluster, tt, Func::nil); + if ( auto val = std::get_if(&json_res) ) { + rval->AssignField("cluster", *val); + } + else { + // This should never happen: the JSON data comes from a table[string] of + // ClusterEndpoint and should therefore allow instantiation. Exiting + // here can be hard to debug. 
Other JSON code (see FromJSON()) fails + // silently when the JSON is misformatted. We just warn: + fprintf(stderr, "Could not parse %s's cluster table from '%s': %s\n", name.c_str(), cluster.c_str(), + std::get(json_res).c_str()); + rval->AssignField("cluster", make_intrusive(std::move(tt))); } return rval; @@ -1439,62 +1391,6 @@ RecordValPtr SupervisorNode::ToRecord() const { return rval; } -static ValPtr supervisor_role_to_cluster_node_type(BifEnum::Supervisor::ClusterRole role) { - static auto node_type = id::find_type("Cluster::NodeType"); - - switch ( role ) { - case BifEnum::Supervisor::LOGGER: return node_type->GetEnumVal(node_type->Lookup("Cluster", "LOGGER")); - case BifEnum::Supervisor::MANAGER: return node_type->GetEnumVal(node_type->Lookup("Cluster", "MANAGER")); - case BifEnum::Supervisor::PROXY: return node_type->GetEnumVal(node_type->Lookup("Cluster", "PROXY")); - case BifEnum::Supervisor::WORKER: return node_type->GetEnumVal(node_type->Lookup("Cluster", "WORKER")); - default: return node_type->GetEnumVal(node_type->Lookup("Cluster", "NONE")); - } -} - -bool SupervisedNode::InitCluster() const { - if ( config.cluster.empty() ) - return false; - - const auto& cluster_node_type = id::find_type("Cluster::Node"); - const auto& cluster_nodes_id = id::find("Cluster::nodes"); - const auto& cluster_manager_is_logger_id = id::find("Cluster::manager_is_logger"); - auto cluster_nodes = cluster_nodes_id->GetVal()->AsTableVal(); - auto has_logger = false; - std::optional manager_name; - - for ( const auto& e : config.cluster ) { - if ( e.second.role == BifEnum::Supervisor::MANAGER ) - manager_name = e.first; - else if ( e.second.role == BifEnum::Supervisor::LOGGER ) - has_logger = true; - } - - for ( const auto& e : config.cluster ) { - const auto& node_name = e.first; - const auto& ep = e.second; - - auto key = make_intrusive(node_name); - auto val = make_intrusive(cluster_node_type); - - auto node_type = supervisor_role_to_cluster_node_type(ep.role); - 
val->AssignField("node_type", std::move(node_type)); - val->AssignField("ip", make_intrusive(ep.host)); - val->AssignField("p", val_mgr->Port(ep.port, TRANSPORT_TCP)); - - // Remove in v7.1: Interface removed from Cluster::Node. - if ( ep.interface ) - val->AssignField("interface", *ep.interface); - - if ( manager_name && ep.role != BifEnum::Supervisor::MANAGER ) - val->AssignField("manager", *manager_name); - - cluster_nodes->Assign(std::move(key), std::move(val)); - } - - cluster_manager_is_logger_id->SetVal(val_mgr->Bool(! has_logger)); - return true; -} - void SupervisedNode::Init(Options* options) const { const auto& node_name = config.name; @@ -1546,7 +1442,7 @@ void SupervisedNode::Init(Options* options) const { } } - if ( ! config.cluster.empty() ) { + if ( ! config.cluster.empty() && config.cluster != "{}" ) { if ( setenv("CLUSTER_NODE", node_name.data(), true) == -1 ) { fprintf(stderr, "node '%s' failed to setenv: %s\n", node_name.data(), strerror(errno)); exit(1); diff --git a/src/supervisor/Supervisor.h b/src/supervisor/Supervisor.h index aa511e1209..641618dfde 100644 --- a/src/supervisor/Supervisor.h +++ b/src/supervisor/Supervisor.h @@ -110,35 +110,6 @@ public: std::string zeek_exe_path; }; - /** - * Configuration options that influence how a Supervised Zeek node - * integrates into the normal Zeek Cluster Framework. - */ - struct ClusterEndpoint { - /** - * The node's role within the cluster. E.g. manager, logger, worker. - */ - BifEnum::Supervisor::ClusterRole role; - /** - * The TCP port number at which the cluster node listens for connections. - */ - int port; - /** - * The host/IP at which the cluster node is listening for connections. - */ - std::string host; - /** - * The interface name from which the node read/analyze packets. - * Typically used by worker nodes. - */ - std::optional interface; - /** - * The PCAP file name from which the node read/analyze packets. - * Typically used by worker nodes. 
- */ - std::optional pcap_file; - }; - /** * Configuration options that influence behavior of a Supervised Zeek node. */ @@ -233,15 +204,16 @@ public: */ std::vector addl_user_scripts; /** - * Environment variables and values to define in the node. + * Environment variables and values to define in the node. */ std::map env; /** - * The Cluster Layout definition. Each node in the Cluster Framework - * knows about the full, static cluster topology to which it belongs. - * Entries in the map use node names for keys. + * The cluster layout definition. Each node in the Cluster Framework + * knows the full, static cluster topology to which it belongs. The + * layout is encoded as the JSON map resulting from ToJSON() on the + * corresponding cluster table in the script layer's NodeConfig record. */ - std::map cluster; + std::string cluster; }; /** diff --git a/src/supervisor/supervisor.bif b/src/supervisor/supervisor.bif index b749c773d5..d8bd06bd96 100644 --- a/src/supervisor/supervisor.bif +++ b/src/supervisor/supervisor.bif @@ -14,7 +14,6 @@ enum ClusterRole %{ WORKER, %} -type Supervisor::ClusterEndpoint: record; type Supervisor::Status: record; type Supervisor::NodeConfig: record; type Supervisor::NodeStatus: record; @@ -66,14 +65,6 @@ function Supervisor::__restart%(node: string%): bool return zeek::val_mgr->Bool(rval); %} -function Supervisor::__init_cluster%(%): bool - %{ - if ( zeek::Supervisor::ThisNode() ) - return zeek::val_mgr->Bool(zeek::Supervisor::ThisNode()->InitCluster()); - - return zeek::val_mgr->Bool(false); - %} - function Supervisor::__is_supervised%(%): bool %{ return zeek::val_mgr->Bool(zeek::Supervisor::ThisNode().has_value()); diff --git a/testing/btest/Baseline/coverage.init-default/missing_loads b/testing/btest/Baseline/coverage.init-default/missing_loads index fe23c7a04a..e16624e1fb 100644 --- a/testing/btest/Baseline/coverage.init-default/missing_loads +++ b/testing/btest/Baseline/coverage.init-default/missing_loads @@ -5,6 +5,7 @@ 
-./frameworks/cluster/nodes/proxy.zeek -./frameworks/cluster/nodes/worker.zeek -./frameworks/cluster/setup-connections.zeek +-./frameworks/cluster/supervisor.zeek -./frameworks/intel/cluster.zeek -./frameworks/netcontrol/cluster.zeek -./frameworks/openflow/cluster.zeek From 42a451c2e316a52ba505d63bae774f28e4146c49 Mon Sep 17 00:00:00 2001 From: zeek-bot Date: Wed, 3 Jul 2024 00:21:51 +0000 Subject: [PATCH 62/89] Update doc submodule [nomail] [skip ci] --- doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc b/doc index 44651a4526..bceb0f6035 160000 --- a/doc +++ b/doc @@ -1 +1 @@ -Subproject commit 44651a45261613b14dbd44e0ea8376346a689bd8 +Subproject commit bceb0f6035cb1b98f6f2d9649e2fe67bba4f3999 From c6368fc3f000de8982c90316ab33bef7cd13f555 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Thu, 4 Jul 2024 11:35:28 +0200 Subject: [PATCH 63/89] ContentLineAnalyzer: Add getter for skip_partial --- src/analyzer/protocol/tcp/ContentLine.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/analyzer/protocol/tcp/ContentLine.h b/src/analyzer/protocol/tcp/ContentLine.h index 4d473a5f84..de069ebb62 100644 --- a/src/analyzer/protocol/tcp/ContentLine.h +++ b/src/analyzer/protocol/tcp/ContentLine.h @@ -22,6 +22,9 @@ public: // If enabled, flag (first) line with embedded NUL. Default off. void SetIsNULSensitive(bool enable) { flag_NULs = enable; } + // Returns true if skipping data above a hole. + bool SkipPartial() const { return skip_partial; } + // If enabled, skip data above a hole. Default off. 
void SetSkipPartial(bool enable) { skip_partial = enable; } From 377fd711bded6eff7cb3cd28126938794e099962 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Wed, 3 Jul 2024 14:13:03 +0200 Subject: [PATCH 64/89] HTTP: Implement FlipRoles() When Zeek flips roles of a HTTP connection subsequent to the HTTP analyzer being attached, that analyzer would not update its own ContentLine analyzer state, resulting in the wrong ContentLine analyzer being switched into plain delivery mode. In debug builds, this would result in assertion failures, in production builds, the HTTP analyzer would receive HTTP bodies as individual header lines, or conversely, individual header lines would be delivered as a large chunk from the ContentLine analyzer. PCAPs were generated locally using tcprewrite to select well-known-http ports for both endpoints, then editcap to drop the first SYN packet. Kudos to @JordanBarnartt for keeping at it. Closes #3789 --- src/analyzer/protocol/http/HTTP.cc | 30 ++++++++++++++++++ src/analyzer/protocol/http/HTTP.h | 1 + .../conn.log.cut | 3 ++ .../files.log.cut | 4 +++ .../http.log.cut | 3 ++ .../conn.log.cut | 3 ++ .../files.log.cut | 3 ++ .../http.log.cut | 3 ++ .../Traces/http/zeek-image-1080-80-x.pcap | Bin 0 -> 7980 bytes .../http/zeek-image-post-1080-8000-x.pcap | Bin 0 -> 8180 bytes .../http/flip-content-line-orig.zeek | 19 +++++++++++ .../http/flip-content-line-resp.zeek | 19 +++++++++++ 12 files changed, 88 insertions(+) create mode 100644 testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/conn.log.cut create mode 100644 testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/files.log.cut create mode 100644 testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/http.log.cut create mode 100644 testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/conn.log.cut create mode 100644 testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/files.log.cut create 
mode 100644 testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/http.log.cut create mode 100644 testing/btest/Traces/http/zeek-image-1080-80-x.pcap create mode 100644 testing/btest/Traces/http/zeek-image-post-1080-8000-x.pcap create mode 100644 testing/btest/scripts/base/protocols/http/flip-content-line-orig.zeek create mode 100644 testing/btest/scripts/base/protocols/http/flip-content-line-resp.zeek diff --git a/src/analyzer/protocol/http/HTTP.cc b/src/analyzer/protocol/http/HTTP.cc index 684abce01b..3d5bdc246f 100644 --- a/src/analyzer/protocol/http/HTTP.cc +++ b/src/analyzer/protocol/http/HTTP.cc @@ -1020,6 +1020,36 @@ void HTTP_Analyzer::Undelivered(uint64_t seq, int len, bool is_orig) { } } +void HTTP_Analyzer::FlipRoles() { + analyzer::tcp::TCP_ApplicationAnalyzer::FlipRoles(); + + // If FlipRoles() is invoked after we've upgraded to something, + // don't do anything. This shouldn't happen as flipping of TCP + // connections currently happens before any data is transferred, + // but better safe than sorry. + if ( upgraded || pia ) { + Weird("HTTP_late_flip_roles"); + return; + } + + // If we haven't upgraded but saw request or replies, just bail + // for the rest of this connection. Again, this should never happen + // right now, but raise a weird in case it starts to happen. + if ( num_requests > 0 || num_replies > 0 ) { + Weird("HTTP_late_flip_roles"); + SetSkip(true); + return; + } + + // IsOrig() of the support analyzer has been updated, but we still need + // to change the analyzer's local state and the partial skipping setting. 
+ bool skip_partial_orig = content_line_orig->SkipPartial(); + bool skip_partial_resp = content_line_resp->SkipPartial(); + std::swap(content_line_orig, content_line_resp); + content_line_orig->SetSkipPartial(skip_partial_orig); + content_line_resp->SetSkipPartial(skip_partial_resp); +} + void HTTP_Analyzer::EndpointEOF(bool is_orig) { analyzer::tcp::TCP_ApplicationAnalyzer::EndpointEOF(is_orig); diff --git a/src/analyzer/protocol/http/HTTP.h b/src/analyzer/protocol/http/HTTP.h index 62f519201d..15feb9e313 100644 --- a/src/analyzer/protocol/http/HTTP.h +++ b/src/analyzer/protocol/http/HTTP.h @@ -167,6 +167,7 @@ public: void Done() override; void DeliverStream(int len, const u_char* data, bool orig) override; void Undelivered(uint64_t seq, int len, bool orig) override; + void FlipRoles() override; // Overridden from analyzer::tcp::TCP_ApplicationAnalyzer void EndpointEOF(bool is_orig) override; diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/conn.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/conn.log.cut new file mode 100644 index 0000000000..197f268773 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/conn.log.cut @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +id.orig_h id.orig_p id.resp_h id.resp_p history service +127.0.0.1 1080 127.0.0.1 8000 ^hADadFf http diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/files.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/files.log.cut new file mode 100644 index 0000000000..b0788b8a39 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/files.log.cut @@ -0,0 +1,4 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+id.orig_h id.orig_p id.resp_h id.resp_p analyzers mime_type sha1 +127.0.0.1 1080 127.0.0.1 8000 SHA1 image/png 1991cedee47909e324ac1b8bee2020d5690891e1 +127.0.0.1 1080 127.0.0.1 8000 SHA1 text/json eae909a9c2827d827ef30a6675a6388770ddc88d diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/http.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/http.log.cut new file mode 100644 index 0000000000..a8665ed118 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/http.log.cut @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +id.orig_h id.orig_p id.resp_h id.resp_p host method uri version user_agent status_code status_msg +127.0.0.1 1080 127.0.0.1 8000 localhost:8000 POST / 1.1 curl/7.81.0 200 OK diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/conn.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/conn.log.cut new file mode 100644 index 0000000000..14aa69299b --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/conn.log.cut @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +id.orig_h id.orig_p id.resp_h id.resp_p history service +127.0.0.1 1080 127.0.0.1 80 ^hADadFf http diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/files.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/files.log.cut new file mode 100644 index 0000000000..d3f231e710 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/files.log.cut @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+id.orig_h id.orig_p id.resp_h id.resp_p analyzers mime_type sha1 +127.0.0.1 1080 127.0.0.1 80 SHA1 image/png 1991cedee47909e324ac1b8bee2020d5690891e1 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/http.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/http.log.cut new file mode 100644 index 0000000000..8b6d2e26ec --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/http.log.cut @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +id.orig_h id.orig_p id.resp_h id.resp_p host method uri version user_agent status_code status_msg +127.0.0.1 1080 127.0.0.1 80 localhost GET /zeek.png 1.1 curl/7.81.0 200 OK diff --git a/testing/btest/Traces/http/zeek-image-1080-80-x.pcap b/testing/btest/Traces/http/zeek-image-1080-80-x.pcap new file mode 100644 index 0000000000000000000000000000000000000000..a22a01f276e6bad1065f76fcf818c48caad7185c GIT binary patch literal 7980 zcmcJU2{@G9`^U#N7zWuw%GgQPF-XdiJ?jt>vdu7-Ax4p1wk#ziTe7d&Q(0Q<5wb@Euj@GwC=`MNfk3(nA{->Z zb^nKvgLFY^_86RHrfU z`HS;CHj(l0M=&De_|`;h0?;Ow-|c`AR`dCQ5%G@OfM3o&-Sm3sumxqhEteBjnm;@A}Y-fjZ z2W}o0KMsXJAqR8)-+7S*=KSCD!rxDvGwagNIsaILw?J+dfV}KBtp0t@f8~Xi2m*|l z7v#=F>?{QcL@d7>F)tLO`~yVBA&vjW7?1nKI6_?GpP7J3Nl3u-&p{zZD32>B4|y2Y z0fY4hawLTigF_*zHaL_#%oJsR0ww{6X?wW<scR_P98Z?Y}y7P*?|?qdW{DCn*Dk=-7DTM0MTlF=!0R{=Wo=%S*z41;?W7 za2PkNJj@w|au>C6!CXN>A!^1p4)QP_85uc*l&!R+sHBXo9M8|y5=Giz9Z;U~Fk2rS z%9FTkHw^SNpb!oSkU6N9rYaK1Mk?S(Qjr1m=gKiJ;KGShGs79Vp>bDjJWwEIdpBDY zOda?^Uv=|v_Jo}^lKIq76$6Z;cGtY%2?CM5B^DB_h8+TMKn6O-Y8vi3dwYAkySqUB z%l7wwxkLx}+xXrFhEW#a0UvOv3MMED2@-do?dizCe<&S@Ney=w-H-B&bOCJDH z=S}SqBzh2@09OU;kq9`Cx94y2?Mg!*)*9iP` z&gA(&CjV<4e?0{*Bv2j<1_FW$Ccy>x?@v+YO#xj%oL@nJ-UBEw{zo)mCjkDzQ$S7l z9*VpGPf>{fzDa=80l~qA6I2D0w0ZA|BMS*)DKCH;2t{5z0^oqbU|@Vc0eA!mLS2OC zPpW&^)lUpqK%mK=pw1h+)H1!byKCJ#EmAqrIlqQ#|3G{@AV?veypTX%FiAzs#`if) 
z0PhHFWnjO8vc`ek0?xyOizZ16@X><{YdgE3JOW?&I5dB3W_@dDY2)PMN#ppoik3Cm>xeK{R)?@w7k79%3<0vgD-BI$lTc@{cLOFqlKHVnVGL&6wn>C^XXP_v$@Qn-)QopYfy7}-y8ngp@MsvA)Ta)^F8@E zA7o$8!EW-S?E3&DrfT0qK=^Tbj<3Rh#6f6+wj@2x|gSH7^2ZuwQ?qI zS*O>6QX*Wr<{1hJUM%dNMb+i}laHUPtdI?Q&j~FX z!YYLaee{lG(}Y~(P;*)gDpC)0(jy5LJR2jAKW^wJ)?S#UHZB2kOR|w|Bp32__}{s4J~RC(}uFDE}3K=qha*+EZ^gwA!Qs-y`AD z=FE5Y6K&d4hjAMGfuA(G#qd32=pw)n0@n^Co=#tOZln}#{c$R|P_Qq6_Dmb{vQTSK({Z$lzH zd2ilH?9#hrVOele`}s3G;ew3tGPb7I)?De~xI}r5+&zh1_%j~71?XgwNQAb?-B>m| zt`(WPALervH#!G;kTQ#dw^O4w4eutZ@Cnyg7dT@pd6MB#SHf(rG3X7obP3dQb&1hu zpUlqIxpYC8#7Ew+T6^^C!+us@Lexkpl%{>FzkE;HS`~%18;*V{uEZ}~kIMqHcJPT< ziE)Qn=WJv75qp%Mi*D0P826ZN>26(9IltZzpBPzLW>n1?)oDVv6|QUA$AEo-bV5(s z?|xvpGD7S6%rg3Q`pE{xUWKKRTyT8O<;3_nUvz3t;v+;E(vwaL*}eXaByFKxpl@6) zh*!*Tr5m*ttXdJH-n<@^PWq`TcAV)hBftJT#b?~IBkQe@H+IxoYz9_GB$1Em)Vn!w zj3rc@oZc|K$G+oR_Z*@<8}`>XPJbKc#aq>zM#$pV#&hmk8ap-a%~DO%HCCtc^Adtw zx4uN(@Y*T9>Xo=UhP*Q+XPx$~|6X3`g8-GS#W;~cW*9nBVB@PCgW=^0=nVNy<}>x1 zn;Q2m_gCN5IQtKm`ZtxVUAsSUWYuurPGdB4)1*0}Gww#GYgML=$Yyx@JF7P#&#dyF zkfkvLdG_QNKJ%`qh^BpF!;G?EYW{^_V@|6+Hek8@)&u|h>qSNb!~0BADK~{kO|H9++Y*#cVg&N!LD}C!lx%wU9=pgEEI}+(-%HMRN zsuKClgo_6`-3(=x-DP)F9FsG_N9d{d#Siaq?tU5HJWlszbWH}rex(5g$5F&yYTlE(M_^a=2itz?r5^|HWCf)-( zz3%8@#XZn_dt3I;UFP7@9HBr8uY56n3hg5LoLm8^*sG}sAw8}gk8y37MxpLR=K4Qt zfzzM0;LU*+sQq0F41|7afn@IAwcxsGocwk$p=-mRw(zzq*$|vDb&%Ya6+gDLW*3>~ z5{m5nT$mEKE7ij zEUC8E@#PgB(x%SOLi@wNAh{b^88YY(mq*YjTzX&nWciJ0mi%@Gq07Dz^y#Jcg(;7i zki(y1YM)%tq8HX43`N&_ybgM`l9}rsW`>Ng?f>}kE_Re5#b>8p9X{~%XpZuHqA|^d zxkuEc)^kVM_|=3^Jo9B0q3Nk7X!Mm&Pd|%RsT8QJPl#vt`5@^W`21oQx>AKs(fz_@&- z`Q%IZqaGECPmX-iR-9ob%qrU+$PY!IdwOrPXJs{`TaWC_I`8rzOs2Z0&w(Y*5*B+r}cqCNf;lP2UQbLqL5j`Ww>Mwe%W@5ojmV@=GINvFV2 zyq1&jXDOt8;SJs1d+cTv0?*BTSLHLO)qtrGmKq>-%V{x5h9~%NUlJtWJLbNq_j=BO zs6d)bSdpsa7G~?MuM8_zk~#%o<>2mRALo4EH24~^*ThcsHeDlKUBvXem7q9m=<~s) z54Jq8jUxBW#+Y5L*r6-W5_xW3$kdM%E^fKY{60td&#W-f&NgyiK_Z)7?+<8UbdcF?L*hr=w&x;Co`*2S z0%>RS7)%nuvJ};zdib_e%QZ`NAG}jxF09noz9-xIX}N9(OeJ%1%vy#Q@iYh?p1Rq1 z&7k@;n?BV1kw{qJJGK8rb8=WJfCM 
zlL{bC&Mk&`jBQT$+10ts$257ibcQRP1T~+DVuG~XrG}2aWY9^?{DDNIiCa^I>|>t# zK&r>Ac^NE6I=aNSw_EvB19als>Rv2exhJ2YKsn5raK1I{g>mE^vGD8WYjL*vx(Hi! z3&Ap_H&F08rw<95)S@}1+cw1Hk=SXOZ%tWJC8szlS*tv#vMbKjVh3TDHX#;`8Z>&P zS>Y+&PFft9k+$)4!|I1-#k7zbLuKc-bNuTbq8}QrFevOLjIHzXCLaT@J!#l-y00A% zwxE`*U$pWvw0CKq9<&r&Tuu~2?if6w72Q*No1ARmrrFTO>Kd*LqNw&^sk(T~Cih>WvLA<#tF+XLv`*(}T^tklVX}?@S%-8@njE5tvFu3Hk zN+=3`7?*>dV9>$kSk2@siJJw>RPULORA{q0^k2YJWemUFLccaH%zVVKpAE{Winff3 zODw=;=exU)JCFPEX1t^7o(_1!2W_10zNu#7hsTQgLV7YE@x_0=1J^0cc9BU)BENapLs#S?0@38ERLU%LU*Yt<2LiD#)7`Zo zxowZB>PgHSm-{~#&G4b&Dq*6#1?|~cdrpEeJdfK3td=BL&F4}VuIEZeJkjo!eiOuS ziGQxT^=@f_Q|KOzNJZNIl17N@FdZLL&d|7Yw^KC-OOzvXu=Xz1oxY_WvDyG=fB)gE z#PjDrOTJ~}U^S|`JaX}=NQ-&c5XFb21)49-Awj0M#LRK~yU`gw$c<{pzJ%AAviS3U zo~!qpMVug^K4nH#ELqnF-ssM+3A6SIrJ0T}8&X+g28A?7G*6)D5Qx!Eqz}`>6Df)p zEFLQayK(#LEWDyOU${mVd7;PO|I_)GB{jY)d(Rc0Z*q(0P7S@|(Gax0DvNw7uj30n zZrYpxR*kM(@=3kXf{bUhziX{k=lVcdi|gjYvRl~DtbBGu0TqYsfaEi0gT(iD@25?k zX{W_;hq&G#?U*-wn4i^Gsr2eiam%9mE9lqeZ-s+8YP#A9ez256jc%Pm*(MwHQYkLE zOYyE9Lenbow{BC=Pw?bVKh@u(DI_dtmV>~UIZyIaNa1Y5R=T1`%9mN`l}N1N=+mCu z@l^(fohP-lqgb7ClNfZb6hG4u;=;&^E>%)tzEOm3K6NuMRc%?F*!UE93FH($;(4YZ zb}5AdA#uXIq_+CfbJ#!v5E|>PBw4qUG6y-wVY*)9@ z6aSHr*yB2PZ76eY^lUh|DQc>BI|S~xM~c+Kqwj&ymNJHIq{p*wSQU)9-?R38@?LI} zq0j0*QkVkzoZ-}ErKpBZ`qQFXm=eN6!{%7mse4f@N#4|(3weTz^$en;J=;G0LfzGu zn3larmTv8qEOb`~cE(=~uAXLkxTmBhop;u9`COgr!hY8jvS9?Ih$R`R>zF!^cS86?$YVWT-q5P=o zm<6`m8)81bGwsvgev#kGursMmeQ+l3Zl;?Pj>>=DwIjkgSU5?Jef%sO>RVj&bt@NuCuu@i626BJ{DYy8N+)KI6vR>c~Cj9 zXeGoUzQx{gQfj(YwJ*q3N~fUpx(O^_Ubd+B^bOAfwZy^KX$}}C#fD111=Uhyk7}Y> zT%so*GTb2Y>J?d);STW#zBq@En2>$HIz8TNFd1>7A){PJN~$`Pz6_Sqt3#g8bo7R! 
zL>-rK0=2^_%zAcy+cfI4UBmB3~n)k!+gNg>R1}|S>0%=-D=1}-$hNN z#H%2hPZwx?l>{HAX=MyG;mHm9IF4cxy+65zf3YRgjb(M4Ka}mc53Ocpp3{zY-{ybcR4rs@$9}{mC1Zos#kHc#w2Rfv z3yc%iSFF7$jb}cjvWF&A$hf}IVi)V!xwM{@Ykc%Cr1ptIWkWT^o+89 zV`~97t++R}q=V>?_Ql3ik&dn3*gv!|t|r&^|B9Ri$-0r@0; zkNf>x%m>J|3UH5DezzazVzb0thY$bE^^bG01#S=RAmIFG?J@B*;`_N6@q65nzs`St zXZZe^F?YZi`cH;|5Wx6aS^94n{(ol7A28nhC*yoCz?gqP^KTe|e`YKkFb4mVanT!K zOy^_%4I}u^jKu@Sz<)9}Kru}FMHddH`4G{dr zIkb&uP~kg3@QmA>q}88EKp=ekRqzaw(P$PtBX9rT7^_-88LW6;w6%2gph8epJw0t9 z5ts;sLeTD+@0h1Jg0TXf-7APm+ z;VGe05DEoH2Mn-8P|w2&B?UEeao>ZYT!tqW8^SNvmrWjL^o+)4Ga3kXE?4&MkIR4fpH0@BvdQ&Mr#*xTFN z-Q5NHpR&LIQ^hO5(}9b zsRJPLyosNSzmpEeKQ{Gq^8ZBvd<5*07U28;S%`}Ma^ZhA^6MFq6yQh>4&ooJf%EZa z1pcvS;{5NM|9Or--vSrnNDmeR2EhfB-~#O7TcmkYz!YHTrx9TG01B-CZVk8zfPU~6 z(BlqG5f@-d3bBWm1PC2q99%d-RxnAG_a48p5C=+^BID}WL5 zA}n`&?W4{!`L5c<%rsg;?T39C5)U89o|^ zdzb)x1g)|*ge{6Q++u-u%nd(Wsn60JBB+rPrP<*2kxkuD(I^spLNisc3MM!;|_}#a2+m8kMSCt8NR-g~hzopok z5#h|{nXB@60XauCZ_UCu7L*E}rPr!`pO{C4U%YX?bb_4--oIK~=QH+j+;e^0HqyfM zP@+_<><1LZDbhm+wo<0*Zx2hlpE%X;o-UzPCV830 zTytU5pvN|Lqx{GU1Ha(T1T-Y9#d? zv(ByiC570t&(jv-T$^WVs%78}yg~H#V!jf0_Ef?h^ka1H-R&U8^^QM;st(_Z?ehz%dX zhYumgZsy)G71@>qcF{s)j_r&y8&!IyC*1>z)X9-UKILl{Z7Sq6=2Z3lHb~X?E+E8( zGxC01=f!JRjSJ4Gzj%(tT^8qGad_NgZY29?T=-#*WSa0U{5c2qD(FnSK#00PLc}o( z_EqtO5A(S)n;re#Nb$vi=;XUwItg*|T>Ots3v3-KI1=G^-Ga<+(Ow*E?&PUq?}X80 zpUKYFxOSPJz(Y!>N`3Upqdq22+}+_)2xZ&1zK45ariv)E#ZcHQAz5zzI!qRrshvx} z1a>mWG-um^8?i_FsVJI8Sg+gQo94G$@|QO1W8y+9%5E3_>KZ>*F~I#(Fjld!NZ>s2i%1xjAuu z_TQH7-f`V2zUdmbHio=EC25-SwJ$9%@PUu~x5X%d0R|{KlxOpcB(2Vka>y)kB*Xc- ztu2)drQVGt>$fucS=V&-EmTG`xAdE0JEHD%*jHwn32X(Yj+nd+cy5ya zlqiJ(h_eef|M`)kBFeUjO+(Uxsri?@4LL1Z4n8Xn?>+F&*eKHNAKIs%O1k4e^jI&#Uh*M7>d=&-v{SPXGbI}HftcZtlOIAQ27hD$wWL? 
z6?C$UIkWQj1)CU0P8bTcIYw_eFsm!*tqtj{I|)3|YVqbmGELgolF3U=(;$x_1n}EYgspwFDP;; zu+xCPUhfhk)eFPr4pp+1fypIxPp*bFCm%ipLV(xWT=```_U3!DVyt$ju;iqKzI*>p zj}y9BW)JlKx;gWwP9tz>4xcZHYd(ydM7@Y6CznSw;$|{}?;`t-^SC-xrBHJsbK|x_ zl+?CAu5;6ysxaD~Xb?`9JV0#DgdJO6w+PL%3q*E&DopbC8jMH=W7(XXMig6m1hl2E zp;j%~13SIioX~^uK2XtEo;s^Z$n6cT=@EL|vSLduS6<-}b@Kcyv^V^pb3xRfa{=2y zF4+A&7X&Z;$OVcgf6oO=$de6BwCOZQABIp$UwdErbmgr#@mg( z*~r2Die;RO?8%2oe9bdf?uuGGd1&XjxlX512E(;SlKotk?9^B*nI5!*a7yoku%fnc zLIZL)dWsAF4e5wVWFvpxJV!424avF0BQU6YbiTcQXQ(%QE7=1XL@M*6Ma*ff_OlN; z?lVz7oUpHqCN$^28h4d|B{F%O&Yj|@F+0`!J|>^;TO_0Kr7IB7Z zm6suYLW9BB?-23l$wr7H!UL1Z`6_Rz-@_eyBk-BF!Omx;72?lUvVwM@SQeFph$XXJ;XPu)Gy%vo7Y=oZ$U&$hc92>q$9nW*07`tk}a zXc?g-k*0lYd);NToNZhkrXHQgLqpQyTJrj~5^-XEz-yuPFpEO|0IgMUU)~dCym*#oacp+=bBXJj1sUF+;TmI#Y#$eyv}JZ%u=k#d#l`+b*?X1 z5amey2@^v3+`{aO8>>Sy6@)fE4w7)EvgtX`xAmU7%#S0cdKzyLt}R;kIhLSU%xLn# zr4PP2J2VJn7>+U6n=nIGpT}`TUe44C@)h+tTm9?t%E|Wj@xsgB`*T# zBY?0Ua~_Bna9}KYjOymvPJXCbs`+3N~24wy{*%9yD*C*qkOJUDr) z;g)vQxno)oqbCAEz9SY9$ICeHHMoK#61xQAbWdN^vC7o$GwT@2?vXX>x>(N~MI z>U_znnex&ZS=&2>wzpfjlYKN|T5De}yQN8`OOp<<#a?O&dZ`zB9~OMuXg$hYOA}$P ze3iFM_ALaw!RA4LCcN5|)MXx^{{(hU{A**DXvtZY3Z_bDvh4B;H4X#NYg-go8&oJS zmSzPfb=jz~WQLl@P!A~|`3zG-stlH0*v|2;eT06bvq~$y7dy7W$(eWpy#BQQn@xs# z4EQR!NZq1|tIl=1rkMd_*y2hY47sEIluB?<>0M%?wxeo&E0cY&9EhaKgR%0;3FjLI zQRc%O$qjzbHd2gF7vFw+vdiO^_LK{gHT-AeI|OmC1Qq(1 zypaz?!H;5c&=a&8m>iSYd|4qwfAOk4qv3LOCd*yy z*m&Z|1ZPcwD+olRy=*B1-#_PqxIgED;|IB5=l5I?p70|VoO$(oF36BRm-c`s;#I1X z8bxkvHCY{jQNv2#r=nRdG+aJNaJQf>J8RE|H=Luowclh}n8|1^dEs`hSjbcLF0r?M zwAZ-js#+3C3v2@SC_Lo%x?1!ki=yL|g#ky>&SQzhGG5D+RlHKoJ?uOO)K>GTQ zX2o5)^hxC1F%~A>${WL1o(VJ?1r3sXh+m*wY6|c(xCb-B?C*x9dmuNfta@YLWJ+K! 
zdAY1*Gzr*H1bUR|Rx)PY?tiN}zs}Fp%a>v>%%DSNYCXWGI;?scMU6m=b|5|IADvE; zxqP)++TZb{x5mP28l#0bMJ}SHC5SW$EtivO3x$Tp9Ucpaje>Wp# z@_ZW==462V9m4i`ok#guy%n;r-xfD7D!+z&Y5H0?prNFx9_s~7Dpcvx7?5Z*Q!W){ zm%J8Z-_AE9A9F97gl2*xf99Fi9%UhJLG>XBY(3{fd=|-{tbi3A~h+Nh+a>c)xBEe)iTr4!7^5;z=26<2*NCV++^&mk(8YFbQ3H}Ed$Lfq%h zF5Rcz!vPVeG!o26bMAC+S~|+8DtB4>?ze>s)WE~iz-VJ}omRqA*>_9|#+=ejJ)gdp z+@kF@$w2axKwi+Ey&-$IzJunRpqh0F?vYMYg#A?7UB-BK@~wqD-o-jv!O`w*k3PPx zs%!Kst^~{Xc1sqzs(d?QZu(cv&_CLfRT9fnFkZP(YrnAHIYpNi`@E6PB`5FU-2Ith zhfnF)w(yc08=^b(F80q{1?MD72UO=y5Pe#@KuvL>ZiaW0T1Saacj{D3t={oeQ$u;P z&mZkr{TbIP6T&z_td)g zZv5%+UO(>h`CU`~DUK)X;Z=@p-XMMr;ewko4h{3PhS(CHeCv<{M!m)z zXVbg~y1(8j?oqOHDxlfA=X9Rjlcz@1NcrmPP*h@-Ger>bL?O!ai)Gzvjq`F%#`;sz zX+^6nu*KdW!>OGakG{4m+$K65@vX`Ovr!3|jy4!F?|J+75L&XCkaD4U(>C|}#wtDw3ue~q5zA*WuT*$b$9c4?jOpq{ zudEl{pjxbITA-USbu)D*VP(80yCut2#>UY0xVc_-%>fdze zC(j1JpOTte78HxU=cHhk<|~JisH=$FY;E#O*KhX(lz!~(yULTp+U_iwXZyQHncol@ z5*r?C1~;xcH8!WXyc2fksI0#_-`Mu@K(Hrv*gquNg*WwtDByilQ@W^FCT|*m_aFaX z*{`*kWFQXmz%zXNRSs)24Se`GcqaYGiQnr$(+FaJ6`(e=K4<@199aLV&HTzp`7?w6 zfRXrjM(`5A_%i)_-u)Lw>Yo{>4j2i4XM`sJjCm~2f8$5mpBVxNjQGDZVq^fu%t+vW z!$|)#9l$az`MN{wZ1u?k9U(fztO|x-vbikL(KzLvO z^PY16KL7hYk7mdFT$266=ffIg%=D2rk#0a-*N0ht^I7n(dmj2dfcUsR02r3Q7~g)C zgScL9fE~ou;RoaJyA#1LC^5{5!*w11Ji++F>}Ppd7lF6cm(nbvaH$IqVLK zgTO<4bJWZi`0WyE&aw2I*ir-0l0Y@uK)l5 literal 0 HcmV?d00001 diff --git a/testing/btest/scripts/base/protocols/http/flip-content-line-orig.zeek b/testing/btest/scripts/base/protocols/http/flip-content-line-orig.zeek new file mode 100644 index 0000000000..320462fbd4 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/flip-content-line-orig.zeek @@ -0,0 +1,19 @@ +# @TEST-DOC: Flipping roles of a HTTP connection didn't flip the content line analyzers, resulting in inconsistent deliveries. 
Regression test for #3789 + +# Pcap contains a POST of the Zeek logo, expecting SHA1 1991cedee47909e324ac1b8bee2020d5690891e1 in files.log +# @TEST-EXEC: zeek -b -r $TRACES/http/zeek-image-post-1080-8000-x.pcap %INPUT +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p history service < conn.log > conn.log.cut +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p host method uri version user_agent status_code status_msg < http.log > http.log.cut +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p analyzers mime_type sha1 < files.log > files.log.cut +# @TEST-EXEC: btest-diff conn.log.cut +# @TEST-EXEC: btest-diff http.log.cut +# @TEST-EXEC: btest-diff files.log.cut + +@load base/protocols/conn +@load base/protocols/http +@load base/files/hash + +event file_new(f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_SHA1); + } diff --git a/testing/btest/scripts/base/protocols/http/flip-content-line-resp.zeek b/testing/btest/scripts/base/protocols/http/flip-content-line-resp.zeek new file mode 100644 index 0000000000..4123a34321 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/flip-content-line-resp.zeek @@ -0,0 +1,19 @@ +# @TEST-DOC: Flipping roles of a HTTP connection didn't flip the content line analyzers, resulting in inconsistent deliveries. 
Regression test for #3789 + +# Pcap contains a download of the Zeek logo, expecting SHA1 1991cedee47909e324ac1b8bee2020d5690891e1 in files.log +# @TEST-EXEC: zeek -b -r $TRACES/http/zeek-image-1080-80-x.pcap %INPUT +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p history service < conn.log > conn.log.cut +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p host method uri version user_agent status_code status_msg < http.log > http.log.cut +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p analyzers mime_type sha1 < files.log > files.log.cut +# @TEST-EXEC: btest-diff conn.log.cut +# @TEST-EXEC: btest-diff http.log.cut +# @TEST-EXEC: btest-diff files.log.cut + +@load base/protocols/conn +@load base/protocols/http +@load base/files/hash + +event file_new(f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_SHA1); + } From 64ea0af04d25f992745f02002cb658bf8779943f Mon Sep 17 00:00:00 2001 From: Benjamin Bannier Date: Thu, 4 Jul 2024 14:25:18 +0200 Subject: [PATCH 65/89] Bump auxil/spicy to latest development snapshot --- auxil/spicy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auxil/spicy b/auxil/spicy index b8299609eb..90f8281322 160000 --- a/auxil/spicy +++ b/auxil/spicy @@ -1 +1 @@ -Subproject commit b8299609eb579722d1ba9b261f00660f5cc84e10 +Subproject commit 90f8281322a13c52a270b50764cbc85633c5b74e From 036ed95a3adec89f77f490112d867de7a0ca439f Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Thu, 4 Jul 2024 13:38:49 +0200 Subject: [PATCH 66/89] ci/macos: Only use sonoma image --- .cirrus.yml | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/.cirrus.yml b/.cirrus.yml index ab50a7bc73..1323ce8e95 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -338,18 +338,15 @@ alpine_task: << : *RESOURCES_TEMPLATE << : *CI_TEMPLATE -# Apple doesn't publish official long-term support timelines. -# We aim to support both the current and previous macOS release. 
+# Cirrus only supports the following macos runner currently, selecting +# anything else automatically upgrades to this one. +# +# ghcr.io/cirruslabs/macos-runner:sonoma +# +# See also: https://cirrus-ci.org/guide/macOS/ macos_sonoma_task: macos_instance: - image: ghcr.io/cirruslabs/macos-sonoma-xcode:latest - prepare_script: ./ci/macos/prepare.sh - << : *CI_TEMPLATE - << : *MACOS_ENVIRONMENT - -macos_ventura_task: - macos_instance: - image: ghcr.io/cirruslabs/macos-ventura-base:latest + image: ghcr.io/cirruslabs/macos-runner:sonoma prepare_script: ./ci/macos/prepare.sh << : *CI_TEMPLATE << : *MACOS_ENVIRONMENT From 36dfe89b59c17e42d86c80699079160f5636560e Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Thu, 4 Jul 2024 15:10:26 +0200 Subject: [PATCH 67/89] CMakeLists: Ensure Threads::Threads target exists Fix failure on OSX with CMake 3.30 complaining about missing Threads::Threads target. --- CMakeLists.txt | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 13ea5a5fe2..c4c7aa9990 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -306,6 +306,9 @@ function (zeek_add_dependencies dep) endforeach () endfunction () +# Used by library zeek_dynamic_plugin_base and for sanitizer builds. +find_package(Threads REQUIRED) + # Interface library for propagating extra flags and include paths to dynamically # loaded plugins. Also propagates include paths and C++17 mode on the install # interface. @@ -640,11 +643,6 @@ if (NOT BINARY_PACKAGING_MODE) endif () if (ZEEK_SANITIZERS) - # Check the thread library info early as setting compiler flags seems to - # interfere with the detection and cause CMAKE_THREAD_LIBS_INIT to not include - # -lpthread when it should. 
- find_package(Threads) - string(REPLACE "," " " _sanitizer_args "${ZEEK_SANITIZERS}") separate_arguments(_sanitizer_args) set(ZEEK_SANITIZERS "") From b6be7df5408cd0e11a5202e8dac47afbdc796400 Mon Sep 17 00:00:00 2001 From: Benjamin Bannier Date: Thu, 4 Jul 2024 16:16:34 +0200 Subject: [PATCH 68/89] Bump auxil/zeek-aux --- auxil/zeek-aux | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auxil/zeek-aux b/auxil/zeek-aux index 338ece9314..d4d091b014 160000 --- a/auxil/zeek-aux +++ b/auxil/zeek-aux @@ -1 +1 @@ -Subproject commit 338ece93146c594f497f5fd370fd5dde23186d2e +Subproject commit d4d091b014671df468a51ba8161ed3e8e2001e1c From a53e28be870c025e12bf57a83660f43edc8d3ab2 Mon Sep 17 00:00:00 2001 From: Vern Paxson Date: Sat, 6 Jul 2024 16:44:27 -0700 Subject: [PATCH 69/89] minor script optimization updates to reflect recent changes, Coverity findings --- src/script_opt/Expr.cc | 14 +++++++------- src/script_opt/Expr.h | 8 +++++--- src/script_opt/FuncInfo.cc | 1 - .../btest/Baseline.zam/bifs.from_json-5/.stderr | 2 +- .../btest/Baseline.zam/bifs.from_json-6/.stderr | 3 ++- .../btest/Baseline.zam/bifs.from_json-6/.stdout | 2 ++ testing/btest/Baseline.zam/bifs.from_json/.stdout | 2 ++ .../core.file-analyzer-violation/.stderr | 2 ++ .../core.file-analyzer-violation/files.log | 1 + .../btest/Baseline.zam/opt.ZAM-bif-tracking/output | 2 +- testing/btest/language/record-chain-assign.zeek | 1 + testing/btest/opt/ZAM-bif-tracking.zeek | 1 - 12 files changed, 24 insertions(+), 15 deletions(-) create mode 100644 testing/btest/Baseline.zam/core.file-analyzer-violation/.stderr diff --git a/src/script_opt/Expr.cc b/src/script_opt/Expr.cc index 5e6f0dcc40..1ad24c5277 100644 --- a/src/script_opt/Expr.cc +++ b/src/script_opt/Expr.cc @@ -2858,7 +2858,7 @@ static NameExprPtr get_RFU_LHS_var(const Stmt* s) { auto s_e = s->AsExprStmt()->StmtExpr(); auto var = s_e->GetOp1()->GetOp1()->GetOp1(); ASSERT(var->Tag() == EXPR_NAME); - return cast_intrusive(var); + return 
cast_intrusive(std::move(var)); } // This one mines out the RHS, so 'y' for "x$foo = y$bar", or for @@ -2874,7 +2874,7 @@ static NameExprPtr get_RFU_RHS_var(const Stmt* s) { var = rhs->GetOp2()->GetOp1(); ASSERT(var->Tag() == EXPR_NAME); - return cast_intrusive(var); + return cast_intrusive(std::move(var)); } RecordFieldUpdatesExpr::RecordFieldUpdatesExpr(ExprTag t, const std::vector& stmts, @@ -2952,7 +2952,7 @@ ExprPtr RecordFieldUpdatesExpr::Reduce(Reducer* c, StmtPtr& red_stmt) { ExprPtr AssignRecordFieldsExpr::Duplicate() { auto e1 = op1->Duplicate(); auto e2 = op2->Duplicate(); - return SetSucc(new AssignRecordFieldsExpr(e1, e2, lhs_map, rhs_map)); + return SetSucc(new AssignRecordFieldsExpr(std::move(e1), std::move(e2), lhs_map, rhs_map)); } void AssignRecordFieldsExpr::FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const { @@ -3027,13 +3027,13 @@ FieldExprPtr ConstructFromRecordExpr::FindRecordSource(const Expr* const_e) { if ( rhs_rec->Tag() != EXPR_NAME ) return nullptr; - return cast_intrusive(fa_rhs); + return cast_intrusive(std::move(fa_rhs)); } ExprPtr ConstructFromRecordExpr::Duplicate() { auto e1 = op1->Duplicate(); auto e2 = op2->Duplicate(); - return SetSucc(new ConstructFromRecordExpr(e1, e2, lhs_map, rhs_map)); + return SetSucc(new ConstructFromRecordExpr(std::move(e1), std::move(e2), lhs_map, rhs_map)); } bool ConstructFromRecordExpr::IsReduced(Reducer* c) const { return op1->HasReducedOps(c) && op2->IsReduced(c); } @@ -3066,7 +3066,7 @@ ExprPtr ConstructFromRecordExpr::Reduce(Reducer* c, StmtPtr& red_stmt) { ExprPtr AddRecordFieldsExpr::Duplicate() { auto e1 = op1->Duplicate(); auto e2 = op2->Duplicate(); - return SetSucc(new AddRecordFieldsExpr(e1, e2, lhs_map, rhs_map)); + return SetSucc(new AddRecordFieldsExpr(std::move(e1), std::move(e2), lhs_map, rhs_map)); } void AddRecordFieldsExpr::FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const { @@ -3084,7 +3084,7 @@ void AddRecordFieldsExpr::FoldField(RecordVal* rv1, RecordVal* rv2, 
size_t i) co auto sum = add_expr->Eval(nullptr); ASSERT(sum); - rv1->Assign(lhs_map[i], sum); + rv1->Assign(lhs_map[i], std::move(sum)); } CoerceToAnyExpr::CoerceToAnyExpr(ExprPtr arg_op) : UnaryExpr(EXPR_TO_ANY_COERCE, std::move(arg_op)) { diff --git a/src/script_opt/Expr.h b/src/script_opt/Expr.h index 31ebeaf2b7..f1b8427319 100644 --- a/src/script_opt/Expr.h +++ b/src/script_opt/Expr.h @@ -143,7 +143,8 @@ public: protected: // Used for duplicating. AssignRecordFieldsExpr(ExprPtr e1, ExprPtr e2, std::vector _lhs_map, std::vector _rhs_map) - : RecordFieldUpdatesExpr(EXPR_REC_ASSIGN_FIELDS, e1, e2, _lhs_map, _rhs_map) {} + : RecordFieldUpdatesExpr(EXPR_REC_ASSIGN_FIELDS, std::move(e1), std::move(e2), std::move(_lhs_map), + std::move(_rhs_map)) {} void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const override; }; @@ -170,7 +171,7 @@ public: protected: ConstructFromRecordExpr(ExprPtr e1, ExprPtr e2, std::vector _lhs_map, std::vector _rhs_map) - : AssignRecordFieldsExpr(e1, e2, _lhs_map, _rhs_map) { + : AssignRecordFieldsExpr(std::move(e1), std::move(e2), std::move(_lhs_map), std::move(_rhs_map)) { tag = EXPR_REC_CONSTRUCT_WITH_REC; } @@ -191,7 +192,8 @@ public: protected: AddRecordFieldsExpr(ExprPtr e1, ExprPtr e2, std::vector _lhs_map, std::vector _rhs_map) - : RecordFieldUpdatesExpr(EXPR_REC_ADD_FIELDS, e1, e2, _lhs_map, _rhs_map) {} + : RecordFieldUpdatesExpr(EXPR_REC_ADD_FIELDS, std::move(e1), std::move(e2), std::move(_lhs_map), + std::move(_rhs_map)) {} void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const override; }; diff --git a/src/script_opt/FuncInfo.cc b/src/script_opt/FuncInfo.cc index ed2ec6466f..8b6314dd4d 100644 --- a/src/script_opt/FuncInfo.cc +++ b/src/script_opt/FuncInfo.cc @@ -142,7 +142,6 @@ static std::unordered_map func_attrs = { {"Spicy::__toggle_analyzer", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"Supervisor::__create", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"Supervisor::__destroy", ATTR_NO_SCRIPT_SIDE_EFFECTS}, - 
{"Supervisor::__init_cluster", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"Supervisor::__is_supervised", ATTR_IDEMPOTENT}, {"Supervisor::__is_supervisor", ATTR_IDEMPOTENT}, {"Supervisor::__node", ATTR_IDEMPOTENT}, diff --git a/testing/btest/Baseline.zam/bifs.from_json-5/.stderr b/testing/btest/Baseline.zam/bifs.from_json-5/.stderr index ed0056356c..d7ed338df6 100644 --- a/testing/btest/Baseline.zam/bifs.from_json-5/.stderr +++ b/testing/btest/Baseline.zam/bifs.from_json-5/.stderr @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -error in <...>/from_json.zeek, line 4: tables are not supported (from_json([], ::#0, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 4: cannot convert JSON type 'array' to Zeek type 'table' (from_json([], ::#0, from_json_default_key_mapper)) diff --git a/testing/btest/Baseline.zam/bifs.from_json-6/.stderr b/testing/btest/Baseline.zam/bifs.from_json-6/.stderr index 7a7c048f3c..b1ddbdd07a 100644 --- a/testing/btest/Baseline.zam/bifs.from_json-6/.stderr +++ b/testing/btest/Baseline.zam/bifs.from_json-6/.stderr @@ -1,2 +1,3 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-error in <...>/from_json.zeek, line 5: wrong port format, must be <...>/(tcp|udp|icmp|unknown)/ (from_json("80", ::#0, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 8: wrong port format, string must be <...>/(tcp|udp|icmp|unknown)/ (from_json("80", ::#2, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 9: wrong port format, object must have 'port' and 'proto' members (from_json({}, ::#4, from_json_default_key_mapper)) diff --git a/testing/btest/Baseline.zam/bifs.from_json-6/.stdout b/testing/btest/Baseline.zam/bifs.from_json-6/.stdout index aee95c8a8e..a4da3aa3e4 100644 --- a/testing/btest/Baseline.zam/bifs.from_json-6/.stdout +++ b/testing/btest/Baseline.zam/bifs.from_json-6/.stdout @@ -1,2 +1,4 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +[v=80/tcp, valid=T] +[v=, valid=F] [v=, valid=F] diff --git a/testing/btest/Baseline.zam/bifs.from_json/.stdout b/testing/btest/Baseline.zam/bifs.from_json/.stdout index 24f35f7b9b..584cb2ca60 100644 --- a/testing/btest/Baseline.zam/bifs.from_json/.stdout +++ b/testing/btest/Baseline.zam/bifs.from_json/.stdout @@ -5,4 +5,6 @@ aa:bb::/32, }, se={ [192.168.0.1, 80/tcp] , [2001:db8::1, 8080/udp] +}, tbl={ +[192.168.0.1, 80/tcp] = foo }], valid=T] diff --git a/testing/btest/Baseline.zam/core.file-analyzer-violation/.stderr b/testing/btest/Baseline.zam/core.file-analyzer-violation/.stderr new file mode 100644 index 0000000000..e3f6131b1d --- /dev/null +++ b/testing/btest/Baseline.zam/core.file-analyzer-violation/.stderr @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+received termination signal diff --git a/testing/btest/Baseline.zam/core.file-analyzer-violation/files.log b/testing/btest/Baseline.zam/core.file-analyzer-violation/files.log index abef87d6a6..3b26732673 100644 --- a/testing/btest/Baseline.zam/core.file-analyzer-violation/files.log +++ b/testing/btest/Baseline.zam/core.file-analyzer-violation/files.log @@ -8,3 +8,4 @@ #fields ts fuid uid id.orig_h id.orig_p id.resp_h id.resp_p source depth analyzers mime_type filename duration local_orig is_orig seen_bytes total_bytes missing_bytes overflow_bytes timedout parent_fuid #types time string string addr port addr port string count set[string] string string interval bool bool count count count count bool string XXXXXXXXXX.XXXXXX FKPuH630Tmj6UQUMP7 - - - - - ./myfile.exe 0 PE application/x-dosexec - 0.000000 - - 64 - 0 0 F - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output b/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output index 542c7fe5c1..795f0f84d7 100644 --- a/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output +++ b/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -534 seen BiFs, 0 unseen BiFs (), 0 new BiFs () +533 seen BiFs, 0 unseen BiFs (), 0 new BiFs () diff --git a/testing/btest/language/record-chain-assign.zeek b/testing/btest/language/record-chain-assign.zeek index acccacc7cc..ac896760e6 100644 --- a/testing/btest/language/record-chain-assign.zeek +++ b/testing/btest/language/record-chain-assign.zeek @@ -1,5 +1,6 @@ # @TEST-DOC: Test for correct ZAM optimization of record "chains". 
# +# @TEST-REQUIRES: test "${ZEEK_USE_CPP}" != "1" # @TEST-EXEC: zeek -b -O ZAM %INPUT >output # @TEST-EXEC: btest-diff output diff --git a/testing/btest/opt/ZAM-bif-tracking.zeek b/testing/btest/opt/ZAM-bif-tracking.zeek index a38bcc5e6d..b6c027f8b1 100644 --- a/testing/btest/opt/ZAM-bif-tracking.zeek +++ b/testing/btest/opt/ZAM-bif-tracking.zeek @@ -170,7 +170,6 @@ global known_BiFs = set( "Spicy::__toggle_analyzer", "Supervisor::__create", "Supervisor::__destroy", - "Supervisor::__init_cluster", "Supervisor::__is_supervised", "Supervisor::__is_supervisor", "Supervisor::__node", From 74ab980992b9c6e3e17b85ca568e78a2335df6ef Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Mon, 8 Jul 2024 12:56:44 -0700 Subject: [PATCH 70/89] Update submodules [nomail] --- auxil/bifcl | 2 +- auxil/binpac | 2 +- auxil/broker | 2 +- auxil/btest | 2 +- auxil/gen-zam | 2 +- auxil/package-manager | 2 +- auxil/zeek-aux | 2 +- auxil/zeek-client | 2 +- auxil/zeekctl | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/auxil/bifcl b/auxil/bifcl index 22c0317628..fd83a78984 160000 --- a/auxil/bifcl +++ b/auxil/bifcl @@ -1 +1 @@ -Subproject commit 22c031762832b72c2f7b4ac8bbe8102d66b09ccc +Subproject commit fd83a789848b485c81f28b8a6af23d28eca7b3c7 diff --git a/auxil/binpac b/auxil/binpac index 822cdb551b..7db629d4e2 160000 --- a/auxil/binpac +++ b/auxil/binpac @@ -1 +1 @@ -Subproject commit 822cdb551bf6c2e7c18f16c7f088c61675ae588b +Subproject commit 7db629d4e2f8128e3e27aa28200106fa6d553be0 diff --git a/auxil/broker b/auxil/broker index c529c38de3..c47de11e4b 160000 --- a/auxil/broker +++ b/auxil/broker @@ -1 +1 @@ -Subproject commit c529c38de3d540953e799a83c44683a3413a1a14 +Subproject commit c47de11e4b84f24e8b501c3b1a446ad808e4964a diff --git a/auxil/btest b/auxil/btest index 46f982cd6f..989c7513c3 160000 --- a/auxil/btest +++ b/auxil/btest @@ -1 +1 @@ -Subproject commit 46f982cd6fafd34639c2f97628a57f1457f7e56a +Subproject commit 989c7513c3b6056a429a5d48dacdc9a2c1b216a7 
diff --git a/auxil/gen-zam b/auxil/gen-zam index 376de10133..396723c04b 160000 --- a/auxil/gen-zam +++ b/auxil/gen-zam @@ -1 +1 @@ -Subproject commit 376de10133c100948a2875258d11ab97b361467c +Subproject commit 396723c04ba1f8f2f75555745a503b8edf353ff6 diff --git a/auxil/package-manager b/auxil/package-manager index ba0354c84f..bdc15fab95 160000 --- a/auxil/package-manager +++ b/auxil/package-manager @@ -1 +1 @@ -Subproject commit ba0354c84f8afb7804afe7d673081edfa712ad5c +Subproject commit bdc15fab95b1ca2bd370fa25d91f7879b5da35fc diff --git a/auxil/zeek-aux b/auxil/zeek-aux index d4d091b014..f44475b8ee 160000 --- a/auxil/zeek-aux +++ b/auxil/zeek-aux @@ -1 +1 @@ -Subproject commit d4d091b014671df468a51ba8161ed3e8e2001e1c +Subproject commit f44475b8ee5dac9c10eaa64ddcb357d4dc77098b diff --git a/auxil/zeek-client b/auxil/zeek-client index 6c8cb3e1c4..855b037b3f 160000 --- a/auxil/zeek-client +++ b/auxil/zeek-client @@ -1 +1 @@ -Subproject commit 6c8cb3e1c475424880eae968f812805fdbd95cea +Subproject commit 855b037b3f1ed6aefb7ef8674f4d71eef9679242 diff --git a/auxil/zeekctl b/auxil/zeekctl index 8775b9010c..7671450f34 160000 --- a/auxil/zeekctl +++ b/auxil/zeekctl @@ -1 +1 @@ -Subproject commit 8775b9010c545cfb1b1ebbae1541c6f567ab8d5b +Subproject commit 7671450f34c65259463b4fd651a18df3935f235c From 8dc120df1d4841e417dd41cbdba941abacf481b1 Mon Sep 17 00:00:00 2001 From: cknill Date: Mon, 8 Jul 2024 19:24:40 -0600 Subject: [PATCH 71/89] Fix for --display-cmake in configure Moved build directory creation further down in the script so that --display-cmake has a chance to happen before build tree setup. 
--- configure | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/configure b/configure index 40f052a220..b365b7dd2a 100755 --- a/configure +++ b/configure @@ -458,6 +458,19 @@ if [ -z "$CMakeCommand" ]; then fi fi +echo "Using $(cmake --version | head -1)" +echo +if [ -n "$CMakeGenerator" ]; then + cmake="${CMakeCommand} -G ${CMakeGenerator} ${CMakeCacheEntries} ${sourcedir}" +else + cmake="${CMakeCommand} ${CMakeCacheEntries} ${sourcedir}" +fi + +if [ "${display_cmake}" = 1 ]; then + echo "${cmake}" + exit 0 +fi + if [ -d $builddir ]; then # If build directory exists, check if it has a CMake cache if [ -f $builddir/CMakeCache.txt ]; then @@ -474,19 +487,6 @@ echo "Build Directory : $builddir" echo "Source Directory: $sourcedir" cd $builddir -echo "Using $(cmake --version | head -1)" -echo -if [ -n "$CMakeGenerator" ]; then - cmake="${CMakeCommand} -G ${CMakeGenerator} ${CMakeCacheEntries} ${sourcedir}" -else - cmake="${CMakeCommand} ${CMakeCacheEntries} ${sourcedir}" -fi - -if [ "${display_cmake}" = 1 ]; then - echo "${cmake}" - exit 0 -fi - eval ${cmake} 2>&1 echo "# This is the command used to configure this build" >config.status From 3ecacf4f509eb63ca807e8309514a5469b4a7433 Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Thu, 4 Jul 2024 15:20:42 -0700 Subject: [PATCH 72/89] Comment-only tweaks for telemetry-related settings. These weren't quite accurate any more. 
--- scripts/base/frameworks/telemetry/options.zeek | 2 +- scripts/policy/frameworks/telemetry/prometheus.zeek | 4 ---- scripts/site/local.zeek | 4 ++-- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/scripts/base/frameworks/telemetry/options.zeek b/scripts/base/frameworks/telemetry/options.zeek index 9e5adf1fbb..6aa05f5f9b 100644 --- a/scripts/base/frameworks/telemetry/options.zeek +++ b/scripts/base/frameworks/telemetry/options.zeek @@ -11,7 +11,7 @@ export { const metrics_address = getenv("ZEEK_DEFAULT_LISTEN_ADDRESS") &redef; ## Port used to make metric data available to Prometheus scrapers via - ## HTTP. + ## HTTP. The default value means Zeek won't expose the port. const metrics_port = 0/unknown &redef; ## ID for the metrics exporter. This is used as the 'endpoint' label diff --git a/scripts/policy/frameworks/telemetry/prometheus.zeek b/scripts/policy/frameworks/telemetry/prometheus.zeek index b1d9374e8b..2b2ac4d255 100644 --- a/scripts/policy/frameworks/telemetry/prometheus.zeek +++ b/scripts/policy/frameworks/telemetry/prometheus.zeek @@ -2,10 +2,6 @@ ##! from the cluster node configuration for exporting data to ##! Prometheus. ##! -##! For customization or disabling, redef the involved Telemetry options -##! again. Specifically, to disable listening on port 9911, set -##! :zeek:see:`Telemetry::metrics_port` to `0/unknown` again. -##! ##! The manager node will also provide a ``/services.json`` endpoint ##! for the HTTP Service Discovery system in Prometheus to use for ##! configuration. This endpoint will include information for all of diff --git a/scripts/site/local.zeek b/scripts/site/local.zeek index 328c823975..71251c0cb1 100644 --- a/scripts/site/local.zeek +++ b/scripts/site/local.zeek @@ -94,8 +94,8 @@ redef digest_salt = "Please change this value."; # telemetry_histogram.log. @load frameworks/telemetry/log -# Enable metrics centralization on the manager. 
This opens port 9911/tcp -# on the manager node that can be readily scraped by Prometheus. +# Enable Prometheus metrics scraping in the cluster: each Zeek node will listen +# on the metrics port defined in its Cluster::nodes entry. # @load frameworks/telemetry/prometheus # Uncomment the following line to enable detection of the heartbleed attack. Enabling From 8eb74c04de4f2d042b41c9c52f658dfd34e7c7be Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Fri, 5 Jul 2024 17:15:33 -0700 Subject: [PATCH 73/89] Harden the telemetry manager against unset Telemetry::metrics_address We populate that address from the ZEEK_DEFAULT_LISTEN_ADDRESS environment variable, but weren't prepared for that not being set. We now fall back to 0.0.0.0. This may have the same IPv6 issues that we've encountered elsewhere when doing so before (v6 interfaces need "::") -- but this is still more likely to work than not having any string at all. --- src/telemetry/Manager.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index 1df895dea1..04c47ba3ef 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -36,6 +36,8 @@ void Manager::InitPostScript() { std::string prometheus_url; auto metrics_port = id::find_val("Telemetry::metrics_port")->AsPortVal(); auto metrics_address = id::find_val("Telemetry::metrics_address")->AsStringVal()->ToStdString(); + if ( metrics_address.empty() ) + metrics_address = "0.0.0.0"; if ( metrics_port->Port() != 0 ) prometheus_url = util::fmt("%s:%u", metrics_address.data(), metrics_port->Port()); From 563704a26ec8a48206b9cd75a1314ddea54bf597 Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Mon, 1 Jul 2024 18:28:11 -0700 Subject: [PATCH 74/89] Management framework: add metrics port in management & Supervisor node records This allows setting a metrics port for creation in new nodes. 
--- scripts/base/frameworks/supervisor/api.zeek | 2 ++ scripts/policy/frameworks/management/types.zeek | 3 +++ 2 files changed, 5 insertions(+) diff --git a/scripts/base/frameworks/supervisor/api.zeek b/scripts/base/frameworks/supervisor/api.zeek index 97a286f8c9..e3a6d06c9b 100644 --- a/scripts/base/frameworks/supervisor/api.zeek +++ b/scripts/base/frameworks/supervisor/api.zeek @@ -30,6 +30,8 @@ export { ## The PCAP file name from which the node will read/analyze packets. ## Typically used by worker nodes. pcap_file: string &optional; + ## The TCP port at which the cluster node exposes metrics for Prometheus. + metrics_port: port &optional; }; ## Configuration options that influence behavior of a supervised Zeek node. diff --git a/scripts/policy/frameworks/management/types.zeek b/scripts/policy/frameworks/management/types.zeek index 796c943754..f12cc1d9e5 100644 --- a/scripts/policy/frameworks/management/types.zeek +++ b/scripts/policy/frameworks/management/types.zeek @@ -60,6 +60,7 @@ export { interface: string &optional; ##< Interface to sniff cpu_affinity: int &optional; ##< CPU/core number to pin to env: table[string] of string &default=table(); ##< Custom environment vars + metrics_port: port &optional; ##< Metrics exposure port, for Prometheus }; ## Data structure capturing a cluster's complete configuration. @@ -88,6 +89,8 @@ export { pid: int &optional; ## The node's Broker peering listening port, if any. p: port &optional; + ## The node's metrics port for Prometheus, if any. + metrics_port: port &optional; }; type NodeStatusVec: vector of NodeStatus; From fa6361af56c056ec7093a267c471b047a309d83f Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Mon, 1 Jul 2024 18:31:14 -0700 Subject: [PATCH 75/89] Management framework: propagate metrics port from agent This propagates the metrics port from the node config passed through the supervisor all the way into the script layer. 
--- scripts/base/frameworks/cluster/supervisor.zeek | 2 ++ scripts/policy/frameworks/management/agent/main.zeek | 12 +++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/scripts/base/frameworks/cluster/supervisor.zeek b/scripts/base/frameworks/cluster/supervisor.zeek index 4e6ec51fff..ba0d676c6a 100644 --- a/scripts/base/frameworks/cluster/supervisor.zeek +++ b/scripts/base/frameworks/cluster/supervisor.zeek @@ -49,6 +49,8 @@ function __init_cluster_nodes(): bool @pragma pop ignore-deprecations if ( |manager_name| > 0 && cnode$node_type != Cluster::MANAGER ) cnode$manager = manager_name; + if ( endp?$metrics_port ) + cnode$metrics_port = endp$metrics_port; Cluster::nodes[node_name] = cnode; } diff --git a/scripts/policy/frameworks/management/agent/main.zeek b/scripts/policy/frameworks/management/agent/main.zeek index 7dbe963cf9..81ffe97252 100644 --- a/scripts/policy/frameworks/management/agent/main.zeek +++ b/scripts/policy/frameworks/management/agent/main.zeek @@ -520,6 +520,8 @@ function deploy_request_finish(areq: Management::Request::Request) if ( node?$interface ) cep$interface = node$interface; + if ( node?$metrics_port ) + cep$metrics_port = node$metrics_port; g_cluster[node$name] = cep; } @@ -618,7 +620,8 @@ function get_nodes_request_finish(areq: Management::Request::Request) # the respective boot.zeek scripts). if ( node in sns$node$cluster ) { - cns$cluster_role = sns$node$cluster[node]$role; + local cep: Supervisor::ClusterEndpoint = sns$node$cluster[node]; + cns$cluster_role = cep$role; # For cluster nodes, copy run state from g_nodes, our # live node status table. @@ -627,8 +630,11 @@ function get_nodes_request_finish(areq: Management::Request::Request) # The supervisor's responses use 0/unknown to indicate # unused ports. (Prior to Zeek 7 this used to be 0/tcp.) 
- if ( sns$node$cluster[node]$p != 0/unknown ) - cns$p = sns$node$cluster[node]$p; + if ( cep$p != 0/unknown ) + cns$p = cep$p; + + if ( cep?$metrics_port ) + cns$metrics_port = cep$metrics_port; } else { From 742f7fe34028c13732c69b43e8f9743ff27ddc3c Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Mon, 1 Jul 2024 18:32:05 -0700 Subject: [PATCH 76/89] Management framework: add auto-enumeration of metrics ports This is quite redundant with the enumeration for Broker ports, unfortunately. But the logic is subtly different: all nodes obtain a telemetry port, while not all nodes require a Broker port, for example, and in the metrics port assignment we also cross-check selected Broker ports. I found more unified code actually harder to read in the end. The logic for the two sets remains the same: from a start point, ports get enumerated sequentially that aren't otherwise taken. These ports are assumed available; there's nothing that checks their availability -- for now. The default start port is 9000. I considered 9090, to align with the Prometheus default, but counting upward from there is likely to hit trouble with the Broker default ports (9999/9997), used by the Supervisor. Counting downward is a bit unnatural, and shifting the Broker default ports brings subtle ordering issues. This also changes the node ordering logic slightly since it seems more intuitive to keep sequential ports on a given instance, instead of striping across them. --- .../management/controller/config.zeek | 27 ++- .../management/controller/main.zeek | 208 ++++++++++++++---- 2 files changed, 181 insertions(+), 54 deletions(-) diff --git a/scripts/policy/frameworks/management/controller/config.zeek b/scripts/policy/frameworks/management/controller/config.zeek index 9f96155d9f..0b3d50b4ab 100644 --- a/scripts/policy/frameworks/management/controller/config.zeek +++ b/scripts/policy/frameworks/management/controller/config.zeek @@ -61,16 +61,27 @@ export { ## for websocket clients. 
const default_port_websocket = 2149/tcp &redef; - ## Whether the controller should auto-assign listening ports to cluster - ## nodes that need them and don't have them explicitly specified in - ## cluster configurations. - const auto_assign_ports = T &redef; + ## Whether the controller should auto-assign Broker listening ports to + ## cluster nodes that need them and don't have them explicitly specified + ## in cluster configurations. + const auto_assign_broker_ports = T &redef; + const auto_assign_ports = T &redef &deprecated="Remove in v7.1: replaced by auto_assign_broker_ports."; ## The TCP start port to use for auto-assigning cluster node listening - ## ports, if :zeek:see:`Management::Controller::auto_assign_ports` is - ## enabled (the default) and the provided configurations don't have - ## ports assigned. - const auto_assign_start_port = 2200/tcp &redef; + ## ports, if :zeek:see:`Management::Controller::auto_assign_broker_ports` is + ## enabled (the default) and nodes don't come with those ports assigned. + const auto_assign_broker_start_port = 2200/tcp &redef; + const auto_assign_start_port = 2200/tcp &redef &deprecated="Remove in v7.1: replaced by auto_assign_broker_start_port."; + + ## Whether the controller should auto-assign metrics ports for Prometheus + ## to nodes that need them and don't have them explicitly specified in + ## their cluster configurations. + const auto_assign_metrics_ports = T &redef; + + ## The TCP start port to use for auto-assigning metrics exposition ports + ## for Prometheus, if :zeek:see:`Management::Controller::auto_assign_metrics_ports` + ## is enabled (the default). + const auto_assign_metrics_start_port = 9000/tcp &redef; ## The controller's Broker topic. Clients send requests to this topic. 
const topic = "zeek/management/controller" &redef; diff --git a/scripts/policy/frameworks/management/controller/main.zeek b/scripts/policy/frameworks/management/controller/main.zeek index 1cfd5e6880..455f2dc6b3 100644 --- a/scripts/policy/frameworks/management/controller/main.zeek +++ b/scripts/policy/frameworks/management/controller/main.zeek @@ -116,14 +116,18 @@ global config_deploy_to_agents: function(config: Management::Configuration, req: Management::Request::Request); # Returns list of names of nodes in the given configuration that require a -# listening port. Returns empty list if the config has no such nodes. -global config_nodes_lacking_ports: function(config: Management::Configuration): vector of string; +# Broker listening port. Returns empty list if the config has no such nodes. +global config_nodes_lacking_broker_ports: function(config: Management::Configuration): vector of string; # Assign node listening ports in the given configuration by counting up from -# Management::Controller::auto_assign_start_port. Scans the included nodes and -# fills in ports for any non-worker cluster node that doesn't have an existing -# port. This assumes those ports are actually available on the instances. -global config_assign_ports: function(config: Management::Configuration); +# Management::Controller::auto_assign_broker_start_port. Scans the included +# nodes and fills in ports for any non-worker cluster node that doesn't have an +# existing port. This assumes those ports are actually available on the +# instances. +global config_assign_broker_ports: function(config: Management::Configuration); + +# Assign node metrics ports, similar to config_assign_broker_ports above. +global config_assign_metrics_ports: function(config: Management::Configuration); # Validate the given configuration in terms of missing, incorrect, or # conflicting content. Returns T if validation succeeds, F otherwise. 
The @@ -265,7 +269,7 @@ function drop_instance(inst: Management::Instance) Management::Log::info(fmt("dropped instance %s", inst$name)); } -function config_nodes_lacking_ports(config: Management::Configuration): vector of string +function config_nodes_lacking_broker_ports(config: Management::Configuration): vector of string { local res: vector of string; local roles = { Supervisor::MANAGER, Supervisor::LOGGER, Supervisor::PROXY }; @@ -279,7 +283,23 @@ function config_nodes_lacking_ports(config: Management::Configuration): vector o return sort(res, strcmp); } -function config_assign_ports(config: Management::Configuration) +# A comparison function for nodes. This first compares according to the node's +# agent/instance, then by role priority, and finally by node name. This yields a +# sequence in which ports remain sequential to a node, and to roles on that +# node. +function config_nodes_compare(n1: Management::Node, n2: Management::Node, roles: table[Supervisor::ClusterRole] of count): int + { + local instcmp = strcmp(n1$instance, n2$instance); + if ( instcmp != 0 ) + return instcmp; + if ( roles[n1$role] < roles[n2$role] ) + return -1; + if ( roles[n1$role] > roles[n2$role] ) + return 1; + return strcmp(n1$name, n2$name); + } + +function config_assign_broker_ports(config: Management::Configuration) { # We're changing nodes in the configuration's set, so need to rebuild it: local new_nodes: set[Management::Node]; @@ -295,7 +315,15 @@ function config_assign_ports(config: Management::Configuration) # not per-instance: if the user wants auto-assignment, it seems better # to avoid confusion with the same port being used on multiple # instances. 
- local p = port_to_count(Management::Controller::auto_assign_start_port); + local start_port = Management::Controller::auto_assign_broker_start_port; + +@pragma push ignore-deprecations + # Keep deprecated config setting working until 7.1: + if ( Management::Controller::auto_assign_start_port != 2200/tcp ) + start_port = Management::Controller::auto_assign_start_port; +@pragma pop ignore-deprecations + + local p = port_to_count(start_port); # A set that tracks the ports we've used so far. Helpful for avoiding # collisions between manually specified and auto-enumerated ports. @@ -327,11 +355,9 @@ function config_assign_ports(config: Management::Configuration) add new_nodes[node]; } - # Now process the ones that may need ports, in order. We first sort by - # roles; we go manager -> logger -> proxy. Next are instance names, to - # get locally sequential ports among the same roles, and finally by - # name. + # Now process the ones that need ports, in order. local nodes: vector of Management::Node; + for ( node in config$nodes ) { if ( node?$p ) @@ -342,16 +368,7 @@ function config_assign_ports(config: Management::Configuration) } sort(nodes, function [roles] (n1: Management::Node, n2: Management::Node): int - { - if ( roles[n1$role] < roles[n2$role] ) - return -1; - if ( roles[n1$role] > roles[n2$role] ) - return 1; - local instcmp = strcmp(n1$instance, n2$instance); - if ( instcmp != 0 ) - return instcmp; - return strcmp(n1$name, n2$name); - }); + { return config_nodes_compare(n1, n2, roles); }); for ( i in nodes ) { @@ -372,6 +389,87 @@ function config_assign_ports(config: Management::Configuration) config$nodes = new_nodes; } +function config_assign_metrics_ports(config: Management::Configuration) + { + # We're changing nodes in the configuration's set, so need to rebuild it: + local new_nodes: set[Management::Node]; + + # An ordering of nodes by role: + local roles: table[Supervisor::ClusterRole] of count = { + [Supervisor::MANAGER] = 0, + [Supervisor::LOGGER] = 
1, + [Supervisor::PROXY] = 2, + [Supervisor::WORKER] = 3, + }; + + local p = port_to_count(Management::Controller::auto_assign_metrics_start_port); + local ports_set: set[count]; + local node: Management::Node; + + # Pre-populate agents ports, if we have them: + for ( inst in config$instances ) + { + if ( inst?$listen_port ) + add ports_set[port_to_count(inst$listen_port)]; + } + + # Pre-populate nodes with pre-defined metrics ports, as well + # as their Broker ports: + for ( node in config$nodes ) + { + if ( node?$p ) + add ports_set[port_to_count(node$p)]; + + if ( node?$metrics_port ) + { + add ports_set[port_to_count(node$metrics_port)]; + add new_nodes[node]; + } + } + + # Copy any nodes to the new set that have roles we don't care about. + # (This should be none right now given that every cluster node can have + # a metrics port.) + for ( node in config$nodes ) + { + if ( node$role !in roles ) + add new_nodes[node]; + } + + # Now process the ones that need ports, in order. + local nodes: vector of Management::Node; + + for ( node in config$nodes ) + { + if ( node?$metrics_port ) + next; + if ( node$role !in roles ) + next; + nodes += node; + } + + sort(nodes, function [roles] (n1: Management::Node, n2: Management::Node): int + { return config_nodes_compare(n1, n2, roles); }); + + for ( i in nodes ) + { + node = nodes[i]; + + # Find next available port ... + while ( p in ports_set ) + ++p; + + node$metrics_port = count_to_port(p, tcp); + add new_nodes[node]; + add ports_set[p]; + + # ... and consume it. + ++p; + } + + config$nodes = new_nodes; + } + function config_validate(config: Management::Configuration, req: Management::Request::Request): bool { @@ -493,6 +591,36 @@ function config_validate(config: Management::Configuration, } } + # If port auto-configuration is disabled, the user needs to define the + # ports. Verify this both for Broker's ports and the metrics export + # ones. 
+ +@pragma push ignore-deprecations + # Keep deprecated config setting working until 7.1: + local auto_broker_ports = Management::Controller::auto_assign_broker_ports; + if ( ! Management::Controller::auto_assign_ports ) + auto_broker_ports = F; +@pragma pop ignore-deprecations + + local nodes: vector of string; + local nodes_str: string; + + if ( ! auto_broker_ports ) + { + nodes = config_nodes_lacking_broker_ports(config); + + if ( |nodes| > 0 ) + { + nodes_str = join_string_vec(nodes, ", "); + errors += make_error(req$id, fmt("Broker port auto-assignment disabled but nodes %s lack ports", nodes_str)); + } + } + + # For metrics ports, it is not an error if auto-assignment is disabled + # but not all nodes feature a port. The user might intentionally want + # telemetry only from select nodes, and the discovery feature supports + # this. + # Possibilities for the future: # - Are node options understood? # - Do provided scripts exist/load? @@ -893,32 +1021,20 @@ event Management::Controller::API::stage_configuration_request(reqid: string, co return; } - if ( !
Management::Controller::auto_assign_ports ) - { - local nodes = config_nodes_lacking_ports(config); - - if ( |nodes| > 0 ) - { - local nodes_str = join_string_vec(nodes, ", "); - - res$success = F; - res$error = fmt("port auto-assignment disabled but nodes %s lack ports", nodes_str); - req$results += res; - - Management::Log::info(fmt("tx Management::Controller::API::stage_configuration_response %s", - Management::Request::to_string(req))); - Broker::publish(Management::Controller::topic, - Management::Controller::API::stage_configuration_response, req$id, req$results); - Management::Request::finish(req$id); - return; - } - } - g_configs[STAGED] = config; config_copy = copy(config); - if ( Management::Controller::auto_assign_ports ) - config_assign_ports(config_copy); +@pragma push ignore-deprecations + # Keep deprecated config setting working until 7.1: + local auto_broker_ports = Management::Controller::auto_assign_broker_ports; + if ( ! Management::Controller::auto_assign_ports ) + auto_broker_ports = F; + + if ( auto_broker_ports ) + config_assign_broker_ports(config_copy); + if ( Management::Controller::auto_assign_metrics_ports ) + config_assign_metrics_ports(config_copy); +@pragma pop ignore-deprecations g_configs[READY] = config_copy; From 8a4fb0ee19c021abf2e390810dfd70272b7704f6 Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Fri, 5 Jul 2024 17:22:27 -0700 Subject: [PATCH 77/89] Management framework: augment deployed configs with instance IP addresses The controller learns IP addresses from agents that peer with it, but that information has so far gotten lost when resulting configs get pushed out to the agents. This makes these updates include that information. 
--- .../frameworks/management/controller/main.zeek | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/scripts/policy/frameworks/management/controller/main.zeek b/scripts/policy/frameworks/management/controller/main.zeek index 455f2dc6b3..fd7df8343d 100644 --- a/scripts/policy/frameworks/management/controller/main.zeek +++ b/scripts/policy/frameworks/management/controller/main.zeek @@ -186,6 +186,24 @@ global g_configs: table[ConfigState] of Management::Configuration function config_deploy_to_agents(config: Management::Configuration, req: Management::Request::Request) { + # Make any final changes to the configuration we send off. + + # If needed, fill in agent IP address info as learned from their peerings. + # XXX this will need revisiting when we support host names. + local instances: set[Management::Instance]; + + for ( inst in config$instances ) + { + if ( inst$name in g_instances_known + && inst$host == 0.0.0.0 + && g_instances_known[inst$name]$host != 0.0.0.0 ) + inst$host = g_instances_known[inst$name]$host; + + add instances[inst]; + } + + config$instances = instances; + for ( name in g_instances ) { if ( name !in g_instances_ready ) From 7d802f27b70fc0d3411714fd811b1b274ae301fb Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Mon, 1 Jul 2024 18:36:15 -0700 Subject: [PATCH 78/89] Management framework: bump zeek-client --- auxil/zeek-client | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auxil/zeek-client b/auxil/zeek-client index 855b037b3f..5bcc140851 160000 --- a/auxil/zeek-client +++ b/auxil/zeek-client @@ -1 +1 @@ -Subproject commit 855b037b3f1ed6aefb7ef8674f4d71eef9679242 +Subproject commit 5bcc14085178ed4ddfa9ad972b441c36e8bc0787 From cdd5062f454b04e35d8a23a0f2fa1abeb92622e4 Mon Sep 17 00:00:00 2001 From: Christian Kreibich Date: Sun, 7 Jul 2024 22:43:25 -0700 Subject: [PATCH 79/89] Management framework: bump cluster testsuite to pull in telemetry tests --- testing/external/commit-hash.zeek-testing-cluster | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/external/commit-hash.zeek-testing-cluster b/testing/external/commit-hash.zeek-testing-cluster index 6c5a060b06..8b8bfdc2e4 100644 --- a/testing/external/commit-hash.zeek-testing-cluster +++ b/testing/external/commit-hash.zeek-testing-cluster @@ -1 +1 @@ -216efb97832e412ae0197dc8e36069d8c35fd81c +45582671c6715e719d91c8afde7ffb480c602441 From fcded0fc6ac336049a2daab65bfaedb1159ce863 Mon Sep 17 00:00:00 2001 From: Benjamin Bannier Date: Tue, 9 Jul 2024 18:22:21 +0200 Subject: [PATCH 80/89] Bump auxil/spicy to latest development snapshot --- auxil/spicy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auxil/spicy b/auxil/spicy index 90f8281322..6581b1855a 160000 --- a/auxil/spicy +++ b/auxil/spicy @@ -1 +1 @@ -Subproject commit 90f8281322a13c52a270b50764cbc85633c5b74e +Subproject commit 6581b1855a5ea8cc102c66b4ac6a431fc67484a0 From 0c06c604abad96234f81128c65b68f183f4e194f Mon Sep 17 00:00:00 2001 From: Jan Grashoefer Date: Tue, 9 Jul 2024 12:18:10 +0200 Subject: [PATCH 81/89] Add logging of disabled analyzers to analyzer.log --- scripts/base/frameworks/analyzer/logging.zeek | 45 ++++++++++++++++++- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/scripts/base/frameworks/analyzer/logging.zeek b/scripts/base/frameworks/analyzer/logging.zeek index dc3a611f4d..cde62315c2 100644 --- a/scripts/base/frameworks/analyzer/logging.zeek +++ b/scripts/base/frameworks/analyzer/logging.zeek @@ -53,6 +53,12 @@ export { ## service field. option include_confirmations = F; + ## Enable tracking of analyzers getting disabled. This is mostly + ## interesting for troubleshooting of analyzers in DPD scenarios. + ## Setting this option may also generate multiple log entries per + ## connection. + option include_disabling = F; + ## If a violation contains information about the data causing it, ## include at most this many bytes of it in the log.
option failure_data_max_size = 40; @@ -88,11 +94,24 @@ event zeek_init() &priority=5 Option::set_change_handler("Analyzer::Logging::include_confirmations", include_confirmations_handler); + local include_disabling_handler = function(id: string, new_value: bool): bool { + if ( new_value ) + enable_event_group("Analyzer::Logging::include_disabling"); + else + disable_event_group("Analyzer::Logging::include_disabling"); + + return new_value; + }; + Option::set_change_handler("Analyzer::Logging::include_disabling", + include_disabling_handler); + # Call the handlers directly with the current values to avoid config # framework interactions like creating entries in config.log. enable_handler("Analyzer::Logging::enable", Analyzer::Logging::enable); include_confirmations_handler("Analyzer::Logging::include_confirmations", Analyzer::Logging::include_confirmations); + include_disabling_handler("Analyzer::Logging::include_disabling", + Analyzer::Logging::include_disabling); } @@ -119,7 +138,7 @@ function populate_from_file(rec: Info, f: fa_file) { rec$fuid = f$id; # If the confirmation didn't have a connection, but the - # fa_file object has has exactly one, use it. + # fa_file object has exactly one, use it. if ( ! 
rec?$uid && f?$conns && |f$conns| == 1 ) { for ( _, c in f$conns ) @@ -151,7 +170,7 @@ event analyzer_confirmation_info(atype: AllAnalyzers::Tag, info: AnalyzerConfirm Log::write(LOG, rec); } -event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo) +event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo) &priority=6 { if ( atype in ignore_analyzers ) return; @@ -180,3 +199,25 @@ event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationI Log::write(LOG, rec); } + +hook Analyzer::disabling_analyzer(c: connection, atype: AllAnalyzers::Tag, aid: count) &priority=-1000 &group="Analyzer::Logging::include_disabling" + { + if ( atype in ignore_analyzers ) + return; + + local rec = Info( + $ts=network_time(), + $cause="disabled", + $analyzer_kind=analyzer_kind(atype), + $analyzer_name=Analyzer::name(atype), + ); + + populate_from_conn(rec, c); + + if ( c?$dpd_state && aid in c$dpd_state$violations ) + { + rec$failure_data = fmt("Disabled after %d violations", c$dpd_state$violations[aid]); + } + + Log::write(LOG, rec); + } From 03e17a530a04b9ba854b4c93ce8db27673e08993 Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Tue, 9 Jul 2024 09:33:29 -0700 Subject: [PATCH 82/89] Update zeek-aux submodule [nomail] --- auxil/zeek-aux | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auxil/zeek-aux b/auxil/zeek-aux index f44475b8ee..1478f2ee55 160000 --- a/auxil/zeek-aux +++ b/auxil/zeek-aux @@ -1 +1 @@ -Subproject commit f44475b8ee5dac9c10eaa64ddcb357d4dc77098b +Subproject commit 1478f2ee550a0f99f5b93975c17ae814ebe515b7 From c6c8d078c0712814934b6636949852341862401e Mon Sep 17 00:00:00 2001 From: Jan Grashoefer Date: Tue, 9 Jul 2024 12:27:36 +0200 Subject: [PATCH 83/89] Extend btest for logging of disabled analyzers --- testing/btest/Baseline/plugins.hooks/output | 12 ++++++++++++ ...og-no-confirmations => analyzer.log-default} | 2 -- .../analyzer.log-include-confirmations | 2 -- 
.../analyzer.log-include-disabling | 17 +++++++++++++++++ .../base/frameworks/analyzer/logging.zeek | 12 ++++++++++-- 5 files changed, 39 insertions(+), 6 deletions(-) rename testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/{analyzer.log-no-confirmations => analyzer.log-default} (80%) create mode 100644 testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-disabling diff --git a/testing/btest/Baseline/plugins.hooks/output b/testing/btest/Baseline/plugins.hooks/output index 297bdb3584..c3b551dc84 100644 --- a/testing/btest/Baseline/plugins.hooks/output +++ b/testing/btest/Baseline/plugins.hooks/output @@ -108,6 +108,8 @@ 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Analyzer::Logging::ignore_analyzers, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_confirmations, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_confirmations, lambda_<15261139872714441626>{ if (Analyzer::Logging::new_value) 
enable_event_group(Analyzer::Logging::include_confirmations)elsedisable_event_group(Analyzer::Logging::include_confirmations)return (Analyzer::Logging::new_value)}, 0)) -> +0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_disabling, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> +0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_disabling, lambda_<3084630089048204467>{ if (Analyzer::Logging::new_value) enable_event_group(Analyzer::Logging::include_disabling)elsedisable_event_group(Analyzer::Logging::include_disabling)return (Analyzer::Logging::new_value)}, 0)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Broker::peer_counts_as_iosource, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Conn::analyzer_inactivity_timeouts, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return 
(Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Conn::default_extract, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> @@ -302,6 +304,7 @@ 0.000000 MetaHookPost CallFunction(__init_primary_bifs, , ()) -> 0.000000 MetaHookPost CallFunction(__init_secondary_bifs, , ()) -> 0.000000 MetaHookPost CallFunction(disable_event_group, , (Analyzer::Logging::include_confirmations)) -> +0.000000 MetaHookPost CallFunction(disable_event_group, , (Analyzer::Logging::include_disabling)) -> 0.000000 MetaHookPost CallFunction(enable_module_events, , (Analyzer::Logging)) -> 0.000000 MetaHookPost CallFunction(getenv, , (CLUSTER_NODE)) -> 0.000000 MetaHookPost CallFunction(getenv, , (ZEEK_DEFAULT_LISTEN_ADDRESS)) -> @@ -313,6 +316,7 @@ 0.000000 MetaHookPost CallFunction(is_packet_analyzer, , (AllAnalyzers::ANALYZER_ANALYZER_TCPSTATS)) -> 0.000000 MetaHookPost CallFunction(lambda_<15261139872714441626>, , (Analyzer::Logging::include_confirmations, F)) -> 0.000000 MetaHookPost CallFunction(lambda_<2645182068207650863>, , (Analyzer::Logging::enable, T)) -> +0.000000 MetaHookPost CallFunction(lambda_<3084630089048204467>, , (Analyzer::Logging::include_disabling, F)) -> 0.000000 MetaHookPost CallFunction(port_to_count, , (2123/udp)) -> 0.000000 MetaHookPost CallFunction(port_to_count, , (2152/udp)) -> 0.000000 MetaHookPost CallFunction(port_to_count, , (3544/udp)) -> @@ -1024,6 +1028,8 @@ 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Analyzer::Logging::ignore_analyzers, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = 
Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_confirmations, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_confirmations, lambda_<15261139872714441626>{ if (Analyzer::Logging::new_value) enable_event_group(Analyzer::Logging::include_confirmations)elsedisable_event_group(Analyzer::Logging::include_confirmations)return (Analyzer::Logging::new_value)}, 0)) +0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_disabling, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) +0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_disabling, lambda_<3084630089048204467>{ if (Analyzer::Logging::new_value) enable_event_group(Analyzer::Logging::include_disabling)elsedisable_event_group(Analyzer::Logging::include_disabling)return (Analyzer::Logging::new_value)}, 0)) 
0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Broker::peer_counts_as_iosource, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Conn::analyzer_inactivity_timeouts, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Conn::default_extract, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) @@ -1218,6 +1224,7 @@ 0.000000 MetaHookPre CallFunction(__init_primary_bifs, , ()) 0.000000 MetaHookPre CallFunction(__init_secondary_bifs, , ()) 0.000000 MetaHookPre CallFunction(disable_event_group, , (Analyzer::Logging::include_confirmations)) +0.000000 MetaHookPre CallFunction(disable_event_group, , (Analyzer::Logging::include_disabling)) 0.000000 MetaHookPre CallFunction(enable_module_events, , (Analyzer::Logging)) 0.000000 MetaHookPre CallFunction(getenv, , (CLUSTER_NODE)) 0.000000 
MetaHookPre CallFunction(getenv, , (ZEEK_DEFAULT_LISTEN_ADDRESS)) @@ -1229,6 +1236,7 @@ 0.000000 MetaHookPre CallFunction(is_packet_analyzer, , (AllAnalyzers::ANALYZER_ANALYZER_TCPSTATS)) 0.000000 MetaHookPre CallFunction(lambda_<15261139872714441626>, , (Analyzer::Logging::include_confirmations, F)) 0.000000 MetaHookPre CallFunction(lambda_<2645182068207650863>, , (Analyzer::Logging::enable, T)) +0.000000 MetaHookPre CallFunction(lambda_<3084630089048204467>, , (Analyzer::Logging::include_disabling, F)) 0.000000 MetaHookPre CallFunction(port_to_count, , (2123/udp)) 0.000000 MetaHookPre CallFunction(port_to_count, , (2152/udp)) 0.000000 MetaHookPre CallFunction(port_to_count, , (3544/udp)) @@ -1939,6 +1947,8 @@ 0.000000 | HookCallFunction Option::set_change_handler(Analyzer::Logging::ignore_analyzers, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(Analyzer::Logging::include_confirmations, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(Analyzer::Logging::include_confirmations, lambda_<15261139872714441626>{ if (Analyzer::Logging::new_value) 
enable_event_group(Analyzer::Logging::include_confirmations)elsedisable_event_group(Analyzer::Logging::include_confirmations)return (Analyzer::Logging::new_value)}, 0) +0.000000 | HookCallFunction Option::set_change_handler(Analyzer::Logging::include_disabling, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) +0.000000 | HookCallFunction Option::set_change_handler(Analyzer::Logging::include_disabling, lambda_<3084630089048204467>{ if (Analyzer::Logging::new_value) enable_event_group(Analyzer::Logging::include_disabling)elsedisable_event_group(Analyzer::Logging::include_disabling)return (Analyzer::Logging::new_value)}, 0) 0.000000 | HookCallFunction Option::set_change_handler(Broker::peer_counts_as_iosource, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(Conn::analyzer_inactivity_timeouts, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction 
Option::set_change_handler(Conn::default_extract, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) @@ -2133,6 +2143,7 @@ 0.000000 | HookCallFunction __init_primary_bifs() 0.000000 | HookCallFunction __init_secondary_bifs() 0.000000 | HookCallFunction disable_event_group(Analyzer::Logging::include_confirmations) +0.000000 | HookCallFunction disable_event_group(Analyzer::Logging::include_disabling) 0.000000 | HookCallFunction enable_module_events(Analyzer::Logging) 0.000000 | HookCallFunction getenv(CLUSTER_NODE) 0.000000 | HookCallFunction getenv(ZEEK_DEFAULT_LISTEN_ADDRESS) @@ -2144,6 +2155,7 @@ 0.000000 | HookCallFunction is_packet_analyzer(AllAnalyzers::ANALYZER_ANALYZER_TCPSTATS) 0.000000 | HookCallFunction lambda_<15261139872714441626>(Analyzer::Logging::include_confirmations, F) 0.000000 | HookCallFunction lambda_<2645182068207650863>(Analyzer::Logging::enable, T) +0.000000 | HookCallFunction lambda_<3084630089048204467>(Analyzer::Logging::include_disabling, F) 0.000000 | HookCallFunction port_to_count(2123/udp) 0.000000 | HookCallFunction port_to_count(2152/udp) 0.000000 | HookCallFunction port_to_count(3544/udp) diff --git a/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-no-confirmations b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-default similarity index 80% rename from testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-no-confirmations rename to testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-default index ef8059b705..db390c9968 100644 --- 
a/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-no-confirmations +++ b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-default @@ -13,6 +13,4 @@ XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 5399 XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - -XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - -XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-confirmations b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-confirmations index 87e8855b64..56c4033614 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-confirmations +++ b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-confirmations @@ -15,6 +15,4 @@ XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 5399 XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - 
XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - -XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - -XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-disabling b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-disabling new file mode 100644 index 0000000000..662d888f7c --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-disabling @@ -0,0 +1,17 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path analyzer +#open XXXX-XX-XX-XX-XX-XX +#fields ts cause analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data +#types time string string string string string addr port addr port string string +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: out_of_bound: DCE_RPC_PDU:frag: -2665 > 31 - +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - +XXXXXXXXXX.XXXXXX disabled protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 - Disabled after 6 violations +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/scripts/base/frameworks/analyzer/logging.zeek b/testing/btest/scripts/base/frameworks/analyzer/logging.zeek index 6f8be11f24..7e6be8e6be 100644 --- a/testing/btest/scripts/base/frameworks/analyzer/logging.zeek +++ b/testing/btest/scripts/base/frameworks/analyzer/logging.zeek @@ -1,11 +1,19 @@ # @TEST-EXEC: zeek -r ${TRACES}/socks.trace %INPUT -# @TEST-EXEC: mv analyzer.log 
analyzer.log-no-confirmations -# @TEST-EXEC: btest-diff analyzer.log-no-confirmations +# @TEST-EXEC: mv analyzer.log analyzer.log-default +# @TEST-EXEC: btest-diff analyzer.log-default # @TEST-EXEC: zeek -r ${TRACES}/socks.trace %INPUT Analyzer::Logging::include_confirmations=T # @TEST-EXEC: mv analyzer.log analyzer.log-include-confirmations # @TEST-EXEC: btest-diff analyzer.log-include-confirmations +# @TEST-EXEC: zeek -r ${TRACES}/socks.trace %INPUT Analyzer::Logging::include_disabling=T +# @TEST-EXEC: mv analyzer.log analyzer.log-include-disabling +# @TEST-EXEC: btest-diff analyzer.log-include-disabling + @load base/protocols/conn @load base/protocols/dns @load base/protocols/socks + +# DCE RPC violations are ignored by default. Consider violations for this +# test so that the analyzer will be disabled eventually. +redef DPD::ignore_violations -= { Analyzer::ANALYZER_DCE_RPC }; From b59bed9d06f2257a6b4f6c6408b8363a277eebac Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Tue, 9 Jul 2024 14:31:09 -0700 Subject: [PATCH 84/89] CI: Use ccache and a single CPU when building spicy analyzers for btests --- ci/test.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ci/test.sh b/ci/test.sh index c5304b4512..e8c56e152a 100755 --- a/ci/test.sh +++ b/ci/test.sh @@ -59,7 +59,9 @@ function run_btests { pushd testing/btest - ZEEK_PROFILER_FILE=$(pwd)/.tmp/script-coverage/XXXXXX \ + HILTI_CXX_COMPILER_LAUNCHER=ccache \ + HILTI_JIT_PARALLELISM=1 \ + ZEEK_PROFILER_FILE=$(pwd)/.tmp/script-coverage/XXXXXX \ ${BTEST} -z ${ZEEK_CI_BTEST_RETRIES} -d -A -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} || result=1 make coverage prep_artifacts From b995924b217ad8cc6ba96fddb1638eafd2bbcd5b Mon Sep 17 00:00:00 2001 From: zeek-bot Date: Wed, 10 Jul 2024 00:21:07 +0000 Subject: [PATCH 85/89] Update doc submodule [nomail] [skip ci] --- doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc b/doc index bceb0f6035..42a8c7a028 160000 --- a/doc +++ b/doc 
@@ -1 +1 @@ -Subproject commit bceb0f6035cb1b98f6f2d9649e2fe67bba4f3999 +Subproject commit 42a8c7a0286ca0610eb0a3cdc8cb74d06b3c0557 From cb500536eaa272c48fb04e71b84da5a3729de4ad Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Wed, 10 Jul 2024 10:02:53 +0200 Subject: [PATCH 86/89] Revert "CI: Use ccache and a single CPU when building spicy analyzers for btests" This reverts commit b59bed9d06f2257a6b4f6c6408b8363a277eebac. --- ci/test.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ci/test.sh b/ci/test.sh index e8c56e152a..c5304b4512 100755 --- a/ci/test.sh +++ b/ci/test.sh @@ -59,9 +59,7 @@ function run_btests { pushd testing/btest - HILTI_CXX_COMPILER_LAUNCHER=ccache \ - HILTI_JIT_PARALLELISM=1 \ - ZEEK_PROFILER_FILE=$(pwd)/.tmp/script-coverage/XXXXXX \ + ZEEK_PROFILER_FILE=$(pwd)/.tmp/script-coverage/XXXXXX \ ${BTEST} -z ${ZEEK_CI_BTEST_RETRIES} -d -A -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} || result=1 make coverage prep_artifacts From 7b99fc01a98deca53d1872c58f5346200fe49b77 Mon Sep 17 00:00:00 2001 From: Arne Welzel Date: Wed, 10 Jul 2024 10:14:26 +0200 Subject: [PATCH 87/89] testing/btest: Default to HILTI_JIT_PARALLELISM=1 This is a rework of b59bed9d06f2257a6b4f6c6408b8363a277eebac moving HILTI_JIT_PARALLELISM=1 into btest.cfg to make it default applicable to btest -j users (and CI). The background for this change is that spicyz may spawn up to nproc compiler instances by default. Combined with btest -j, this may be nproc x nproc instances worst case. Particularly with gcc, this easily overloads CI or local systems, putting them into hard-to-recover-from thrashing/OOM states. Exporting HILTI_JIT_PARALLELISM in the shell allows overriding. 
--- testing/btest/btest.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/btest/btest.cfg b/testing/btest/btest.cfg index 5668992b56..b47ff2dddf 100644 --- a/testing/btest/btest.cfg +++ b/testing/btest/btest.cfg @@ -14,6 +14,7 @@ MinVersion = 0.63 [environment] ZEEKPATH=`bash -c %(testbase)s/../../%(build_dir)s/zeek-path-dev` HILTI_CXX_COMPILER_LAUNCHER=`f=%(testbase)s/../../%(build_dir)s/CMakeCache.txt && grep -q '^ENABLE_CCACHE:BOOL=true' $f && sed -n 's/^CCACHE_PROGRAM:FILEPATH=\(.*\)$/\1/p' $f` +HILTI_JIT_PARALLELISM=`bash -c 'echo ${HILTI_JIT_PARALLELISM:-1}'` ZEEK_SEED_FILE=%(testbase)s/random.seed ZEEK_PLUGIN_PATH= TZ=UTC From 3f475ebf7dcf1e5c3dab7e7c9073e73b07e3547e Mon Sep 17 00:00:00 2001 From: Tim Wojtulewicz Date: Wed, 10 Jul 2024 12:37:21 -0700 Subject: [PATCH 88/89] Bump cmake submodule [nomail] --- cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake b/cmake index d996924f5c..db0d52761f 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit d996924f5c94231290ec6991397d04df2adef6c3 +Subproject commit db0d52761f38f3602060da36adc1afff608730c1 From e99b94c18f8975eab8ffe4098875ae339e8140fb Mon Sep 17 00:00:00 2001 From: zeek-bot Date: Thu, 11 Jul 2024 00:10:52 +0000 Subject: [PATCH 89/89] Update doc submodule [nomail] [skip ci] --- doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc b/doc index 42a8c7a028..f65820ff0f 160000 --- a/doc +++ b/doc @@ -1 +1 @@ -Subproject commit 42a8c7a0286ca0610eb0a3cdc8cb74d06b3c0557 +Subproject commit f65820ff0faf2887799fe691a443b5db39eeed54