Return weird if a log line is over a configurable size limit

Tim Wojtulewicz 2025-07-15 15:23:43 -07:00
parent 9bda7493ec
commit 19346b93ad
11 changed files with 261 additions and 26 deletions

NEWS

@@ -6,15 +6,25 @@ release. For an exhaustive list of changes, see the ``CHANGES`` file
 Zeek 7.0.9
 ==========
 
-We would like to thank @cccs-jsjm and @edoardomich for their contributions to this release.
+We would like to thank @cccs-jsjm, @edoardomich, and the Canadian Cyber Defence Collective
+for their contributions to this release.
 
 This release fixes the following security issue:
 
+- Very large log records can cause Zeek to overflow memory and potentially crash. Due to
+  the possibility of building these log records with packets from remote hosts, this is a
+  DoS risk. The fix adds additional length checking when serializing log data for writing
+  to logging streams. This can be controlled via a new ``Log::max_log_record_size``
+  redefinable constant, and reports a new ``log_record_too_large`` weird if the limitation
+  is reached for any individual log entry. There is also a new
+  ``log-writer-discarded-writes`` metric that tracks when this limitation is
+  reached.
+
 This release fixes the following bugs:
 
 - The response to BDAT ``LAST`` was never recognized by the SMTP analyzer, resulting in
   the BDAT ``LAST`` commands not being logged in a timely fashion and receiving the wrong
-  status. Zeek now correctly reports these comands.
+  status. Zeek now correctly reports these commands.
 
 - The Docker images for zeek 7.0 releases now include the ``net-tools`` (for ``iproute2``)
   package to silience a warning from ``zeekctl``. They also now include the ``procps``
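
To make the entry above concrete, here is a minimal sketch of how a site policy might pick up the new knobs. The ``Log::max_log_record_size`` constant and the ``log_record_too_large`` weird name come straight from this commit; routing the weird through ``Weird::actions`` relies on the stock notice/weird framework, and the chosen limit and escalation action are illustrative assumptions, not recommendations.

# Hypothetical local.zeek tuning; Weird::ACTION_NOTICE is an assumed escalation choice.
@load base/frameworks/notice/weird

# Allow individual log records up to 128MB before they are dropped.
redef Log::max_log_record_size = 128 * 1024 * 1024;

# Escalate the new weird to a notice so dropped records are hard to miss.
redef Weird::actions += {
	["log_record_too_large"] = Weird::ACTION_NOTICE
};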


@@ -2901,7 +2901,20 @@ export {
 	auth_flavors: vector of auth_flavor_t &optional;	##< Returned authentication flavors
 };
-} # end export
+}
+
+module Log;
+
+export {
+	## Maximum size of a message that can be sent to a remote logger or logged
+	## locally. If this limit is exceeded, report a ``log_record_too_large`` weird and
+	## drop the log entry. This isn't necessarily the full size of a line that might be
+	## written to a log, but a general representation of the size as the log record is
+	## serialized for writing. The size of the end result from serialization might be
+	## higher than this limit, but it prevents runaway-sized log entries from causing
+	## problems.
+	const max_log_record_size = 1024*1024*64 &redef;
+}
 
 module POP3;
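
Relating the doc comment above to actual record contents: the accounting added to ``Manager::ValToLogVal`` later in this commit counts string-like fields at their byte length and fixed-width fields (count, int, double, time, interval) at eight bytes each, so a rough script-side estimate for a simple record can be sketched like this. The helper below is purely illustrative and not an API added by this commit; real serialization adds further overhead, which is why the constant is documented as a general representation of the size.

# Illustrative only: approximate what the logging core will count for a record
# that has one string field and one count field.
function approx_accounted_size(msg: string): count
	{
	return |msg| + 8;	# string length plus one 8-byte numeric field
	}

event zeek_init()
	{
	if ( approx_accounted_size("some log message") > Log::max_log_record_size )
		print "this record would be dropped and reported as log_record_too_large";
	}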


@@ -224,8 +224,11 @@ struct Manager::WriterInfo {
     string instantiating_filter;
     std::shared_ptr<telemetry::Counter> total_writes;
+    std::shared_ptr<telemetry::Counter> total_discarded_writes;
 
-    WriterInfo(std::shared_ptr<telemetry::Counter> total_writes) : total_writes(std::move(total_writes)) {}
+    WriterInfo(std::shared_ptr<telemetry::Counter> total_writes,
+               std::shared_ptr<telemetry::Counter> total_discarded_writes)
+        : total_writes(std::move(total_writes)), total_discarded_writes(std::move(total_discarded_writes)) {}
 };
 
 struct Manager::Stream {
@@ -423,7 +426,11 @@ Manager::Manager()
           telemetry_mgr
               ->CounterFamily("zeek", "log-writer-writes", {"writer", "module", "stream", "filter-name", "path"},
                               "Total number of log writes passed to a concrete log writer not vetoed by stream or "
-                              "filter policies.")) {
+                              "filter policies.")),
+      total_log_writer_discarded_writes_family(
+          telemetry_mgr->CounterFamily("zeek", "log-writer-discarded-writes",
+                                       {"writer", "module", "stream", "filter-name", "path"},
+                                       "Total number of log writes discarded due to size limitations.")) {
     rotations_pending = 0;
 }
@@ -435,6 +442,7 @@ Manager::~Manager() {
 void Manager::InitPostScript() {
     rotation_format_func = id::find_func("Log::rotation_format_func");
     log_stream_policy_hook = id::find_func("Log::log_stream_policy");
+    max_log_record_size = id::find_val("Log::max_log_record_size")->AsCount();
 }
 
 WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, EnumVal* tag) {
@@ -1153,8 +1161,15 @@ bool Manager::WriteToFilters(const Manager::Stream* stream, zeek::RecordValPtr c
         }
 
         // Alright, can do the write now.
-        threading::Value** vals = RecordToFilterVals(stream, filter, columns.get());
+        size_t total_size = 0;
+        threading::Value** vals = RecordToFilterVals(stream, filter, columns.get(), total_size);
+
+        if ( total_size > max_log_record_size ) {
+            reporter->Weird("log_record_too_large", util::fmt("%s", stream->name.c_str()));
+            w->second->total_discarded_writes->Inc();
+            DeleteVals(filter->num_fields, vals);
+            continue;
+        }
 
         if ( ! PLUGIN_HOOK_WITH_RESULT(HOOK_LOG_WRITE,
                                        HookLogWrite(filter->writer->GetType()->AsEnumType()->Lookup(
@@ -1385,7 +1400,7 @@ bool Manager::SetMaxDelayQueueSize(const EnumValPtr& id, zeek_uint_t queue_size)
     return true;
 }
 
-threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
+threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty, size_t& total_size) {
     if ( ! val )
         return new threading::Value(ty->Tag(), false);
@@ -1393,7 +1408,10 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
     switch ( lval->type ) {
         case TYPE_BOOL:
-        case TYPE_INT: lval->val.int_val = val->AsInt(); break;
+        case TYPE_INT:
+            lval->val.int_val = val->AsInt();
+            total_size += sizeof(lval->val.int_val);
+            break;
 
         case TYPE_ENUM: {
             const char* s = ty->AsEnumType()->Lookup(val->AsInt());
@@ -1410,10 +1428,16 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
                 lval->val.string_val.data = util::copy_string("", 0);
                 lval->val.string_val.length = 0;
             }
+
+            total_size += lval->val.string_val.length;
             break;
         }
 
-        case TYPE_COUNT: lval->val.uint_val = val->AsCount(); break;
+        case TYPE_COUNT:
+            lval->val.uint_val = val->AsCount();
+            total_size += sizeof(lval->val.uint_val);
+            break;
 
         case TYPE_PORT: {
             auto p = val->AsCount();
@@ -1429,16 +1453,26 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
             lval->val.port_val.port = p & ~PORT_SPACE_MASK;
             lval->val.port_val.proto = pt;
+            total_size += lval->val.port_val.size();
             break;
         }
 
-        case TYPE_SUBNET: val->AsSubNet()->Get().ConvertToThreadingValue(&lval->val.subnet_val); break;
+        case TYPE_SUBNET:
+            val->AsSubNet()->Get().ConvertToThreadingValue(&lval->val.subnet_val);
+            total_size += lval->val.subnet_val.size();
+            break;
 
-        case TYPE_ADDR: val->AsAddr()->Get().ConvertToThreadingValue(&lval->val.addr_val); break;
+        case TYPE_ADDR:
+            val->AsAddr()->Get().ConvertToThreadingValue(&lval->val.addr_val);
+            total_size += lval->val.addr_val.size();
+            break;
 
         case TYPE_DOUBLE:
         case TYPE_TIME:
-        case TYPE_INTERVAL: lval->val.double_val = val->AsDouble(); break;
+        case TYPE_INTERVAL:
+            lval->val.double_val = val->AsDouble();
+            total_size += sizeof(lval->val.double_val);
+            break;
 
        case TYPE_STRING: {
             const String* s = val->AsString()->AsString();
@@ -1447,6 +1481,7 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
             lval->val.string_val.data = buf;
             lval->val.string_val.length = s->Len();
+            total_size += lval->val.string_val.length;
             break;
         }
@@ -1456,6 +1491,7 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
             auto len = strlen(s);
             lval->val.string_val.data = util::copy_string(s, len);
             lval->val.string_val.length = len;
+            total_size += lval->val.string_val.length;
             break;
         }
@@ -1467,6 +1503,7 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
             auto len = strlen(s);
             lval->val.string_val.data = util::copy_string(s, len);
             lval->val.string_val.length = len;
+            total_size += lval->val.string_val.length;
             break;
         }
@@ -1483,14 +1520,15 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
             auto& set_t = tbl_t->GetIndexTypes()[0];
             bool is_managed = ZVal::IsManagedType(set_t);
 
-            lval->val.set_val.size = set->Length();
-            lval->val.set_val.vals = new threading::Value*[lval->val.set_val.size];
+            zeek_int_t set_length = set->Length();
+            lval->val.set_val.vals = new threading::Value*[set_length];
 
-            for ( zeek_int_t i = 0; i < lval->val.set_val.size; i++ ) {
+            for ( zeek_int_t i = 0; i < set_length && total_size < max_log_record_size; i++ ) {
                 std::optional<ZVal> s_i = ZVal(set->Idx(i), set_t);
-                lval->val.set_val.vals[i] = ValToLogVal(s_i, set_t.get());
+                lval->val.set_val.vals[i] = ValToLogVal(s_i, set_t.get(), total_size);
                 if ( is_managed )
                     ZVal::DeleteManagedType(*s_i);
+                lval->val.set_val.size++;
             }
 
             break;
@@ -1498,14 +1536,15 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
         case TYPE_VECTOR: {
             VectorVal* vec = val->AsVector();
 
-            lval->val.vector_val.size = vec->Size();
-            lval->val.vector_val.vals = new threading::Value*[lval->val.vector_val.size];
+            zeek_int_t vec_length = vec->Size();
+            lval->val.vector_val.vals = new threading::Value*[vec_length];
 
             auto& vv = vec->RawVec();
             auto& vt = vec->GetType()->Yield();
 
-            for ( zeek_int_t i = 0; i < lval->val.vector_val.size; i++ ) {
-                lval->val.vector_val.vals[i] = ValToLogVal(vv[i], vt.get());
+            for ( zeek_int_t i = 0; i < vec_length && total_size < max_log_record_size; i++ ) {
+                lval->val.vector_val.vals[i] = ValToLogVal(vv[i], vt.get(), total_size);
+                lval->val.vector_val.size++;
             }
 
             break;
@@ -1517,7 +1556,8 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
     return lval;
 }
 
-threading::Value** Manager::RecordToFilterVals(const Stream* stream, Filter* filter, RecordVal* columns) {
+threading::Value** Manager::RecordToFilterVals(const Stream* stream, Filter* filter, RecordVal* columns,
+                                               size_t& total_size) {
     RecordValPtr ext_rec;
 
     if ( filter->num_ext_fields > 0 ) {
@@ -1565,7 +1605,11 @@ threading::Value** Manager::RecordToFilterVals(const Stream* stream, Filter* fil
         }
 
         if ( val )
-            vals[i] = ValToLogVal(val, vt);
+            vals[i] = ValToLogVal(val, vt, total_size);
+
+        if ( total_size > max_log_record_size ) {
+            return vals;
+        }
     }
 
     return vals;
@@ -1614,7 +1658,8 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, WriterBacken
                                        {"filter-name", instantiating_filter},
                                        {"path", info->path}};
 
-    WriterInfo* winfo = new WriterInfo(zeek::log_mgr->total_log_writer_writes_family->GetOrAdd(labels));
+    WriterInfo* winfo = new WriterInfo(zeek::log_mgr->total_log_writer_writes_family->GetOrAdd(labels),
+                                       zeek::log_mgr->total_log_writer_discarded_writes_family->GetOrAdd(labels));
     winfo->type = writer->Ref()->AsEnumVal();
     winfo->writer = nullptr;
    winfo->open_time = run_state::network_time;
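
Taken together, the changes above drop an oversized write at serialization time, report it as a weird, and count it against the writer that would have received it. A single-process sketch that exercises this path (the stream name, path, and 1KB limit are arbitrary choices for illustration, not part of this commit):

# Illustrative standalone script: the first write exceeds the 1KB limit and is
# dropped with a log_record_too_large weird; the second is written to demo.log.
module Demo;

export {
	redef enum Log::ID += { LOG };

	type Info: record {
		msg: string &log;
	};
}

redef Log::max_log_record_size = 1024;

event zeek_init()
	{
	Log::create_stream(LOG, [$columns=Info, $path="demo"]);

	local big = "";
	while ( |big| <= 1024 )
		big += "AAAAAAAAAAAAAAAA";	# 16 bytes per iteration

	local rec: Info = [$msg=big];
	Log::write(LOG, rec);	# dropped and counted as a discarded write

	local rec2: Info = [$msg="small enough"];
	Log::write(LOG, rec2);	# written to demo.log as usual
	}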


@@ -376,9 +376,9 @@ private:
     bool TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, TableVal* include, TableVal* exclude,
                         const std::string& path, const std::list<int>& indices);
 
-    threading::Value** RecordToFilterVals(const Stream* stream, Filter* filter, RecordVal* columns);
+    threading::Value** RecordToFilterVals(const Stream* stream, Filter* filter, RecordVal* columns, size_t& total_size);
 
-    threading::Value* ValToLogVal(std::optional<ZVal>& val, Type* ty);
+    threading::Value* ValToLogVal(std::optional<ZVal>& val, Type* ty, size_t& total_size);
     Stream* FindStream(EnumVal* id);
     void RemoveDisabledWriters(Stream* stream);
     void InstallRotationTimer(WriterInfo* winfo);
@@ -399,12 +399,14 @@ private:
     bool DelayCompleted(Manager::Stream* stream, detail::DelayInfo& delay_info);
 
     std::vector<Stream*> streams; // Indexed by stream enum.
-    int rotations_pending; // Number of rotations not yet finished.
+    int rotations_pending = 0; // Number of rotations not yet finished.
     FuncPtr rotation_format_func;
     FuncPtr log_stream_policy_hook;
+    size_t max_log_record_size = 0;
 
     std::shared_ptr<telemetry::CounterFamily> total_log_stream_writes_family;
     std::shared_ptr<telemetry::CounterFamily> total_log_writer_writes_family;
+    std::shared_ptr<telemetry::CounterFamily> total_log_writer_discarded_writes_family;
 
     zeek_uint_t last_delay_token = 0;
     std::vector<detail::WriteContext> active_writes;


@@ -116,6 +116,7 @@ struct Value {
     struct port_t {
         zeek_uint_t port;
         TransportProto proto;
+        constexpr size_t size() { return sizeof(port) + sizeof(proto); }
     };
 
     struct addr_t {
@@ -124,6 +125,8 @@ struct Value {
             struct in_addr in4;
             struct in6_addr in6;
         } in;
+
+        constexpr size_t size() { return sizeof(in) + sizeof(IPFamily); }
     };
 
     // A small note for handling subnet values: Subnet values emitted from
@@ -136,6 +139,7 @@ struct Value {
     struct subnet_t {
         addr_t prefix;
         uint8_t length;
+        constexpr size_t size() { return prefix.size() + sizeof(length); }
     };
/** /**


@@ -0,0 +1,11 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path test
#open XXXX-XX-XX-XX-XX-XX
#fields strings
#types vector[string]
a,b,c
#close XXXX-XX-XX-XX-XX-XX


@@ -0,0 +1,11 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path weird
#open XXXX-XX-XX-XX-XX-XX
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer source
#types time string addr port addr port string string bool string string
XXXXXXXXXX.XXXXXX - - - - - log_record_too_large Test::LOG F zeek -
#close XXXX-XX-XX-XX-XX-XX


@@ -0,0 +1,11 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path test
#open XXXX-XX-XX-XX-XX-XX
#fields strings
#types vector[string]
a,b,c
#close XXXX-XX-XX-XX-XX-XX


@@ -0,0 +1,11 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path weird
#open XXXX-XX-XX-XX-XX-XX
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer source
#types time string addr port addr port string string bool string string
XXXXXXXXXX.XXXXXX - - - - - log_record_too_large Test::LOG F zeek -
#close XXXX-XX-XX-XX-XX-XX


@@ -10,7 +10,10 @@
 XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream Conn,Conn::LOG 34.0
 XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream DNS,DNS::LOG 34.0
 XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream HTTP,HTTP::LOG 14.0
+XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_discarded_writes_total filter_name,module,path,stream,writer default,Conn,conn,Conn::LOG,Log::WRITER_ASCII 0.0
 XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total filter_name,module,path,stream,writer default,Conn,conn,Conn::LOG,Log::WRITER_ASCII 30.0
+XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_discarded_writes_total filter_name,module,path,stream,writer default,DNS,dns,DNS::LOG,Log::WRITER_ASCII 0.0
 XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total filter_name,module,path,stream,writer default,DNS,dns,DNS::LOG,Log::WRITER_ASCII 23.0
+XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_discarded_writes_total filter_name,module,path,stream,writer default,HTTP,http,HTTP::LOG,Log::WRITER_ASCII 0.0
 XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total filter_name,module,path,stream,writer default,HTTP,http,HTTP::LOG,Log::WRITER_ASCII 10.0
 #close XXXX-XX-XX-XX-XX-XX
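
The new counter appears next to the existing per-writer writes counter with the same label set and stays at zero unless records are actually discarded. A rough way to peek at it from script land is sketched below; it assumes the stock ``Telemetry::collect_metrics()`` API from ``base/frameworks/telemetry``, and the prefix/name pattern is a guess at how the ``zeek``/``log-writer-discarded-writes`` family is exposed after name normalization, so it may need adjusting.

# Assumed usage of the script-level telemetry framework; the name pattern below
# is a guess and may not match the registered metric name exactly.
@load base/frameworks/telemetry

event zeek_done()
	{
	local ms = Telemetry::collect_metrics("zeek", "log_writer_discarded_writes*");

	for ( i in ms )
		print ms[i];
	}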


@@ -0,0 +1,114 @@
# @TEST-GROUP: broker
#
# @TEST-DOC: Limit the size of log lines that can be written.
#
# @TEST-PORT: BROKER_PORT
#
# @TEST-EXEC: btest-bg-run logger "zeek -b ../logger.zeek"
# @TEST-EXEC: btest-bg-run worker-1 "zeek -b ../worker-1.zeek"
# @TEST-EXEC: btest-bg-run worker-2 "zeek -b ../worker-2.zeek"
#
# @TEST-EXEC: btest-bg-wait -k 10
# @TEST-EXEC: btest-diff logger/weird.log
# @TEST-EXEC: btest-diff logger/test.log
# @TEST-EXEC: btest-diff worker-2/weird.log
# @TEST-EXEC: btest-diff worker-2/test.log

# @TEST-START-FILE common.zeek
@load base/frameworks/notice/weird

module Test;

export {
	redef enum Log::ID += { LOG };

	type Info: record {
		strings: vector of string &log;
	};
}

# Limit log lines to 1MB.
redef Log::max_log_record_size = 1024 * 1024;

redef Broker::disable_ssl = T;

event zeek_init()
	{
	Log::create_stream(LOG, [$columns=Info, $path="test"]);
	}
# @TEST-END-FILE

# @TEST-START-FILE logger.zeek
@load ./common.zeek

redef Log::enable_remote_logging = F;
redef Log::enable_local_logging = T;

event zeek_init()
	{
	Broker::subscribe("zeek/logs");
	Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT")));
	}

global peers_lost = 0;

event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
	{
	print "peer lost";
	++peers_lost;

	if ( peers_lost == 2 )
		terminate();
	}
# @TEST-END-FILE

# @TEST-START-FILE worker.zeek
@load ./common.zeek

event zeek_init()
	{
	Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT")));
	}

event do_write()
	{
	local s = "AAAAAAAAAAAAAAAAAAAA"; # 20 bytes
	local s100 = s + s + s + s + s;
	local s1000 = s100 + s100 + s100 + s100 + s100 + s100 + s100 + s100 + s100 + s100;

	local rec = Test::Info();
	local i = 0;
	while ( ++i <= ( 1000 * 1000 ) )
		{
		rec$strings += s1000;
		}

	Log::write(Test::LOG, rec);

	local rec2 = Test::Info();
	rec2$strings += "a";
	rec2$strings += "b";
	rec2$strings += "c";
	Log::write(Test::LOG, rec2);

	terminate();
	}

event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
	{
	print "new_peer", msg;
	schedule 1sec { do_write() };
	}
# @TEST-END-FILE

# @TEST-START-FILE worker-1.zeek
@load ./worker.zeek

redef Log::enable_remote_logging = T;
redef Log::enable_local_logging = F;
# @TEST-END-FILE worker-1.zeek

# @TEST-START-FILE worker-2.zeek
@load ./worker.zeek

redef Log::enable_remote_logging = F;
redef Log::enable_local_logging = T;
# @TEST-END-FILE worker-2.zeek