Add peer buffer update tracking to the Broker manager's event_observer

This implements basic tracking of each peering's current fill level, the maximum
level over a recent time interval (via a new Broker::buffer_stats_reset_interval
tunable, defaulting to 1min), and the number of times a buffer overflows. For
the disconnect policy that overflow count equals the number of depeerings; for
drop_newest and drop_oldest it equals the number of messages lost.
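For illustration only (not part of this change), a site could tune the new
interval alongside the existing overflow policy with redefs along these lines;
the values shown are arbitrary:

    redef Broker::buffer_stats_reset_interval = 30sec;
    # With "drop_newest"/"drop_oldest", num_overflows counts lost messages
    # rather than depeerings.
    redef Broker::peer_overflow_policy = "drop_oldest";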

This doesn't use "proper" telemetry metrics for a few reasons: this tracking is
Broker-specific, so we need to track each peering via endpoint_ids, whereas we
want the metrics to carry Cluster node names as labels, and those names live in
the script layer. Using broker::endpoint_id directly as keys also means we rely
on their ability to hash in STL containers, which should be fast.

This does not track the buffer levels for Broker "clients" (as opposed to
"peers"), i.e. WebSockets, since we currently don't have a way to name these,
and we don't want to use ephemeral Broker IDs in their telemetry.

To make the stats accessible to the script layer, the Broker manager (via a new
helper class that lives in the event_observer) maintains a TableVal mapping
Broker IDs to a new BrokerPeeringStats record. The table's entries get updated
every time the table is requested. This minimizes new Val instantiation and
allows the script layer to customize the BrokerPeeringStats record by redef'ing
it, updating fields, etc. Since Zeek Vals can't be used outside the main thread,
this requires some care: all table updates happen only in the Zeek-side table
updater, PeerBufferState::GetPeeringStatsTable().
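
As a rough usage sketch (not part of this commit; the event name and polling
interval are made up for illustration), a script could poll the new table like
this:

    global report_peering_stats: event();

    event report_peering_stats()
        {
        for ( peer_id, ps in Broker::peering_stats() )
            print fmt("%s: queued=%d, max recently=%d, overflows=%d", peer_id,
                      ps$num_queued, ps$max_queued_recently, ps$num_overflows);
        schedule 10sec { report_peering_stats() };
        }

    event zeek_init()
        {
        schedule 10sec { report_peering_stats() };
        }
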
Christian Kreibich 2025-04-15 18:08:16 -07:00
parent 23554280e0
commit f5fbad23ff
7 changed files with 241 additions and 9 deletions


@@ -104,6 +104,10 @@ export {
## Same as :zeek:see:`Broker::peer_overflow_policy` but for WebSocket clients.
const web_socket_overflow_policy = "disconnect" &redef;
## How frequently Zeek resets some peering/client buffer statistics,
## such as ``max_queued_recently`` in :zeek:see:`BrokerPeeringStats`.
const buffer_stats_reset_interval = 1min &redef;
## The CAF scheduling policy to use. Available options are "sharing" and
## "stealing". The "sharing" policy uses a single, global work queue along
## with mutex and condition variable used for accessing it, which may be
@@ -392,6 +396,12 @@ export {
## Returns: a unique identifier for the local broker endpoint.
global node_id: function(): string;
## Obtain each peering's send-buffer statistics. The keys are Broker
## endpoint IDs.
##
## Returns: per-peering statistics.
global peering_stats: function(): table[string] of BrokerPeeringStats;
## Sends all pending log messages to remote peers. This normally
## doesn't need to be used except for test cases that are time-sensitive.
global flush_logs: function(): count;
@@ -554,6 +564,11 @@ function node_id(): string
return __node_id();
}
function peering_stats(): table[string] of BrokerPeeringStats
{
return __peering_stats();
}
function flush_logs(): count
{
return __flush_logs();


@@ -1135,6 +1135,20 @@ type BrokerStats: record {
num_ids_outgoing: count;
};
## Broker statistics for an individual peering.
##
type BrokerPeeringStats: record {
## The number of messages currently queued locally for transmission.
num_queued: count;
## The maximum number of messages queued in the recent
## :zeek:see:`Broker::buffer_stats_reset_interval` time interval.
max_queued_recently: count;
## The number of times the send buffer has overflowed.
num_overflows: count;
};
type BrokerPeeringStatsTable: table[string] of BrokerPeeringStats;
## Statistics about reporter messages and weirds.
##
## .. zeek:see:: get_reporter_stats
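
As an aside, the BrokerPeeringStats record above can be customized from script
land via redef, as the commit message mentions; a minimal sketch with a
hypothetical field:

    redef record BrokerPeeringStats += {
        ## Hypothetical site-specific annotation, filled in by local scripts.
        note: string &optional;
    };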


@@ -92,6 +92,174 @@ void print_escaped(std::string& buf, std::string_view str) {
buf.push_back('"');
}
// Track metrics for a given peering's send buffer.
class PeerBufferState {
public:
struct Stats {
// The rendered peer ID. Storing this here helps reuse.
// Note that we only ever touch this from Zeek's main thread, not
// any of Broker's.
zeek::StringValPtr peer_id;
// Whether Broker has removed the peer, and this instance still
// needs to be removed.
bool is_zombie = false;
// Number of messages queued locally in the send buffer.
uint32_t queued = 0;
// Maximum number queued in the last Broker::buffer_stats_reset_interval.
// This improves visibility into message bursts since instantaneous
// queueing (captured above) can be short-lived.
uint32_t max_queued_recently = 0;
// Number of times the buffer overflowed at send time. For the
// "disconnect" overflow policy (via Broker::peer_overflow_policy), this
// count will at most be 1 since Broker will remove the peering upon
// overflow. The existing Zeek-level metric for tracking disconnects
// (see frameworks/broker/broker-backpressure.zeek) covers this one more
// permanently. For the "drop_newest" and "drop_oldest" policies it
// equals a count of the number of messages lost, since the peering
// continues.
uint64_t overflows = 0;
// When we last started a stats-tracking interval for this peering.
double last_interval = 0;
};
// For per-peering tracking, map endpoint IDs to the above state.
using EndpointMetricMap = std::unordered_map<broker::endpoint_id, Stats>;
PeerBufferState(size_t a_buffer_size, double a_stats_reset_interval)
: buffer_size(a_buffer_size), stats_reset_interval(a_stats_reset_interval) {
stats_table =
zeek::make_intrusive<zeek::TableVal>(zeek::id::find_type<zeek::TableType>("BrokerPeeringStatsTable"));
stats_record_type = zeek::id::find_type<zeek::RecordType>("BrokerPeeringStats");
}
void SetEndpoint(const broker::endpoint* a_endpoint) { endpoint = a_endpoint; }
// Update the peering's stats. This runs in Broker's execution context.
// Broker does not expose send-buffer/queue state explicitly, so track
// arrivals (a push, is_push == true) and departures (a pull, is_push ==
// false) as they happen. Note that this must not touch Zeek-side Vals.
void Observe(const broker::endpoint_id& peer, bool is_push) {
std::lock_guard<std::mutex> lock(mutex);
auto it = stats_map.find(peer);
if ( it == stats_map.end() ) {
stats_map.emplace(peer, Stats());
it = stats_map.find(peer);
}
auto& stats = it->second;
// Stick to Broker's notion of time here.
double now{0};
if ( endpoint != nullptr )
broker::convert(endpoint->now(), now);
if ( now - stats.last_interval > stats_reset_interval ) {
stats.last_interval = now;
stats.max_queued_recently = stats.queued;
}
if ( stats.queued == 0 ) {
// Watch for underflows. We could report somehow. Note that this
// runs in the context of Broker's threads.
assert(is_push);
}
if ( is_push && stats.queued == buffer_size )
stats.overflows += 1;
else {
stats.queued += is_push ? 1 : -1;
if ( stats.queued > stats.max_queued_recently )
stats.max_queued_recently = stats.queued;
}
}
// Updates the internal table[string] of BrokerPeeringStats and returns it.
const zeek::TableValPtr& GetPeeringStatsTable() {
std::lock_guard<std::mutex> lock(mutex);
for ( auto it = stats_map.begin(); it != stats_map.end(); ) {
auto& peer = it->first;
auto& stats = it->second;
if ( stats.peer_id == nullptr )
stats.peer_id = PeerIdToStringVal(peer);
// Broker told us the peer is gone, in RemovePeer() below. Remove it
// now from both containers. We add/remove from stats_table only here,
// not in Observe() or RemovePeer(), to ensure we only touch
// the Zeek-side TableVal from Zeek's main thread.
if ( stats.is_zombie ) {
stats_table->Remove(*stats.peer_id);
it = stats_map.erase(it);
continue;
}
auto stats_v = stats_table->Find(stats.peer_id);
if ( stats_v == nullptr ) {
stats_v = zeek::make_intrusive<zeek::RecordVal>(stats_record_type);
stats_table->Assign(stats.peer_id, stats_v);
}
// We may get here more than stats_reset_interval after the last
// Observe(), in which case the max_queued_recently value is now
// stale. Update if so.
double now{0};
if ( endpoint != nullptr )
broker::convert(endpoint->now(), now);
if ( now - stats.last_interval > stats_reset_interval ) {
stats.last_interval = now;
stats.max_queued_recently = stats.queued;
}
int n = 0;
stats_v->AsRecordVal()->Assign(n++, zeek::val_mgr->Count(stats.queued));
stats_v->AsRecordVal()->Assign(n++, zeek::val_mgr->Count(stats.max_queued_recently));
stats_v->AsRecordVal()->Assign(n++, zeek::val_mgr->Count(stats.overflows));
++it;
}
return stats_table;
}
void RemovePeer(const broker::endpoint_id& peer) {
std::lock_guard<std::mutex> lock(mutex);
if ( auto it = stats_map.find(peer); it != stats_map.end() )
it->second.is_zombie = true;
}
private:
zeek::StringValPtr PeerIdToStringVal(const broker::endpoint_id& peer) const {
std::string peer_s;
broker::convert(peer, peer_s);
return zeek::make_intrusive<zeek::StringVal>(peer_s);
}
// The maximum number of messages queueable for transmission to a peer,
// see Broker::peer_buffer_size and Broker::web_socket_buffer_size.
size_t buffer_size;
// Seconds after which we reset stats tracked per time window.
double stats_reset_interval;
EndpointMetricMap stats_map;
zeek::TableValPtr stats_table;
zeek::RecordTypePtr stats_record_type;
mutable std::mutex mutex;
const broker::endpoint* endpoint = nullptr;
};
using PeerBufferStatePtr = std::shared_ptr<PeerBufferState>;
class LoggerQueue {
public:
void Push(broker::event_ptr event) {
@@ -132,8 +300,20 @@ class Observer : public broker::event_observer {
public:
using LogSeverityLevel = broker::event::severity_level;
explicit Observer(LogSeverityLevel severity, LoggerQueuePtr queue)
: severity_(severity), queue_(std::move(queue)) {}
explicit Observer(LogSeverityLevel severity, LoggerQueuePtr queue, PeerBufferStatePtr pbstate)
: severity_(severity), queue_(std::move(queue)), pbstate_(std::move(pbstate)) {}
void on_peer_buffer_push(const broker::endpoint_id& peer, const broker::node_message&) override {
pbstate_->Observe(peer, true);
}
void on_peer_buffer_pull(const broker::endpoint_id& peer, const broker::node_message&) override {
pbstate_->Observe(peer, false);
}
void on_peer_disconnect(const broker::endpoint_id& peer, const broker::error&) override {
pbstate_->RemovePeer(peer);
}
void observe(broker::event_ptr what) override { queue_->Push(std::move(what)); }
@@ -144,12 +324,12 @@ public:
private:
LogSeverityLevel severity_;
LoggerQueuePtr queue_;
PeerBufferStatePtr pbstate_;
};
} // namespace
namespace zeek::Broker {
static inline Val* get_option(const char* option) {
const auto& id = zeek::detail::global_scope()->Find(option);
@@ -224,15 +404,20 @@ class BrokerState {
public:
using LogSeverityLevel = Observer::LogSeverityLevel;
BrokerState(broker::configuration config, size_t congestion_queue_size, LoggerQueuePtr queue)
BrokerState(broker::configuration config, size_t congestion_queue_size, LoggerQueuePtr queue,
PeerBufferStatePtr pbstate)
: endpoint(std::move(config), telemetry_mgr->GetRegistry()),
subscriber(
endpoint.make_subscriber({broker::topic::statuses(), broker::topic::errors()}, congestion_queue_size)),
loggerQueue(std::move(queue)) {}
loggerQueue(std::move(queue)),
peerBufferState(std::move(pbstate)) {
peerBufferState->SetEndpoint(&endpoint);
}
broker::endpoint endpoint;
broker::subscriber subscriber;
LoggerQueuePtr loggerQueue;
PeerBufferStatePtr peerBufferState;
LogSeverityLevel logSeverity = LogSeverityLevel::critical;
LogSeverityLevel stderrSeverity = LogSeverityLevel::critical;
std::unordered_set<broker::network_info> outbound_peerings;
@@ -404,11 +589,13 @@ void Manager::DoInitPostScript() {
checkLogSeverity(stderrSeverityVal);
auto adapterVerbosity = static_cast<BrokerSeverityLevel>(std::max(logSeverityVal, stderrSeverityVal));
auto queue = std::make_shared<LoggerQueue>();
auto observer = std::make_shared<Observer>(adapterVerbosity, queue);
auto pbstate = std::make_shared<PeerBufferState>(options.peer_buffer_size,
get_option("Broker::buffer_stats_reset_interval")->AsDouble());
auto observer = std::make_shared<Observer>(adapterVerbosity, queue, pbstate);
broker::logger(observer); // *must* be called before creating the BrokerState
auto cqs = get_option("Broker::congestion_queue_size")->AsCount();
bstate = std::make_shared<BrokerState>(std::move(config), cqs, queue);
bstate = std::make_shared<BrokerState>(std::move(config), cqs, queue, pbstate);
bstate->logSeverity = static_cast<BrokerSeverityLevel>(logSeverityVal);
bstate->stderrSeverity = static_cast<BrokerSeverityLevel>(stderrSeverityVal);
@@ -1970,6 +2157,8 @@ const Stats& Manager::GetStatistics() {
return statistics;
}
TableValPtr Manager::GetPeeringStatsTable() { return bstate->peerBufferState->GetPeeringStatsTable(); }
bool Manager::AddForwardedStore(const std::string& name, TableValPtr table) {
if ( forwarded_stores.find(name) != forwarded_stores.end() ) {
reporter->Error("same &broker_store %s specified for two different variables", name.c_str());


@@ -384,6 +384,14 @@ public:
*/
const Stats& GetStatistics();
/**
* Returns a table[string] of BrokerPeeringStats, with each peering's
* send-buffer stats filled in. The keys are Broker node IDs identifying the
* current peers.
* @return Each peering's send-buffer statistics.
*/
TableValPtr GetPeeringStatsTable();
/**
* Creating an instance of this struct simply helps the manager
* keep track of whether calls into its API are coming from script


@@ -264,3 +264,8 @@ function Broker::__node_id%(%): string
zeek::Broker::Manager::ScriptScopeGuard ssg;
return zeek::make_intrusive<zeek::StringVal>(broker_mgr->NodeId());
%}
function Broker::__peering_stats%(%): BrokerPeeringStatsTable
%{
return broker_mgr->GetPeeringStatsTable();
%}


@@ -1,2 +1,2 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
558 seen BiFs, 0 unseen BiFs (), 0 new BiFs ()
559 seen BiFs, 0 unseen BiFs (), 0 new BiFs ()


@@ -7,7 +7,7 @@
# @TEST-EXEC: btest-diff output
# This set tracks the BiFs that have been characterized for ZAM analysis.
# As new ones are added or old ones removed, attend to updating FuncInfo.cc
# As new ones are added or old ones removed, update src/script_opt/FuncInfo.cc
# for ZAM, and then update the list here.
global known_BiFs = set(
"Analyzer::__disable_all_analyzers",
@@ -45,6 +45,7 @@ global known_BiFs = set(
"Broker::__opaque_clone_through_serialization",
"Broker::__peer",
"Broker::__peer_no_retry",
"Broker::__peering_stats",
"Broker::__peers",
"Broker::__pop",
"Broker::__publish_id",