From 7a471df1a1410ebd6ae7fd0439d2371d03356a7d Mon Sep 17 00:00:00 2001
From: Christian Kreibich
Date: Tue, 7 Jun 2022 13:38:04 -0700
Subject: [PATCH 1/5] Management framework: support auto-assignment of ports
 in cluster nodes

This enables the controller to assign listening ports to managers, loggers,
and proxies. (We don't currently make the workers listen.)

The feature is controlled by the Management::Controller::auto_assign_ports
flag. When enabled (the default), enumeration starts from
Management::Controller::auto_assign_start_port, beginning with the manager,
then the logger(s), then the proxies. When the feature is disabled and nodes
that require a port lack one, the controller rejects the configuration.
---
 .../management/controller/config.zeek         |  11 ++
 .../management/controller/main.zeek           | 105 ++++++++++++++++++
 2 files changed, 116 insertions(+)

diff --git a/scripts/policy/frameworks/management/controller/config.zeek b/scripts/policy/frameworks/management/controller/config.zeek
index 2728d01ec3..a524fb049b 100644
--- a/scripts/policy/frameworks/management/controller/config.zeek
+++ b/scripts/policy/frameworks/management/controller/config.zeek
@@ -39,6 +39,17 @@ export {
 	## remains empty.
 	const default_port = 2150/tcp &redef;
 
+	## Whether the controller should auto-assign listening ports to cluster
+	## nodes that need them and don't have them explicitly specified in
+	## cluster configurations.
+	const auto_assign_ports = T &redef;
+
+	## The TCP start port to use for auto-assigning cluster node listening
+	## ports, if :zeek:see:`Management::Controller::auto_assign_ports` is
+	## enabled (the default) and the provided configurations don't have
+	## ports assigned.
+	const auto_assign_start_port = 2200/tcp &redef;
+
 	## The controller's Broker topic. Clients send requests to this topic.
 	const topic = "zeek/management/controller" &redef;
 
diff --git a/scripts/policy/frameworks/management/controller/main.zeek b/scripts/policy/frameworks/management/controller/main.zeek
index 98a9fe19e8..8410aed802 100644
--- a/scripts/policy/frameworks/management/controller/main.zeek
+++ b/scripts/policy/frameworks/management/controller/main.zeek
@@ -93,6 +93,22 @@ global drop_instance: function(inst: Management::Instance);
 global null_config: function(): Management::Configuration;
 global is_null_config: function(config: Management::Configuration): bool;
 
+# Returns a list of names of nodes in the given configuration that require a
+# listening port. Returns an empty list if the config has no such nodes.
+global config_nodes_lacking_ports: function(config: Management::Configuration): vector of string;
+
+# Assigns node listening ports in the given configuration by counting up from
+# Management::Controller::auto_assign_start_port. Scans the included nodes and
+# fills in ports for any non-worker cluster node that doesn't have an existing
+# port. This assumes those ports are actually available on the instances.
+global config_assign_ports: function(config: Management::Configuration);
+
+# Rejects the given configuration with the given error message. The function
+# adds a non-success result record to the given request and sends the
+# set_configuration_response event back to the client. It does not call
+# finish() on the request.
+global send_set_configuration_response_error: function(req: Management::Request::Request, error: string);
+
 # Given a Broker ID, this returns the endpoint info associated with it.
 # On error, returns a dummy record with an empty ID string.
 global find_endpoint: function(id: string): Broker::EndpointInfo;
 
@@ -221,6 +237,64 @@ function null_config(): Management::Configuration
 	return Management::Configuration($id="");
 	}
 
+function config_nodes_lacking_ports(config: Management::Configuration): vector of string
+	{
+	local res: vector of string;
+	local roles = { Supervisor::MANAGER, Supervisor::LOGGER, Supervisor::PROXY };
+
+	for ( node in config$nodes )
+		{
+		if ( node$role in roles && ! node?$p )
+			res += node$name;
+		}
+
+	return sort(res, strcmp);
+	}
+
+function config_assign_ports(config: Management::Configuration)
+	{
+	# We're changing nodes in the configuration's set, so need to rebuild it:
+	local new_nodes: set[Management::Node];
+
+	# Workers don't need listening ports, but these do:
+	local roles = vector(Supervisor::MANAGER, Supervisor::LOGGER, Supervisor::PROXY);
+
+	local p = port_to_count(Management::Controller::auto_assign_start_port);
+	local roles_set: set[Supervisor::ClusterRole];
+
+	for ( i in roles )
+		add roles_set[roles[i]];
+
+	# Copy any nodes to the new set that have roles we don't care about.
+	for ( node in config$nodes )
+		{
+		if ( node$role !in roles_set )
+			add new_nodes[node];
+		}
+
+	# Now process the ones that may need ports, in order.
+	for ( i in roles )
+		{
+		for ( node in config$nodes )
+			{
+			if ( node$role != roles[i] )
+				next;
+
+			if ( node?$p ) # Already has a port.
+				{
+				add new_nodes[node];
+				next;
+				}
+
+			node$p = count_to_port(p, tcp);
+			add new_nodes[node];
+			++p;
+			}
+		}
+
+	config$nodes = new_nodes;
+	}
+
 function find_endpoint(id: string): Broker::EndpointInfo
 	{
 	local peers = Broker::peers();
@@ -277,6 +351,18 @@ function filter_config_nodes_by_name(nodes: set[string]): set[string]
 	return nodes & cluster_nodes;
 	}
 
+function send_set_configuration_response_error(req: Management::Request::Request, error: string)
+	{
+	local res = Management::Result($reqid=req$id);
+
+	res$success = F;
+	res$error = error;
+	req$results += res;
+
+	Broker::publish(Management::Controller::topic,
+	    Management::Controller::API::set_configuration_response, req$id, req$results);
+	}
+
 event Management::Controller::API::notify_agents_ready(instances: set[string])
 	{
 	local insts = Management::Util::set_to_vector(instances);
@@ -486,6 +572,25 @@ event Management::Controller::API::set_configuration_request(reqid: string, conf
 	# - Do node types with optional fields have required values?
 	# ...
 
+	if ( Management::Controller::auto_assign_ports )
+		config_assign_ports(config);
+	else
+		{
+		local nodes = config_nodes_lacking_ports(config);
+
+		if ( |nodes| > 0 )
+			{
+			local nodes_str = join_string_vec(nodes, ", ");
+			send_set_configuration_response_error(req,
+			    fmt("port auto-assignment disabled but nodes %s lack ports", nodes_str));
+
+			Management::Request::finish(req$id);
+			Management::Log::info(fmt("tx Management::Controller::API::set_configuration_response %s",
+			    Management::Request::to_string(req)));
+			return;
+			}
+		}
+
 	# The incoming request is now the pending one. It gets cleared when all
 	# agents have processed their config updates successfully, or their
 	# responses time out.
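
For reference, the two knobs this patch introduces can be tuned from a site's
controller-side scripts via redef. A minimal, hedged usage sketch (not part of
the patch; the 10000/tcp value is just an example):

	# Disabling auto-assignment makes the controller reject configurations
	# in which a manager, logger, or proxy lacks an explicit port:
	redef Management::Controller::auto_assign_ports = F;

	# Alternatively, keep auto-assignment enabled (the default) but move
	# the enumeration range, e.g. to start at 10000/tcp:
	redef Management::Controller::auto_assign_start_port = 10000/tcp;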
From ccf3c24e23f2b957cdd1654fec0347eacc5de882 Mon Sep 17 00:00:00 2001
From: Christian Kreibich
Date: Tue, 7 Jun 2022 13:41:25 -0700
Subject: [PATCH 2/5] Management framework: minor log formatting tweak, for
 consistency

---
 scripts/policy/frameworks/management/controller/main.zeek | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/policy/frameworks/management/controller/main.zeek b/scripts/policy/frameworks/management/controller/main.zeek
index 8410aed802..08b7804e1b 100644
--- a/scripts/policy/frameworks/management/controller/main.zeek
+++ b/scripts/policy/frameworks/management/controller/main.zeek
@@ -368,7 +368,7 @@ event Management::Controller::API::notify_agents_ready(instances: set[string])
 	local insts = Management::Util::set_to_vector(instances);
 
 	Management::Log::info(fmt("rx Management::Controller::API:notify_agents_ready %s",
-	    join_string_vec(insts, ",")));
+	    join_string_vec(insts, ", ")));
 
 	local req = Management::Request::lookup(g_config_reqid_pending);
 

From 9b4841912cb0cff3a00cbb623b9f307d760fe31b Mon Sep 17 00:00:00 2001
From: Christian Kreibich
Date: Tue, 7 Jun 2022 13:42:07 -0700
Subject: [PATCH 3/5] Management framework: also use
 send_set_configuration_response_error elsewhere

---
 .../policy/frameworks/management/controller/main.zeek | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/scripts/policy/frameworks/management/controller/main.zeek b/scripts/policy/frameworks/management/controller/main.zeek
index 08b7804e1b..a0100355ab 100644
--- a/scripts/policy/frameworks/management/controller/main.zeek
+++ b/scripts/policy/frameworks/management/controller/main.zeek
@@ -552,16 +552,12 @@ event Management::Controller::API::set_configuration_request(reqid: string, conf
 	# At the moment there can only be one pending request.
 	if ( g_config_reqid_pending != "" )
 		{
-		res = Management::Result($reqid=reqid);
-		res$success = F;
-		res$error = fmt("request %s still pending", g_config_reqid_pending);
-		req$results += res;
+		send_set_configuration_response_error(req,
+		    fmt("request %s still pending", g_config_reqid_pending));
+		Management::Request::finish(req$id);
 		Management::Log::info(fmt("tx Management::Controller::API::set_configuration_response %s",
 		    Management::Request::to_string(req)));
-		Broker::publish(Management::Controller::topic,
-		    Management::Controller::API::set_configuration_response, req$id, req$results);
-		Management::Request::finish(req$id);
 		return;
 		}
 

From c0a4bc3adecadd367ee92f4350680e8e69abb7a4 Mon Sep 17 00:00:00 2001
From: Christian Kreibich
Date: Tue, 7 Jun 2022 14:31:27 -0700
Subject: [PATCH 4/5] Management framework: bump external cluster testsuite

---
 testing/external/commit-hash.zeek-testing-cluster | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testing/external/commit-hash.zeek-testing-cluster b/testing/external/commit-hash.zeek-testing-cluster
index e10be1fb68..30fa37db10 100644
--- a/testing/external/commit-hash.zeek-testing-cluster
+++ b/testing/external/commit-hash.zeek-testing-cluster
@@ -1 +1 @@
-343f1f6800ed92c33c915e357d21802be9e9f2f7
+322c8bb498c0bea21a236a7fa59cc4e8be19c6f9

From d8605884ffd9112a75f9efbf635862b6167dc7b0 Mon Sep 17 00:00:00 2001
From: Christian Kreibich
Date: Tue, 7 Jun 2022 18:39:40 -0700
Subject: [PATCH 5/5] Management framework: bump zeek-client to pull in
 relaxed port handling

---
 auxil/zeek-client | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/auxil/zeek-client b/auxil/zeek-client
index 6d6a8202de..58c02fdcdf 160000
--- a/auxil/zeek-client
+++ b/auxil/zeek-client
@@ -1 +1 @@
-Subproject commit 6d6a8202def91e91cbdc53b0ccf7aeca7806cd8a
+Subproject commit 58c02fdcdf3bdec7795316611e9a93885ac4adaf
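
Taken together, a cluster configuration submitted without ports for managers,
loggers, or proxies now comes back with ports filled in, in role order. A
hedged illustration of the counting scheme, using the same port_to_count() and
count_to_port() built-ins as config_assign_ports(); it assumes the default
start port and a hypothetical cluster of one manager, one logger, and two
proxies (node names invented; workers receive no port):

	# Standalone sketch; assumes the controller's config script loads
	# cleanly on its own.
	@load policy/frameworks/management/controller/config

	event zeek_init()
		{
		# The counter starts at the numeric value of the start port
		# (2200 by default) and advances once per assigned node:
		local p = port_to_count(Management::Controller::auto_assign_start_port);

		print fmt("manager:  %s", count_to_port(p, tcp));     # 2200/tcp
		print fmt("logger-1: %s", count_to_port(p + 1, tcp)); # 2201/tcp
		print fmt("proxy-1:  %s", count_to_port(p + 2, tcp)); # 2202/tcp
		print fmt("proxy-2:  %s", count_to_port(p + 3, tcp)); # 2203/tcp
		}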