Mirror of https://github.com/zeek/zeek.git
Management framework: rename agent "set_configuration" to "deploy"
This renames the agent's configuration-setting functionality to "deploy", reflecting the controller's upcoming separation of set_configuration and deployment.
parent f353ac22a5
commit 0480b5f39c
3 changed files with 45 additions and 47 deletions
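For orientation (not part of the commit), here is a minimal sketch of how a caller might drive the renamed agent API, assuming the Management framework scripts are loaded and an agent is listening. The topic suffix, request ID, and the zeek_init driver are placeholders; the event names and signatures come from the diff below.

# Sketch only: the topic suffix, reqid, and zeek_init driver are hypothetical.
# Assumes the Management agent API (Management::Agent::API) is loaded.
event zeek_init()
	{
	local config = Management::Configuration();  # empty configuration, as in agent_standby_request
	local reqid = "example-req-0001";

	# Ask the agent listening on this topic to deploy the configuration.
	Broker::publish(Management::Agent::topic_prefix + "/agent-example",
	    Management::Agent::API::deploy_request, reqid, config);
	}

# The agent answers with one Management::Result per affected cluster node.
event Management::Agent::API::deploy_response(reqid: string, results: Management::ResultVec)
	{
	print fmt("deploy_response for %s with %d result(s)", reqid, |results|);
	}

In the framework itself, the controller performs this publish once per agent in send_config_to_agents() and collects the per-agent deploy_response events, as the controller hunks at the end of this diff show.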
@@ -15,29 +15,28 @@ export {
 	# Agent API events
 
-	## The controller sends this event to convey a new cluster configuration
-	## to the agent. Once processed, the agent responds with the response
-	## event.
+	## The controller sends this event to deploy a cluster configuration to
+	## this instance. Once processed, the agent responds with a
+	## :zeek:see:`Management::Agent::API::deploy_response` event.
 	##
 	## reqid: a request identifier string, echoed in the response event.
 	##
-	## config: a :zeek:see:`Management::Configuration` record
-	## describing the cluster topology. Note that this contains the full
-	## topology, not just the part pertaining to this agent. That's because
-	## the cluster framework requires full cluster visibility to establish
-	## the needed peerings.
+	## config: a :zeek:see:`Management::Configuration` record describing the
+	## cluster topology. This contains the full topology, not just the
+	## part pertaining to this instance: the cluster framework requires
+	## full cluster visibility to establish needed peerings.
 	##
-	global set_configuration_request: event(reqid: string,
+	global deploy_request: event(reqid: string,
 	    config: Management::Configuration);
 
-	## Response to a set_configuration_request event. The agent sends
+	## Response to a deploy_request event. The agent sends
 	## this back to the controller.
 	##
 	## reqid: the request identifier used in the request event.
 	##
 	## result: the result record.
 	##
-	global set_configuration_response: event(reqid: string,
+	global deploy_response: event(reqid: string,
 	    result: Management::ResultVec);

@@ -26,8 +26,8 @@ export {
 		node: string; ##< Name of the node the Supervisor is acting on.
 	};
 
-	## Request state for set_configuration requests.
-	type SetConfigurationState: record {
+	## Request state for deploy requests.
+	type DeployState: record {
 		## Zeek cluster nodes the provided configuration requested
 		## and which have not yet checked in with the agent.
 		nodes_pending: set[string];

@@ -62,7 +62,7 @@ export {
 # members with _agent to disambiguate.
 redef record Management::Request::Request += {
 	supervisor_state_agent: SupervisorState &optional;
-	set_configuration_state_agent: SetConfigurationState &optional;
+	deploy_state_agent: DeployState &optional;
 	node_dispatch_state_agent: NodeDispatchState &optional;
 };
 

@@ -80,11 +80,11 @@ redef Management::Request::timeout_interval = 5 sec;
 # Returns the effective agent topic for this agent.
 global agent_topic: function(): string;
 
-# Finalizes a set_configuration_request transaction: cleans up remaining state
+# Finalizes a deploy_request transaction: cleans up remaining state
 # and sends response event.
-global send_set_configuration_response: function(req: Management::Request::Request);
+global send_deploy_response: function(req: Management::Request::Request);
 
-# The global configuration as passed to us by the controller
+# The global configuration, as deployed by the controller.
 global g_config: Management::Configuration;
 
 # A map to make other instance info accessible

@@ -93,10 +93,9 @@ global g_instances: table[string] of Management::Instance;
 # A map for the nodes we run on this instance, via this agent.
 global g_nodes: table[string] of Management::Node;
 
-# The request ID of the most recent configuration update from the controller.
-# We track it here until the nodes_pending set in the corresponding request's
-# SetConfigurationState is cleared out, or the corresponding request state hits
-# a timeout.
+# The request ID of the most recent config deployment from the controller. We
+# track it until the nodes_pending set in the corresponding request's
+# DeployState is cleared out, or the corresponding request state hits a timeout.
 global g_config_reqid_pending: string = "";
 
 # The complete node map employed by the supervisor to describe the cluster

@@ -115,7 +114,7 @@ function agent_topic(): string
 	return Management::Agent::topic_prefix + "/" + epi$id;
 	}
 
-function send_set_configuration_response(req: Management::Request::Request)
+function send_deploy_response(req: Management::Request::Request)
 	{
 	local node: string;
 	local res: Management::Result;

@@ -128,7 +127,7 @@ function send_set_configuration_response(req: Management::Request::Request)
 		    $instance = Management::Agent::get_name(),
 		    $node = node);
 
-		if ( node in req$set_configuration_state_agent$nodes_pending )
+		if ( node in req$deploy_state_agent$nodes_pending )
 			{
 			# This node failed.
 			res$success = F;

@@ -142,10 +141,10 @@ function send_set_configuration_response(req: Management::Request::Request)
 		req$results[|req$results|] = res;
 		}
 
-	Management::Log::info(fmt("tx Management::Agent::API::set_configuration_response %s",
+	Management::Log::info(fmt("tx Management::Agent::API::deploy_response %s",
 	    Management::result_to_string(res)));
 	Broker::publish(agent_topic(),
-	    Management::Agent::API::set_configuration_response, req$id, req$results);
+	    Management::Agent::API::deploy_response, req$id, req$results);
 
 	Management::Request::finish(req$id);

@@ -263,14 +262,13 @@ function supervisor_destroy(node: string)
 	Management::Log::info(fmt("issued supervisor destroy for %s, %s", node, req$id));
 	}
 
-event Management::Agent::API::set_configuration_request(reqid: string, config: Management::Configuration)
+event Management::Agent::API::deploy_request(reqid: string, config: Management::Configuration)
 	{
-	Management::Log::info(fmt("rx Management::Agent::API::set_configuration_request %s", reqid));
+	Management::Log::info(fmt("rx Management::Agent::API::deploy_request %s", reqid));
 
 	local nodename: string;
 	local node: Management::Node;
 	local nc: Supervisor::NodeConfig;
 	local msg: string;
 
 	# Adopt the global configuration provided. The act of trying to launch
 	# the requested nodes perturbs any existing ones one way or another, so

@@ -299,15 +297,15 @@ event Management::Agent::API::set_configuration_request(reqid: string, config: Management::Configuration)
 		    $reqid = reqid,
 		    $instance = Management::Agent::get_name());
 
-		Management::Log::info(fmt("tx Management::Agent::API::set_configuration_response %s",
+		Management::Log::info(fmt("tx Management::Agent::API::deploy_response %s",
 		    Management::result_to_string(res)));
 		Broker::publish(agent_topic(),
-		    Management::Agent::API::set_configuration_response, reqid, vector(res));
+		    Management::Agent::API::deploy_response, reqid, vector(res));
 		return;
 		}
 
 	local req = Management::Request::create(reqid);
-	req$set_configuration_state_agent = SetConfigurationState();
+	req$deploy_state_agent = DeployState();
 
 	# Establish this request as the pending one:
 	g_config_reqid_pending = reqid;

@@ -318,7 +316,7 @@ event Management::Agent::API::set_configuration_request(reqid: string, config: Management::Configuration)
 		if ( node$instance == Management::Agent::get_name() )
 			{
 			g_nodes[node$name] = node;
-			add req$set_configuration_state_agent$nodes_pending[node$name];
+			add req$deploy_state_agent$nodes_pending[node$name];
 			}
 
 	# The cluster and supervisor frameworks require a port for every

@@ -399,7 +397,8 @@ event Management::Agent::API::set_configuration_request(reqid: string, config: Management::Configuration)
 
 	# At this point we await Management::Node::API::notify_node_hello events
 	# from the new nodes, or a timeout, whichever happens first. These
-	# trigger the set_configuration_response event back to the controller.
+	# update the pending nodes in the request state, and eventually trigger
+	# the deploy_response event back to the controller.
 	}
 
 event SupervisorControl::status_response(reqid: string, result: Supervisor::Status)

@@ -692,7 +691,7 @@ event Management::Agent::API::agent_standby_request(reqid: string)
 	# peered/connected -- otherwise there's nothing we can do here via
 	# Broker anyway), mainly to keep open the possibility of running
 	# cluster nodes again later.
-	event Management::Agent::API::set_configuration_request("", Management::Configuration());
+	event Management::Agent::API::deploy_request("", Management::Configuration());
 
 	local res = Management::Result(
 	    $reqid = reqid,

@@ -712,20 +711,20 @@ event Management::Node::API::notify_node_hello(node: string)
 	if ( node in g_nodes )
 		g_nodes[node]$state = Management::RUNNING;
 
-	# Look up the set_configuration request this node launch was part of (if
+	# Look up the deploy request this node launch was part of (if
 	# any), and check it off. If it was the last node we expected to launch,
 	# finalize the request and respond to the controller.
 
 	local req = Management::Request::lookup(g_config_reqid_pending);
 
-	if ( Management::Request::is_null(req) || ! req?$set_configuration_state_agent )
+	if ( Management::Request::is_null(req) || ! req?$deploy_state_agent )
 		return;
 
-	if ( node in req$set_configuration_state_agent$nodes_pending )
+	if ( node in req$deploy_state_agent$nodes_pending )
 		{
-		delete req$set_configuration_state_agent$nodes_pending[node];
-		if ( |req$set_configuration_state_agent$nodes_pending| == 0 )
-			send_set_configuration_response(req);
+		delete req$deploy_state_agent$nodes_pending[node];
+		if ( |req$deploy_state_agent$nodes_pending| == 0 )
+			send_deploy_response(req);
 		}
 	}
 

@@ -736,9 +735,9 @@ event Management::Request::request_expired(req: Management::Request::Request)
 	    $success = F,
 	    $error = "request timed out");
 
-	if ( req?$set_configuration_state_agent )
+	if ( req?$deploy_state_agent )
 		{
-		send_set_configuration_response(req);
+		send_deploy_response(req);
 		# This timeout means we no longer have a pending request.
 		g_config_reqid_pending = "";
 		}

@@ -172,8 +172,8 @@ function send_config_to_agents(req: Management::Request::Request, config: Management::Configuration)
 
 		# We could also broadcast just once on the agent prefix, but
 		# explicit request/response pairs for each agent seems cleaner.
-		Management::Log::info(fmt("tx Management::Agent::API::set_configuration_request %s to %s", areq$id, name));
-		Broker::publish(agent_topic, Management::Agent::API::set_configuration_request, areq$id, config);
+		Management::Log::info(fmt("tx Management::Agent::API::deploy_request %s to %s", areq$id, name));
+		Broker::publish(agent_topic, Management::Agent::API::deploy_request, areq$id, config);
 		}
 	}
 

@@ -685,9 +685,9 @@ event Management::Agent::API::notify_log(instance: string, msg: string, node: string)
 	# XXX TODO
 	}
 
-event Management::Agent::API::set_configuration_response(reqid: string, results: Management::ResultVec)
+event Management::Agent::API::deploy_response(reqid: string, results: Management::ResultVec)
 	{
-	Management::Log::info(fmt("rx Management::Agent::API::set_configuration_response %s", reqid));
+	Management::Log::info(fmt("rx Management::Agent::API::deploy_response %s", reqid));
 
 	# Retrieve state for the request we just got a response to
 	local areq = Management::Request::lookup(reqid);

@@ -722,7 +722,7 @@ event Management::Agent::API::set_configuration_response(reqid: string, results: Management::ResultVec)
 	if ( |req$set_configuration_state$requests| > 0 )
 		return;
 
-	# All set_configuration requests to instances are done, so adopt the
+	# All deploy requests to instances are done, so adopt the
 	# client's requested configuration as the new one and respond back to
 	# client.
 	g_config_current = req$set_configuration_state$config;