Spelling fixes: scripts

* accessing
* across
* adding
* additional
* addresses
* afterwards
* analyzer
* ancillary
* answer
* associated
* attempts
* because
* belonging
* buffer
* cleanup
* committed
* connects
* database
* destination
* destroy
* distinguished
* encoded
* entries
* entry
* hopefully
* image
* include
* incorrect
* information
* initial
* initiate
* interval
* into
* java
* negotiation
* nodes
* nonexistent
* ntlm
* occasional
* omitted
* otherwise
* ourselves
* paragraphs
* particular
* perform
* received
* receiver
* referring
* release
* repetitions
* request
* responded
* retrieval
* running
* search
* separate
* separator
* should
* synchronization
* target
* that
* the
* threshold
* timeout
* transaction
* transferred
* transmission
* triggered
* vetoes
* virtual

Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
This commit is contained in:
Josh Soref 2022-10-23 16:00:49 -04:00
parent 5aa7d80e88
commit 21e0d777b3
52 changed files with 93 additions and 93 deletions

View file

@ -53,7 +53,7 @@ event zeek_init() &priority=5
hook x509_certificate_cache_replay(f: fa_file, e: X509::Info, sha256: string)
{
# we encountered a cached cert. The X509 analyzer will skip it. Let's raise all the events that it typically
# raises by ourselfes.
# raises by ourselves.
# first - let's checked if it already has an x509 record. That would mean that someone raised the file_hash event
# several times for the certificate - in which case we bail out.

View file

@ -15,7 +15,7 @@ export {
id: string &log;
## Hash algorithm used to generate issuerNameHash and issuerKeyHash.
hashAlgorithm: string &log;
## Hash of the issuer's distingueshed name.
## Hash of the issuer's distinguished name.
issuerNameHash: string &log;
## Hash of the issuer's public key.
issuerKeyHash: string &log;

View file

@ -122,7 +122,7 @@ event zeek_init() &priority=5
# We use MIME types internally to distinguish between user and CA certificates.
# The first certificate in a connection always gets tagged as user-cert, all
# following certificates get tagged as CA certificates. Certificates gotten via
# other means (e.g. identified from HTTP traffic when they are transfered in plain
# other means (e.g. identified from HTTP traffic when they are transferred in plain
# text) get tagged as application/pkix-cert.
Files::register_for_mime_type(Files::ANALYZER_X509, "application/x-x509-user-cert");
Files::register_for_mime_type(Files::ANALYZER_X509, "application/x-x509-ca-cert");

View file

@ -131,7 +131,7 @@ export {
## Whether calling :zeek:see:`Broker::peer` will register the Broker
## system as an I/O source that will block the process from shutting
## down. For example, set this to false when you are reading pcaps,
## but also want to initaiate a Broker peering and still shutdown after
## but also want to initiate a Broker peering and still shutdown after
## done reading the pcap.
option peer_counts_as_iosource = T;

View file

@ -351,7 +351,7 @@ export {
e: interval &default=0sec) : bool;
## Returns a set with all of a store's keys. The results reflect a snapshot
## in time that may diverge from reality soon afterwards. When acessing
## in time that may diverge from reality soon afterwards. When accessing
## any of the element, it may no longer actually be there. The function is
## also expensive for large stores, as it copies the complete set.
##

View file

@ -29,7 +29,7 @@ export {
## The maximum number of nodes that may belong to the pool.
## If not set, then all available nodes will be added to the pool,
## else the cluster framework will automatically limit the pool
## membership according to the threshhold.
## membership according to the threshold.
max_nodes: count &optional;
## Whether the pool requires exclusive access to nodes. If true,
## then *max_nodes* nodes will not be assigned to any other pool.

View file

@ -15,7 +15,7 @@ export {
## Represents the data in config.log.
type Info: record {
## Timestamp at which the configuration change occured.
## Timestamp at which the configuration change occurred.
ts: time &log;
## ID of the value that was changed.
id: string &log;

View file

@ -1,7 +1,7 @@
# These signatures were semi-automatically generated from libmagic's
# (~ v5.17) magic database rules that have an associated mime type.
# After generating, they were all manually reviewed and occassionally
# needed minor modifications by hand or were just ommited depending on
# After generating, they were all manually reviewed and occasionally
# needed minor modifications by hand or were just omitted depending on
# the complexity of the original magic rules.
#
# The instrumented version of the `file` command used to generate these
@ -1370,7 +1370,7 @@ signature file-magic-auto532 {
#}
# The use of non-sequential offsets and relational operations made the
# autogenerated signature incorrrect.
# autogenerated signature incorrect.
# >0 belong&,>100 (0x00000064), [""], swap_endian=0
# >>8 belong&,<3 (0x00000003), [""], swap_endian=0
# >>>12 belong&,<33 (0x00000021), [""], swap_endian=0

View file

@ -109,7 +109,7 @@ export {
path: string; ##< Original path value.
open: time; ##< Time when opened.
close: time; ##< Time when closed.
terminating: bool; ##< True if rotation occured due to Zeek shutting down.
terminating: bool; ##< True if rotation occurred due to Zeek shutting down.
};
## The function type for log rotation post processors.
@ -151,7 +151,7 @@ export {
## will generally fail.
dir: string &default = default_rotation_dir;
## A base name to use for the the rotated log. Log writers may later
## A base name to use for the rotated log. Log writers may later
## append a file extension of their choosing to this user-chosen
## base (e.g. if using the default ASCII writer and you want
## rotated files of the format "foo-<date>.log", then this basename
@ -579,7 +579,7 @@ export {
## log write, prior to iterating over the stream's associated filters.
## As with filter-specific hooks, breaking from the hook vetoes writing
## of the given log record. Note that filter-level policy hooks still get
## invoked after the global hook vetos, but they cannot "un-veto" the write.
## invoked after the global hook vetoes, but they cannot "un-veto" the write.
global log_stream_policy: Log::StreamPolicyHook;
}
@ -685,7 +685,7 @@ function Log::rotation_format_func(ri: Log::RotationFmtInfo): Log::RotationPath
# The reason for branching here is historical:
# the default format path before the intro of Log::rotation_format_func
# always separated the path from open-time using a '-', but ASCII's
# default postprocessor chose to rename using a '.' separaor. It also
# default postprocessor chose to rename using a '.' separator. It also
# chose a different date format.
if ( ri$postprocessor == __default_rotation_postprocessor &&
ri$writer == WRITER_ASCII &&

View file

@ -53,7 +53,7 @@ function add_rule(r: Rule) : string
return add_rule_impl(r);
else
{
# We sync rule entities accross the cluster, so we
# We sync rule entities across the cluster, so we
# actually can test if the rule already exists. If yes,
# refuse insertion already at the node.

View file

@ -55,7 +55,7 @@ export {
##
## t: How long to whitelist it, with 0 being indefinitely.
##
## location: An optional string describing whitelist was triddered.
## location: An optional string describing whitelist was triggered.
##
## Returns: The id of the inserted rule on success and zero on failure.
global whitelist_address: function(a: addr, t: interval, location: string &default="") : string;
@ -66,7 +66,7 @@ export {
##
## t: How long to whitelist it, with 0 being indefinitely.
##
## location: An optional string describing whitelist was triddered.
## location: An optional string describing whitelist was triggered.
##
## Returns: The id of the inserted rule on success and zero on failure.
global whitelist_subnet: function(s: subnet, t: interval, location: string &default="") : string;
@ -904,7 +904,7 @@ function rule_expire_impl(r: Rule, p: PluginState) &priority=-5 &is_used
if ( p$_id in rule$_no_expire_plugins )
{
# in this case - don't log anything, just remove the plugin from the rule
# and cleaup
# and cleanup
delete rule$_active_plugin_ids[p$_id];
delete rule$_no_expire_plugins[p$_id];
rule_cleanup(rule);
@ -1047,7 +1047,7 @@ function rule_error_impl(r: Rule, p: PluginState, msg: string &default="") &is_u
else
{
# error during insertion. Meh. If we are the only plugin, remove the rule again.
# Otherwhise - keep it, minus us.
# Otherwise - keep it, minus us.
delete rule$_plugin_ids[p$_id];
if ( |rule$_plugin_ids| == 0 )
{

View file

@ -54,7 +54,7 @@ export {
init: function(state: PluginState) &optional;
## One-time finalization function called when a plugin is shutdown; no further
## functions will be called afterwords.
## functions will be called afterwards.
done: function(state: PluginState) &optional;
## Implements the add_rule() operation. If the plugin accepts the rule,
@ -74,7 +74,7 @@ export {
## Table for a plugin to store instance-specific configuration information.
##
## Note, it would be nicer to pass the Plugin instance to all the below, instead
## of this state table. However Zeek's type resolver has trouble with refering to a
## of this state table. However Zeek's type resolver has trouble with referring to a
## record type from inside itself.
redef record PluginState += {
## The plugin that the state belongs to. (Defined separately

View file

@ -43,7 +43,7 @@ export {
ty: EntityType; ##< Type of entity.
conn: conn_id &optional; ##< Used with :zeek:enum:`NetControl::CONNECTION`.
flow: Flow &optional; ##< Used with :zeek:enum:`NetControl::FLOW`.
ip: subnet &optional; ##< Used with :zeek:enum:`NetControl::ADDRESS` to specifiy a CIDR subnet.
ip: subnet &optional; ##< Used with :zeek:enum:`NetControl::ADDRESS` to specify a CIDR subnet.
mac: string &optional; ##< Used with :zeek:enum:`NetControl::MAC`.
};

View file

@ -44,7 +44,7 @@ function want_pp() : bool
&& (mail_dest != "" || mail_dest_pretty_printed != ""));
}
# Opens and intializes the output file.
# Opens and initializes the output file.
function pp_open()
{
if ( pp_alarms_open )

View file

@ -276,7 +276,7 @@ export {
## This event is generated when a notice begins to be suppressed.
##
## ts: time indicating then when the notice to be suppressed occured.
## ts: time indicating then when the notice to be suppressed occurred.
##
## suppress_for: length of time that this notice should be suppressed.
##
@ -288,7 +288,7 @@ export {
## This is an internal event that is used to broadcast the begin_suppression
## event over a cluster.
##
## ts: time indicating then when the notice to be suppressed occured.
## ts: time indicating then when the notice to be suppressed occurred.
##
## suppress_for: length of time that this notice should be suppressed.
##

View file

@ -108,7 +108,7 @@ function unregister_controller(controller: Controller)
function lookup_controller(name: string): vector of Controller
{
# we only run the on the manager. Otherwhise we don't have a mapping or state -> return empty
# we only run the on the manager. Otherwise we don't have a mapping or state -> return empty
if ( Cluster::local_node_type() != Cluster::MANAGER )
return vector();

View file

@ -83,7 +83,7 @@ export {
const IP_IPIP = 0x04;
# Internet Stream Protocol (RFC1190;RFC1819)
const IP_ST = 0x05;
# Tansmission Control Protocol (RFC793)
# Transmission Control Protocol (RFC793)
const IP_TCP = 0x06;
# Core-based trees (RFC2189)
const IP_CBT = 0x07;
@ -126,7 +126,7 @@ export {
const INVALID_COOKIE = 0x7fffffffffffffff;
# Openflow physical port definitions
## Send the packet out the input port. This
## virual port must be explicitly used in
## virtual port must be explicitly used in
## order to send back out of the input port.
const OFPP_IN_PORT = 0xfffffff8;
## Perform actions in flow table.

View file

@ -102,7 +102,7 @@ export {
hard_timeout: count &default=0;
## Priority level of flow entry.
priority: count &default=0;
## For OFPFC_DELETE* commands, require matching entried to include
## For OFPFC_DELETE* commands, require matching entries to include
## this as an output port/group. OFPP_ANY/OFPG_ANY means no restrictions.
out_port: count &optional;
out_group: count &optional;

View file

@ -227,7 +227,7 @@ global stats_keys: table[string] of set[Key] &read_expire=1min
return 0secs;
};
# This variable is maintained by manager nodes to track how many "dones" they
# This variable is maintained by manager nodes to track how many "nodes" they
# collected per collection unique id. Once the number of results for a uid
# matches the number of peer nodes that results should be coming from, the
# result is written out and deleted from here.

View file

@ -74,7 +74,7 @@ export {
##
## reqid: an arbitrary string that will be directly echoed in the response
##
## node: the name of the node to destory or empty string to mean "all
## node: the name of the node to destroy or empty string to mean "all
## nodes".
global SupervisorControl::destroy_request: event(reqid: string, node: string);

View file

@ -157,7 +157,7 @@ type PacketSource: record {
path: string;
## The data link-layer type of the packet source.
link_type: int;
## The netmask assoicated with the source or ``NETMASK_UNKNOWN``.
## The netmask associated with the source or ``NETMASK_UNKNOWN``.
netmask: count;
};
@ -195,7 +195,7 @@ type flow_id : record {
src_h: addr; ##< The source IP address.
src_p: port; ##< The source port number.
dst_h: addr; ##< The destination IP address.
dst_p: port; ##< The desintation port number.
dst_p: port; ##< The destination port number.
} &log;
## Specifics about an ICMP conversation. ICMP events typically pass this in
@ -1172,7 +1172,7 @@ const tcp_max_above_hole_without_any_acks = 16384 &redef;
const tcp_excessive_data_without_further_acks = 10 * 1024 * 1024 &redef;
## Number of TCP segments to buffer beyond what's been acknowledged already
## to detect retransmission inconsistencies. Zero disables any additonal
## to detect retransmission inconsistencies. Zero disables any additional
## buffering.
const tcp_max_old_segments = 0 &redef;
@ -2502,7 +2502,7 @@ export {
fh: string; ##< File handle to write to.
offset: count; ##< Offset in file.
size: count; ##< Number of bytes to write.
stable: stable_how_t; ##< How and when data is commited.
stable: stable_how_t; ##< How and when data is committed.
data: string &optional; ##< The actual data; not implemented yet.
};
@ -2769,7 +2769,7 @@ export {
## If set, requests and identify level token
negotiate_identify : bool;
## If set, requests usage of NTLM v2 session security
## Note: NTML v2 session security is actually NTLM v1
## Note: NTLM v2 session security is actually NTLM v1
negotiate_extended_sessionsecurity : bool;
## If set, TargetName must be a server name
target_type_server : bool;
@ -2957,7 +2957,7 @@ export {
unicode : bool;
## The server supports large files with 64 bit offsets
large_files : bool;
## The server supports the SMBs particilar to the NT LM 0.12 dialect. Implies nt_find.
## The server supports the SMBs particular to the NT LM 0.12 dialect. Implies nt_find.
nt_smbs : bool;
## The server supports remote admin API requests via DCE-RPC
@ -3245,7 +3245,7 @@ export {
info_level : count;
## Specify whether to search for directories or files
search_storage_type : count;
## The string to serch for (note: may contain wildcards)
## The string to search for (note: may contain wildcards)
file_name : string;
};
@ -3443,7 +3443,7 @@ export {
type SMB2::NegotiateContextValues: vector of SMB2::NegotiateContextValue;
## The response to an SMB2 *negotiate* request, which is used by tghe client to notify the server
## The response to an SMB2 *negotiate* request, which is used by the client to notify the server
## what dialects of the SMB2 protocol the client understands.
##
## For more information, see MS-SMB2:2.2.4
@ -3569,7 +3569,7 @@ export {
## This information class is used to query or set extended attribute (EA) information for a file.
##
## For more infomation, see MS-SMB2:2.2.39 and MS-FSCC:2.4.15
## For more information, see MS-SMB2:2.2.39 and MS-FSCC:2.4.15
##
type SMB2::FileEA: record {
## Specifies the extended attribute name
@ -3580,7 +3580,7 @@ export {
## A vector of extended attribute (EA) information for a file.
##
## For more infomation, see MS-SMB2:2.2.39 and MS-FSCC:2.4.15
## For more information, see MS-SMB2:2.2.39 and MS-FSCC:2.4.15
##
type SMB2::FileEAs: vector of SMB2::FileEA;
@ -3978,7 +3978,7 @@ type dns_svcb_rr: record {
# DNS answer types.
#
# .. zeek:see:: dns_answerr
# .. zeek:see:: dns_answer
#
# todo:: use enum to make them autodoc'able
const DNS_QUERY = 0; ##< A query. This shouldn't occur, just for completeness.
@ -4101,7 +4101,7 @@ type PE::DOSHeader: record {
num_reloc_items : count;
## Number of paragraphs in the header.
header_in_paragraphs : count;
## Number of paragraps of additional memory that the program will need.
## Number of paragraphs of additional memory that the program will need.
min_extra_paragraphs : count;
## Maximum number of paragraphs of additional memory.
max_extra_paragraphs : count;
@ -4180,7 +4180,7 @@ type PE::OptionalHeader: record {
major_subsys_version : count;
## The minor version of the subsystem required to run this file.
minor_subsys_version : count;
## The size (in bytes) of the iamge as the image is loaded in memory.
## The size (in bytes) of the image as the image is loaded in memory.
size_of_image : count;
## The size (in bytes) of the headers, rounded up to file_alignment.
size_of_headers : count;
@ -4219,7 +4219,7 @@ type PE::SectionHeader: record {
ptr_to_line_nums : count;
## The number of relocation entries for the section.
num_of_relocs : count;
## The number of line-number entrie for the section.
## The number of line-number entries for the section.
num_of_line_nums : count;
## Bit-flags that describe the characteristics of the section.
characteristics : set[count];
@ -4720,7 +4720,7 @@ export {
## The message type (20 for SAFE_MSG)
msg_type : count;
## The application-specific data that is being passed
## from the sender to the reciever
## from the sender to the receiver
data : string;
## Current time from the sender of the message
timestamp : time &optional;
@ -5178,7 +5178,7 @@ module NTP;
export {
## NTP standard message as defined in :rfc:`5905` for modes 1-5
## This record contains the standard fields used by the NTP protocol
## for standard syncronization operations.
## for standard synchronization operations.
type NTP::StandardMessage: record {
## This value mainly identifies the type of server (primary server,
## secondary server, etc.). Possible values, as in :rfc:`5905`, are:
@ -5294,7 +5294,7 @@ export {
sequence: count;
## The number of the implementation this request code
## is defined by. An implementation number of zero is used
## for requst codes/data formats which all implementations
## for request codes/data formats which all implementations
## agree on. Implementation number 255 is reserved (for
## extensions, in case we run out).
implementation: count;
@ -5331,7 +5331,7 @@ export {
## * 6 - NTP control message
## * 7 - reserved for private use
mode: count;
## If mode 1-5, the standard fields for syncronization operations are
## If mode 1-5, the standard fields for synchronization operations are
## here. See :rfc:`5905`
std_msg: NTP::StandardMessage &optional;
## If mode 6, the fields for control operations are here.
@ -5490,7 +5490,7 @@ export {
const flowbuffer_capacity_max = 10 * 1024 * 1024 &redef;
## The initial capacity, in bytes, that will be allocated to the BinPAC
## flowbuffer of a given connection/analyzer. If the buffer buffer is
## flowbuffer of a given connection/analyzer. If the buffer is
## later contracted, its capacity is also reduced to this size.
const flowbuffer_capacity_min = 512 &redef;

View file

@ -53,7 +53,7 @@ export {
##
## threshold: Threshold in bytes to remove.
##
## is_orig: If true, threshold is removed for packets from originator, otherwhise for packets from responder.
## is_orig: If true, threshold is removed for packets from originator, otherwise for packets from responder.
##
## Returns: T on success, F on failure.
global delete_bytes_threshold: function(c: connection, threshold: count, is_orig: bool): bool;

View file

@ -2733,7 +2733,7 @@ export {
["e33c0cc4-0482-101a-bc0c-02608c6ba218",0x05] = "I_nsi_entry_object_inq_done",
["e33c0cc4-0482-101a-bc0c-02608c6ba218",0x06] = "I_nsi_entry_object_inq_begin",
# lsacap - MSDN Ref: Central Access Policy Identifier Retreival Protocol [ms-capr]
# lsacap - MSDN Ref: Central Access Policy Identifier Retrieval Protocol [ms-capr]
["afc07e2e-311c-4435-808c-c483ffeec7c9",0x00] = "LsarGetAvailableCAPIDs",
# NetEventForwarder - MSDN Ref: Live Remote Event Capture Protocol [ms-lrec]

View file

@ -86,7 +86,7 @@ export {
client_chaddr: string &optional;
};
## The maximum amount of time that a transation ID will be watched
## The maximum amount of time that a transaction ID will be watched
## for to try and tie messages together into a single DHCP
## transaction narrative.
option DHCP::max_txid_watch_time = 30secs;

View file

@ -47,7 +47,7 @@ function describe_file(f: fa_file): string
# we do not know when this function is called (hence, if the data structures
# are already populated).
#
# Just return a bit of our connection information and hope that that is good enough.
# Just return a bit of our connection information and hope that is good enough.
for ( _, c in f$conns )
{
if ( c?$krb )

View file

@ -54,7 +54,7 @@ export {
option ignored_errors: set[string] = {
# This will significantly increase the noisiness of the log.
# However, one attack is to iterate over principals, looking
# for ones that don't require preauth, and then performn
# for ones that don't require preauth, and then perform
# an offline attack on that ticket. To detect that attack,
# log NEEDED_PREAUTH.
"NEEDED_PREAUTH",

View file

@ -21,7 +21,7 @@ export {
## This is typically a username.
cookie: string &log &optional;
## Status result for the connection. It's a mix between
## RDP negotation failure messages and GCC server create
## RDP negotiation failure messages and GCC server create
## response messages.
result: string &log &optional;
## Security protocol chosen by the server.

View file

@ -284,7 +284,7 @@ event smb2_file_delete(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, de
if ( ! delete_pending )
{
# This is weird beause it would mean that someone didn't
# This is weird because it would mean that someone didn't
# set the delete bit in a delete request.
return;
}

View file

@ -18,7 +18,7 @@ export {
## The connection's 5-tuple of addresses/ports (ports inherently
## include transport protocol information)
id: conn_id &log;
## The amount of time between the first packet beloning to
## The amount of time between the first packet belonging to
## the SNMP session and the latest one seen.
duration: interval &log &default=0secs;
## The version of SNMP being used.

View file

@ -17,7 +17,7 @@ export {
## Time when the proxy connection was first detected.
ts: time &log;
## Unique ID for the tunnel - may correspond to connection uid
## or be non-existent.
## or be nonexistent.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;

View file

@ -28,7 +28,7 @@ export {
version: count &log &optional;
## Authentication result (T=success, F=failure, unset=unknown)
auth_success: bool &log &optional;
## The number of authentication attemps we observed. There's always
## The number of authentication attempts we observed. There's always
## at least one, since some servers might support no authentication at all.
## It's important to note that not all of these are failures, since
## some servers require two-factor auth (e.g. password AND pubkey)
@ -125,7 +125,7 @@ redef record Info += {
# Store capabilities from the first host for
# comparison with the second (internal use)
capabilities: Capabilities &optional;
## Analzyer ID
## Analyzer ID
analyzer_id: count &optional;
};

View file

@ -6,7 +6,7 @@
module SSL;
export {
## Set this to true to includd the server certificate subject and
## Set this to true to include the server certificate subject and
## issuer from the SSL log file. This information is still available
## in x509.log.
const log_include_server_certificate_subject_issuer = F &redef;
@ -80,7 +80,7 @@ function describe_file(f: fa_file): string
# we do not know when this function is called (hence, if the data structures
# are already populated).
#
# Just return a bit of our connection information and hope that that is good enough.
# Just return a bit of our connection information and hope that is good enough.
for ( _, c in f$conns )
{
if ( c?$ssl )

View file

@ -32,7 +32,7 @@ event Dir::monitor_ev(dir: string, last_files: set[string],
{
if ( result$exit_code != 0 )
{
Reporter::warning(fmt("Requested monitoring of non-existent directory (%s).", dir));
Reporter::warning(fmt("Requested monitoring of nonexistent directory (%s).", dir));
return;
}

View file

@ -71,7 +71,7 @@ event Control::configuration_update_request()
# Don't need to do anything in particular here, it's just indicating that
# the configuration is going to be updated. This event could be handled
# by other scripts if they need to do some ancilliary processing if
# by other scripts if they need to do some ancillary processing if
# redef-able consts are modified at runtime.
event Control::configuration_update_response();
}

View file

@ -169,7 +169,7 @@ export {
## Response to a :zeek:see:`Management::Agent::API::restart_request`
## event. The agent sends this back to the controller when the
## Supervisor has restarted all nodes affected, or a timoeut occurs.
## Supervisor has restarted all nodes affected, or a timeout occurs.
##
## reqid: the request identifier used in the request event.
##

View file

@ -49,7 +49,7 @@ export {
requests: set[string] &default=set();
};
## Request state for restart requests, tracking eceived responses.
## Request state for restart requests, tracking received responses.
type RestartState: record {
## Request state for every node the agent asks the Supervisor
## to restart.
@ -820,7 +820,7 @@ event Management::Agent::API::node_dispatch_request(reqid: string, action: vecto
res = Management::Result($reqid=reqid,
$instance = Management::Agent::get_name(),
$success = F,
$error = fmt("cluster node %s not in runnning state", node),
$error = fmt("cluster node %s not in running state", node),
$node=node);
req$results += res;
}

View file

@ -17,12 +17,12 @@ export {
## running.
const role = Management::NONE &redef;
## The fallback listen address if more specific adddresses, such as
## The fallback listen address if more specific addresses, such as
## the controller's :zeek:see:`Management::Controller::listen_address`
## remains empty. Unless redefined, this listens on all interfaces.
const default_address = "0.0.0.0" &redef;
## The retry interval for Broker connnects. Defaults to a more
## The retry interval for Broker connects. Defaults to a more
## aggressive value compared to Broker's 30s.
const connect_retry = 1sec &redef;

View file

@ -851,7 +851,7 @@ event Management::Agent::API::deploy_response(reqid: string, results: Management
delete req$deploy_state$requests[areq$id];
# If there are any pending requests to the agents, we're
# done: we respond once every agent has responed (or we time out).
# done: we respond once every agent has responded (or we time out).
if ( |req$deploy_state$requests| > 0 )
return;

View file

@ -15,7 +15,7 @@ export {
## :zeek:see:`Management::Request::finish` clears the state when
## a corresponding response event comes in, or the state times out.
type Request: record {
## Each request has a hopfully unique ID provided by the requester.
## Each request has a hopefully unique ID provided by the requester.
id: string;
## For requests that result based upon another request (such as when
@ -51,7 +51,7 @@ export {
## ensure response to the client.
const timeout_interval = 10sec &redef;
## A token request that serves as a null/nonexistant request.
## A token request that serves as a null/nonexistent request.
global null_req = Request($id="", $finished=T);
## This function establishes request state.
@ -71,7 +71,7 @@ export {
## its internal state. When the request does not exist, this does
## nothing.
##
## reqid: the ID of the request state to releaase.
## reqid: the ID of the request state to release.
##
global finish: function(reqid: string): bool;

View file

@ -51,7 +51,7 @@ export {
## The record type that is used for representing and logging
type CatchReleaseInfo: record {
## The absolute time indicating when the action for this log-line occured.
## The absolute time indicating when the action for this log-line occurred.
ts: time &log;
## The rule id that this log line refers to.
rule_id: string &log &optional;
@ -59,7 +59,7 @@ export {
ip: addr &log;
## The action that was taken in this log-line.
action: CatchReleaseActions &log;
## The current block_interaval (for how long the address is blocked).
## The current block_interval (for how long the address is blocked).
block_interval: interval &log &optional;
## The current watch_interval (for how long the address will be watched and re-block if it reappears).
watch_interval: interval &log &optional;

View file

@ -17,7 +17,7 @@ export {
}
# This is inefficient; however, since this script only executes once on
# startup, this shold be ok.
# startup, this should be ok.
function get_indent(level: count): string
{
local out = "";

View file

@ -53,7 +53,7 @@ event http_message_done(c: connection, is_orig: bool, stat: http_message_stat)
# If a Flash was detected, it has to be logged considering the user agent.
if ( is_orig && c$http?$flash_version )
{
# AdobeAIR contains a seperate Flash, which should be emphasized.
# AdobeAIR contains a separate Flash, which should be emphasized.
# Note: We assume that the user agent header was not reset by the app.
if( c$http?$user_agent )
{

View file

@ -33,7 +33,7 @@ export {
## keys: key material
global add_keys: event(client_random: string, keys: string);
## This event can be triggered, e.g., via Broker to add known secrets to the TLS secret datbase.
## This event can be triggered, e.g., via Broker to add known secrets to the TLS secret database.
##
## client_random: client random for which the secret is set
##

View file

@ -18,7 +18,7 @@ export {
};
}
# Do not disable analyzers after detection - otherwhise we will not notice
# Do not disable analyzers after detection - otherwise we will not notice
# encrypted attacks.
redef SSL::disable_analyzer_after_detection=F;

View file

@ -6,7 +6,7 @@
@load base/files/x509
redef record X509::Info += {
## Base64 endoded X.509 certificate.
## Base64 encoded X.509 certificate.
cert: string &log &optional;
};

View file

@ -1,6 +1,6 @@
##! Perform validation of stapled OCSP responses.
#!
#! Note: this _only_ performs validation of stapled OCSP responsed. It does
#! Note: this _only_ performs validation of stapled OCSP responses. It does
#! not validate OCSP responses that are retrieved via HTTP, because we do not
#! have a mapping to certificates.

View file

@ -30,7 +30,7 @@ export {
## Warn if the DH key length is smaller than the certificate key length. This is
## potentially unsafe because it gives a wrong impression of safety due to the
## certificate key length. However, it is very common and cannot be avoided in some
## settings (e.g. with old jave clients).
## settings (e.g. with old java clients).
option notify_dh_length_shorter_cert_length = T;
## Warn if a server negotiates a SSL session with a protocol version smaller than

View file

@ -66,7 +66,7 @@ redef enum Notice::Type += {
# All redefs are automatically tracked. Comments of the "##" form can be use
# to further document it, but in some cases, like here, they wouldn't be
# ading any interesting information that's not implicit.
# adding any interesting information that's not implicit.
redef enum Log::ID += { LOG };
# Only identifiers declared in an export section will show up in generated docs.