Fix typos and formatting in the sumstats docs

This commit is contained in:
Daniel Thayer 2013-10-10 23:15:12 -05:00
parent 3812716ace
commit 0753853726
6 changed files with 36 additions and 34 deletions

View file

@ -1,6 +1,6 @@
##! This implements transparent cluster support for the SumStats framework.
##! Do not load this file directly. It's only meant to be loaded automatically
##! and will be depending on if the cluster framework has been enabled.
##! and will be if the cluster framework has been enabled.
##! The goal of this script is to make sumstats calculation completely and
##! transparently automated when running on a cluster.
@ -10,31 +10,32 @@
module SumStats;
export {
## The percent of the full threshold value that needs to be met on a single worker
## for that worker to send the value to its manager in order for it to request a
## global view for that value. There is no requirement that the manager requests
## a global view for the key since it may opt not to if it requested a global view
## for the key recently.
## The percent of the full threshold value that needs to be met on a
## single worker for that worker to send the value to its manager in
## order for it to request a global view for that value. There is no
## requirement that the manager requests a global view for the key since
## it may opt not to if it requested a global view for the key recently.
const cluster_request_global_view_percent = 0.2 &redef;
## This is to deal with intermediate update overload. A manager will only allow
## this many intermediate update requests to the workers to be inflight at any
## given time. Requested intermediate updates are currently thrown out and not
## performed. In practice this should hopefully have a minimal effect.
## This is to deal with intermediate update overload. A manager will
## only allow this many intermediate update requests to the workers to
## be in flight at any given time. Requested intermediate updates are
## currently thrown out and not performed. In practice this should
## hopefully have a minimal effect.
const max_outstanding_global_views = 10 &redef;
## Event sent by the manager in a cluster to initiate the collection of values for
## a sumstat.
## Event sent by the manager in a cluster to initiate the collection of
## values for a sumstat.
global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool);
## Event sent by nodes that are collecting sumstats after receiving a request for
## the sumstat from the manager.
## Event sent by nodes that are collecting sumstats after receiving a
## request for the sumstat from the manager.
#global cluster_ss_response: event(uid: string, ss_name: string, data: ResultTable, done: bool, cleanup: bool);
## This event is sent by the manager in a cluster to initiate the collection of
## a single key value from a sumstat. It's typically used to get intermediate
## updates before the break interval triggers to speed detection of a value
## crossing a threshold.
## This event is sent by the manager in a cluster to initiate the
## collection of a single key value from a sumstat. It's typically used
## to get intermediate updates before the break interval triggers to
## speed detection of a value crossing a threshold.
global cluster_get_result: event(uid: string, ss_name: string, key: Key, cleanup: bool);
## This event is sent by nodes in response to a
@ -43,7 +44,7 @@ export {
## This is sent by workers to indicate that they crossed the percent
## of the current threshold by the percentage defined globally in
## :bro:id:`SumStats::cluster_request_global_view_percent`
## :bro:id:`SumStats::cluster_request_global_view_percent`.
global cluster_key_intermediate_response: event(ss_name: string, key: SumStats::Key);
## This event is scheduled internally on workers to send result chunks.

View file

@ -51,8 +51,8 @@ export {
## would like to accept the data being inserted.
pred: function(key: SumStats::Key, obs: SumStats::Observation): bool &optional;
## A function to normalize the key. This can be used to aggregate or
## normalize the entire key.
## A function to normalize the key. This can be used to
## aggregate or normalize the entire key.
normalize_key: function(key: SumStats::Key): Key &optional;
};
@ -91,28 +91,28 @@ export {
name: string;
## The interval at which this filter should be "broken"
## and the '$epoch_result' callback called. The
## and the *epoch_result* callback called. The
## results are also reset at this time so any threshold
## based detection needs to be set to a
## value that should be expected to happen within
## this epoch.
epoch: interval;
## The reducers for the SumStat
## The reducers for the SumStat.
reducers: set[Reducer];
## Provide a function to calculate a value from the
## :bro:see:`SumStats::Result` structure which will be used
## for thresholding.
## This is required if a $threshold value is given.
## This is required if a *threshold* value is given.
threshold_val: function(key: SumStats::Key, result: SumStats::Result): double &optional;
## The threshold value for calling the
## $threshold_crossed callback.
## *threshold_crossed* callback.
threshold: double &optional;
## A series of thresholds for calling the
## $threshold_crossed callback.
## *threshold_crossed* callback.
threshold_series: vector of double &optional;
## A callback that is called when a threshold is crossed.
@ -124,7 +124,7 @@ export {
epoch_result: function(ts: time, key: SumStats::Key, result: SumStats::Result) &optional;
## A callback that will be called when a single collection
## interval is completed. The ts value will be the time of
## interval is completed. The *ts* value will be the time of
## when the collection started.
epoch_finished: function(ts:time) &optional;
};

View file

@ -5,12 +5,12 @@ module SumStats;
export {
redef enum Calculation += {
## Keep last X observations in a queue
## Keep last X observations in a queue.
LAST
};
redef record Reducer += {
## number of elements to keep.
## Number of elements to keep.
num_last_elements: count &default=0;
};

View file

@ -4,7 +4,8 @@ module SumStats;
export {
redef enum Calculation += {
## Get uniquely distributed random samples from the observation stream.
## Get uniquely distributed random samples from the observation
## stream.
SAMPLE
};
@ -24,8 +25,8 @@ export {
redef record ResultVal += {
# Internal use only. This is not meant to be publicly available
# and just a copy of num_samples from the Reducer. Needed for availability
# in the compose hook.
# and just a copy of num_samples from the Reducer. Needed for
# availability in the compose hook.
num_samples: count &default=0;
};

View file

@ -4,7 +4,7 @@ module SumStats;
export {
redef record Reducer += {
## number of elements to keep in the top-k list
## Number of elements to keep in the top-k list.
topk_size: count &default=500;
};

View file

@ -28,7 +28,7 @@ redef record ResultVal += {
# Internal use only. This is not meant to be publicly available
# because we don't want to trust that we can inspect the values
# since we will like move to a probalistic data structure in the future.
# since we will likely move to a probabilistic data structure in the future.
# TODO: in the future this will optionally be a hyperloglog structure
unique_vals: set[Observation] &optional;
};