diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro
index 0e398ac257..476d314523 100644
--- a/scripts/base/frameworks/logging/main.bro
+++ b/scripts/base/frameworks/logging/main.bro
@@ -1,6 +1,6 @@
 ##! The Bro logging interface.
 ##!
-##! See :doc:`/frameworks/logging` for a introduction to Bro's
+##! See :doc:`/frameworks/logging` for an introduction to Bro's
 ##! logging framework.
 module Log;
@@ -27,7 +27,7 @@ export {
 const set_separator = "," &redef;
 ## String to use for empty fields. This should be different from
- ## *unset_field* to make the output non-ambigious.
+ ## *unset_field* to make the output unambiguous.
 ## Can be overwritten by individual writers.
 const empty_field = "(empty)" &redef;
@@ -41,8 +41,8 @@ export {
 columns: any;
 ## Event that will be raised once for each log entry.
- ## The event receives a single same parameter, an instance of type
- ## ``columns``.
+ ## The event receives a single parameter, an instance of
+ ## type ``columns``.
 ev: any &optional;
 };
@@ -114,7 +114,7 @@ export {
 ##
 ## The specific interpretation of the string is up to
 ## the used writer, and may for example be the destination
- ## file name. Generally, filenames are expected to given
+ ## file name. Generally, filenames are expected to be given
 ## without any extensions; writers will add appropiate
 ## extensions automatically.
 ##
@@ -126,34 +126,36 @@ export {
 path: string &optional;
 ## A function returning the output path for recording entries
- ## matching this filter. This is similar to ``path`` yet allows
+ ## matching this filter. This is similar to *path* yet allows
 ## to compute the string dynamically. It is ok to return
 ## different strings for separate calls, but be careful: it's
 ## easy to flood the disk by returning a new string for each
- ## connection ...
+ ## connection.
 ##
 ## id: The ID associated with the log stream.
 ##
 ## path: A suggested path value, which may be either the filter's
- ## ``path`` if defined, else a previous result from the function.
- ## If no ``path`` is defined for the filter, then the first call
- ## to the function will contain an empty string.
+ ## ``path`` if defined, else a previous result from the
+ ## function. If no ``path`` is defined for the filter,
+ ## then the first call to the function will contain an
+ ## empty string.
 ##
 ## rec: An instance of the streams's ``columns`` type with its
 ## fields set to the values to be logged.
 ##
- ## Returns: The path to be used for the filter, which will be subject
- ## to the same automatic correction rules as the *path*
- ## field of :bro:type:`Log::Filter` in the case of conflicts
- ## with other filters trying to use the same writer/path pair.
+ ## Returns: The path to be used for the filter, which will be
+ ## subject to the same automatic correction rules as
+ ## the *path* field of :bro:type:`Log::Filter` in the
+ ## case of conflicts with other filters trying to use
+ ## the same writer/path pair.
 path_func: function(id: ID, path: string, rec: any): string &optional;
 ## Subset of column names to record. If not given, all
 ## columns are recorded.
 include: set[string] &optional;
- ## Subset of column names to exclude from recording. If not given,
- ## all columns are recorded.
+ ## Subset of column names to exclude from recording. If not
+ ## given, all columns are recorded.
 exclude: set[string] &optional;
 ## If true, entries are recorded locally.
@@ -229,7 +231,7 @@ export {
 ##
 ## filter: A record describing the desired logging parameters.
 ##
- ## Returns: True if the filter was sucessfully added, false if
+ ## Returns: True if the filter was successfully added, false if
 ## the filter was not added or the *filter* argument was not
 ## the correct type.
 ##
@@ -277,7 +279,7 @@ export {
 ##
 ## Returns: True if the stream was found and no error occurred in writing
 ## to it or if the stream was disabled and nothing was written.
- ## False if the stream was was not found, or the *columns*
+ ## False if the stream was not found, or the *columns*
 ## argument did not match what the stream was initially defined
 ## to handle, or one of the stream's filters has an invalid
 ## ``path_func``.
@@ -286,8 +288,8 @@ export {
 global write: function(id: ID, columns: any) : bool;
 ## Sets the buffering status for all the writers of a given logging stream.
- ## A given writer implementation may or may not support buffering and if it
- ## doesn't then toggling buffering with this function has no effect.
+ ## A given writer implementation may or may not support buffering and if
+ ## it doesn't then toggling buffering with this function has no effect.
 ##
 ## id: The ID associated with a logging stream for which to
 ## enable/disable buffering.
@@ -347,7 +349,7 @@ export {
 ##
 ## npath: The new path of the file (after already being rotated/processed
 ## by writer-specific postprocessor as defined in
- ## :bro:id:`Log::default_rotation_postprocessors`.
+ ## :bro:id:`Log::default_rotation_postprocessors`).
 ##
 ## Returns: True when :bro:id:`Log::default_rotation_postprocessor_cmd`
 ## is empty or the system command given by it has been invoked
diff --git a/scripts/base/frameworks/logging/postprocessors/scp.bro b/scripts/base/frameworks/logging/postprocessors/scp.bro
index 3aadc5bbf3..d63520abe6 100644
--- a/scripts/base/frameworks/logging/postprocessors/scp.bro
+++ b/scripts/base/frameworks/logging/postprocessors/scp.bro
@@ -16,9 +16,9 @@ module Log;
 export {
- ## Secure-copies the rotated-log to all the remote hosts
+ ## Secure-copies the rotated log to all the remote hosts
 ## defined in :bro:id:`Log::scp_destinations` and then deletes
- ## the local copy of the rotated-log. It's not active when
+ ## the local copy of the rotated log. It's not active when
 ## reading from trace files.
 ##
 ## info: A record holding meta-information about the log file to be
@@ -42,9 +42,9 @@
 };
 ## A table indexed by a particular log writer and filter path, that yields
- ## a set remote destinations. The :bro:id:`Log::scp_postprocessor`
+ ## a set of remote destinations. The :bro:id:`Log::scp_postprocessor`
 ## function queries this table upon log rotation and performs a secure
- ## copy of the rotated-log to each destination in the set. This
+ ## copy of the rotated log to each destination in the set. This
 ## table can be modified at run-time.
 global scp_destinations: table[Writer, string] of set[SCPDestination];
diff --git a/scripts/base/frameworks/logging/postprocessors/sftp.bro b/scripts/base/frameworks/logging/postprocessors/sftp.bro
index 5a31853063..b7f6827026 100644
--- a/scripts/base/frameworks/logging/postprocessors/sftp.bro
+++ b/scripts/base/frameworks/logging/postprocessors/sftp.bro
@@ -16,9 +16,9 @@ module Log;
 export {
- ## Securely transfers the rotated-log to all the remote hosts
+ ## Securely transfers the rotated log to all the remote hosts
 ## defined in :bro:id:`Log::sftp_destinations` and then deletes
- ## the local copy of the rotated-log. It's not active when
+ ## the local copy of the rotated log. It's not active when
 ## reading from trace files.
 ##
 ## info: A record holding meta-information about the log file to be
@@ -42,9 +42,9 @@
 };
 ## A table indexed by a particular log writer and filter path, that yields
- ## a set remote destinations. The :bro:id:`Log::sftp_postprocessor`
+ ## a set of remote destinations. The :bro:id:`Log::sftp_postprocessor`
 ## function queries this table upon log rotation and performs a secure
- ## transfer of the rotated-log to each destination in the set. This
+ ## transfer of the rotated log to each destination in the set. This
 ## table can be modified at run-time.
 global sftp_destinations: table[Writer, string] of set[SFTPDestination];
diff --git a/scripts/base/frameworks/logging/writers/ascii.bro b/scripts/base/frameworks/logging/writers/ascii.bro
index da1cfbde87..e510874951 100644
--- a/scripts/base/frameworks/logging/writers/ascii.bro
+++ b/scripts/base/frameworks/logging/writers/ascii.bro
@@ -2,10 +2,10 @@
 ##! to tweak the output format of ASCII logs.
 ##!
 ##! The ASCII writer supports currently one writer-specific filter option via
-##! ``config``: setting ``tsv`` to the string ``T`` turns the output into into
-##! "tab-separated-value" mode where only a single header row with the column names
-##! is printed out as meta information, with no "# fields" prepended; no other meta
-##! data gets included in that mode.
+##! ``config``: setting ``tsv`` to the string ``T`` turns the output into
+##! "tab-separated-value" mode where only a single header row with the column
+##! names is printed out as meta information, with no "# fields" prepended; no
+##! other meta data gets included in that mode.
 ##!
 ##! Example filter using this::
 ##!
@@ -19,9 +19,9 @@ export {
 ## into files. This is primarily for debugging purposes.
 const output_to_stdout = F &redef;
- ## If true, include lines with log meta information such as column names with
- ## types, the values of ASCII logging options that in use, and the time when the
- ## file was opened and closes (the latter at the end).
+ ## If true, include lines with log meta information such as column names
+ ## with types, the values of ASCII logging options that are in use, and
+ ## the time when the file was opened and closed (the latter at the end).
 const include_meta = T &redef;
 ## Prefix for lines with meta information.
@@ -34,7 +34,7 @@ export {
 const set_separator = Log::set_separator &redef;
 ## String to use for empty fields. This should be different from
- ## *unset_field* to make the output non-ambigious.
+ ## *unset_field* to make the output unambiguous.
 const empty_field = Log::empty_field &redef;
 ## String to use for an unset &optional field.
diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro
index e85d9c8c49..0b7b2f5a03 100644
--- a/scripts/base/frameworks/logging/writers/dataseries.bro
+++ b/scripts/base/frameworks/logging/writers/dataseries.bro
@@ -6,16 +6,16 @@ export {
 ## Compression to use with the DS output file. Options are:
 ##
 ## 'none' -- No compression.
- ## 'lzf' -- LZF compression. Very quick, but leads to larger output files.
- ## 'lzo' -- LZO compression. Very fast decompression times.
- ## 'gz' -- GZIP compression. Slower than LZF, but also produces smaller output.
- ## 'bz2' -- BZIP2 compression. Slower than GZIP, but also produces smaller output.
+ ## 'lzf' -- LZF compression (very quick, but leads to larger output files).
+ ## 'lzo' -- LZO compression (very fast decompression times).
+ ## 'gz' -- GZIP compression (slower than LZF, but also produces smaller output).
+ ## 'bz2' -- BZIP2 compression (slower than GZIP, but also produces smaller output).
 const compression = "gz" &redef;
 ## The extent buffer size.
- ## Larger values here lead to better compression and more efficient writes, but
- ## also increase the lag between the time events are received and the time they
- ## are actually written to disk.
+ ## Larger values here lead to better compression and more efficient writes,
+ ## but also increase the lag between the time events are received and
+ ## the time they are actually written to disk.
 const extent_size = 65536 &redef;
 ## Should we dump the XML schema we use for this DS file to disk?
@@ -43,8 +43,8 @@
 }
 # Default function to postprocess a rotated DataSeries log file. It moves the
-# rotated file to a new name that includes a timestamp with the opening time, and
-# then runs the writer's default postprocessor command on it.
+# rotated file to a new name that includes a timestamp with the opening time,
+# and then runs the writer's default postprocessor command on it.
 function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
 {
 # Move file to name including both opening and closing time.
diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro
index 1901759730..6292876bd0 100644
--- a/scripts/base/frameworks/logging/writers/elasticsearch.bro
+++ b/scripts/base/frameworks/logging/writers/elasticsearch.bro
@@ -10,16 +10,16 @@ module LogElasticSearch;
 export {
- ## Name of the ES cluster
+ ## Name of the ES cluster.
 const cluster_name = "elasticsearch" &redef;
- ## ES Server
+ ## ES server.
 const server_host = "127.0.0.1" &redef;
- ## ES Port
+ ## ES port.
 const server_port = 9200 &redef;
- ## Name of the ES index
+ ## Name of the ES index.
 const index_prefix = "bro" &redef;
 ## The ES type prefix comes before the name of the related log.
@@ -27,9 +27,9 @@ export {
 const type_prefix = "" &redef;
 ## The time before an ElasticSearch transfer will timeout. Note that
- ## the fractional part of the timeout will be ignored. In particular, time
- ## specifications less than a second result in a timeout value of 0, which
- ## means "no timeout."
+ ## the fractional part of the timeout will be ignored. In particular,
+ ## time specifications less than a second result in a timeout value of
+ ## 0, which means "no timeout."
 const transfer_timeout = 2secs;
 ## The batch size is the number of messages that will be queued up before
diff --git a/scripts/base/frameworks/logging/writers/none.bro b/scripts/base/frameworks/logging/writers/none.bro
index 869d7246c7..5763b796a9 100644
--- a/scripts/base/frameworks/logging/writers/none.bro
+++ b/scripts/base/frameworks/logging/writers/none.bro
@@ -1,4 +1,4 @@
-##! Interface for the None log writer. Thiis writer is mainly for debugging.
+##! Interface for the None log writer. This writer is mainly for debugging.
 module LogNone;
diff --git a/scripts/base/frameworks/logging/writers/sqlite.bro b/scripts/base/frameworks/logging/writers/sqlite.bro
index 0ad8946dcc..5df5e356c8 100644
--- a/scripts/base/frameworks/logging/writers/sqlite.bro
+++ b/scripts/base/frameworks/logging/writers/sqlite.bro
@@ -11,7 +11,7 @@ export {
 const unset_field = Log::unset_field &redef;
 ## String to use for empty fields. This should be different from
- ## *unset_field* to make the output non-ambigious.
+ ## *unset_field* to make the output unambiguous.
 const empty_field = Log::empty_field &redef;
 }
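
The ``Log::Stream`` record documented in main.bro above (the ``columns`` and ``ev`` fields) is easiest to read alongside a concrete stream definition. Below is a minimal Bro-script sketch; the ``Foo`` module, its ``Info`` fields, and the ``log_foo`` event are hypothetical names used only for illustration::

    module Foo;

    export {
        # Hypothetical stream ID and column record; &log marks logged fields.
        redef enum Log::ID += { LOG };

        type Info: record {
            ts:  time   &log;
            msg: string &log;
        };

        # Raised once per entry, receiving an Info instance (the "ev" field).
        global log_foo: event(rec: Info);
    }

    event bro_init()
        {
        Log::create_stream(Foo::LOG, [$columns=Info, $ev=log_foo]);
        }

    event connection_established(c: connection)
        {
        # Each write produces one log entry and one log_foo event.
        local rec: Foo::Info = [$ts=network_time(), $msg="example entry"];
        Log::write(Foo::LOG, rec);
        }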
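
The rewrapped ``path_func`` documentation is likewise clearer next to an example. The sketch below assumes the stock ``Conn::LOG`` stream and its ``proto`` column, and that a path function may be declared with the stream's own ``Conn::Info`` type (as the ``rec: any`` field is intended to allow); the function and filter names are made up, and the returned string is still subject to the automatic path-conflict correction described above::

    # Send conn entries to per-protocol files, e.g. "conn-tcp", "conn-udp".
    function split_by_proto(id: Log::ID, path: string, rec: Conn::Info): string
        {
        return fmt("%s-%s", path, rec$proto);
        }

    event bro_init()
        {
        Log::add_filter(Conn::LOG, [$name="proto-split", $path_func=split_by_proto]);
        }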
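
Finally, the ``tsv`` option described in the ascii.bro header is passed through a filter's ``config`` table. A minimal sketch, with an arbitrary filter name::

    event bro_init()
        {
        # Write conn entries as plain tab-separated values with a single header row.
        local f: Log::Filter = [$name="conn-tsv",
                                $writer=Log::WRITER_ASCII,
                                $config=table(["tsv"] = "T")];
        Log::add_filter(Conn::LOG, f);
        }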