From 02d7e16997f95009d2f53ca568866905be585230 Mon Sep 17 00:00:00 2001
From: Daniel Thayer
Date: Mon, 21 Oct 2013 02:37:00 -0500
Subject: [PATCH] Fix typos and formatting in the other policy docs

---
 scripts/policy/integration/barnyard2/main.bro |  4 +-
 .../policy/integration/barnyard2/types.bro    |  2 +-
 .../integration/collective-intel/main.bro     |  4 +-
 scripts/policy/misc/capture-loss.bro          |  6 +-
 .../policy/misc/detect-traceroute/main.bro    | 26 +++++----
 scripts/policy/misc/known-devices.bro         | 19 ++++---
 scripts/policy/misc/load-balancing.bro        |  7 ++-
 scripts/policy/misc/loaded-scripts.bro        |  8 +--
 scripts/policy/misc/profiling.bro             |  3 +-
 scripts/policy/misc/scan.bro                  | 56 ++++++++++---------
 scripts/policy/misc/stats.bro                 | 23 ++++----
 scripts/policy/misc/trim-trace-file.bro       |  8 +--
 .../policy/tuning/logs-to-elasticsearch.bro   |  4 +-
 13 files changed, 90 insertions(+), 80 deletions(-)

diff --git a/scripts/policy/integration/barnyard2/main.bro b/scripts/policy/integration/barnyard2/main.bro
index 1d38d80809..42364e8d76 100644
--- a/scripts/policy/integration/barnyard2/main.bro
+++ b/scripts/policy/integration/barnyard2/main.bro
@@ -15,8 +15,8 @@ export {
 		alert: AlertData &log;
 	};

-	## This can convert a Barnyard :bro:type:`Barnyard2::PacketID` value to a
-	## :bro:type:`conn_id` value in the case that you might need to index
+	## This can convert a Barnyard :bro:type:`Barnyard2::PacketID` value to
+	## a :bro:type:`conn_id` value in the case that you might need to index
 	## into an existing data structure elsewhere within Bro.
 	global pid2cid: function(p: PacketID): conn_id;
 }
diff --git a/scripts/policy/integration/barnyard2/types.bro b/scripts/policy/integration/barnyard2/types.bro
index 6cfcbb9535..da7015b302 100644
--- a/scripts/policy/integration/barnyard2/types.bro
+++ b/scripts/policy/integration/barnyard2/types.bro
@@ -11,7 +11,7 @@ export {
 		generator_id: count; ##< Which generator generated the alert?
 		signature_revision: count; ##< Sig revision for this id.
 		classification_id: count; ##< Event classification.
-		classification: string; ##< Descriptive classification string,
+		classification: string; ##< Descriptive classification string.
 		priority_id: count; ##< Event priority.
 		event_id: count; ##< Event ID.
 	} &log;
diff --git a/scripts/policy/integration/collective-intel/main.bro b/scripts/policy/integration/collective-intel/main.bro
index a1ee7a4ab9..48459c378a 100644
--- a/scripts/policy/integration/collective-intel/main.bro
+++ b/scripts/policy/integration/collective-intel/main.bro
@@ -3,8 +3,8 @@

 module Intel;

-## These are some fields to add extended compatibility between Bro and the Collective
-## Intelligence Framework
+## These are some fields to add extended compatibility between Bro and the
+## Collective Intelligence Framework.
 redef record Intel::MetaData += {
 	## Maps to the Impact field in the Collective Intelligence Framework.
 	cif_impact: string &optional;
diff --git a/scripts/policy/misc/capture-loss.bro b/scripts/policy/misc/capture-loss.bro
index 1f0726299d..fd578ebf25 100644
--- a/scripts/policy/misc/capture-loss.bro
+++ b/scripts/policy/misc/capture-loss.bro
@@ -4,7 +4,7 @@
 ##! the packet capture or it could even be beyond the host. If you are
 ##! capturing from a switch with a SPAN port, it's very possible that
 ##! the switch itself could be overloaded and dropping packets.
-##! Reported loss is computed in terms of number of "gap events" (ACKs
+##! Reported loss is computed in terms of the number of "gap events" (ACKs
 ##! for a sequence number that's above a gap).

 @load base/frameworks/notice
@@ -26,7 +26,7 @@ export {
 		## The time delay between this measurement and the last.
 		ts_delta: interval &log;
 		## In the event that there are multiple Bro instances logging
-		## to the same host, this distinguishes each peer with it's
+		## to the same host, this distinguishes each peer with its
 		## individual name.
 		peer: string &log;
 		## Number of missed ACKs from the previous measurement interval.
@@ -43,7 +43,7 @@ export {
 	## The percentage of missed data that is considered "too much"
 	## when the :bro:enum:`CaptureLoss::Too_Much_Loss` notice should be
 	## generated. The value is expressed as a double between 0 and 1 with 1
-	## being 100%
+	## being 100%.
 	const too_much_loss: double = 0.1 &redef;
 }

diff --git a/scripts/policy/misc/detect-traceroute/main.bro b/scripts/policy/misc/detect-traceroute/main.bro
index 6b472f2948..aa403e6a08 100644
--- a/scripts/policy/misc/detect-traceroute/main.bro
+++ b/scripts/policy/misc/detect-traceroute/main.bro
@@ -1,7 +1,8 @@
-##! This script detects a large number of ICMP Time Exceeded messages heading toward
-##! hosts that have sent low TTL packets. It generates a notice when the number of
-##! ICMP Time Exceeded messages for a source-destination pair exceeds a
-##! threshold.
+##! This script detects a large number of ICMP Time Exceeded messages heading
+##! toward hosts that have sent low TTL packets. It generates a notice when the
+##! number of ICMP Time Exceeded messages for a source-destination pair exceeds
+##! a threshold.
+
 @load base/frameworks/sumstats
 @load base/frameworks/signatures
 @load-sigs ./detect-low-ttls.sig
@@ -20,15 +21,16 @@ export {
 		Detected
 	};

-	## By default this script requires that any host detected running traceroutes
-	## first send low TTL packets (TTL < 10) to the traceroute destination host.
-	## Changing this this setting to `F` will relax the detection a bit by
-	## solely relying on ICMP time-exceeded messages to detect traceroute.
+	## By default this script requires that any host detected running
+	## traceroutes first send low TTL packets (TTL < 10) to the traceroute
+	## destination host. Changing this setting to F will relax the
+	## detection a bit by solely relying on ICMP time-exceeded messages to
+	## detect traceroute.
 	const require_low_ttl_packets = T &redef;

-	## Defines the threshold for ICMP Time Exceeded messages for a src-dst pair.
-	## This threshold only comes into play after a host is found to be
-	## sending low ttl packets.
+	## Defines the threshold for ICMP Time Exceeded messages for a src-dst
+	## pair. This threshold only comes into play after a host is found to
+	## be sending low TTL packets.
 	const icmp_time_exceeded_threshold: double = 3 &redef;

 	## Interval at which to watch for the
@@ -40,7 +42,7 @@ export {
 	type Info: record {
 		## Timestamp
 		ts: time &log;
-		## Address initiaing the traceroute.
+		## Address initiating the traceroute.
 		src: addr &log;
 		## Destination address of the traceroute.
 		dst: addr &log;
diff --git a/scripts/policy/misc/known-devices.bro b/scripts/policy/misc/known-devices.bro
index a7c0b314b7..16c5250d1c 100644
--- a/scripts/policy/misc/known-devices.bro
+++ b/scripts/policy/misc/known-devices.bro
@@ -1,7 +1,7 @@
-##! This script provides infrastructure for logging devices for which Bro has been
-##! able to determine the MAC address, and it logs them once per day (by default).
-##! The log that is output provides an easy way to determine a count of the devices
-##! in use on a network per day.
+##! This script provides infrastructure for logging devices for which Bro has
+##! been able to determine the MAC address, and it logs them once per day (by
+##! default). The log that is output provides an easy way to determine a count
+##! of the devices in use on a network per day.
 ##!
 ##! .. note::
 ##!
@@ -15,7 +15,8 @@ export {
 	## The known-hosts logging stream identifier.
 	redef enum Log::ID += { DEVICES_LOG };

-	## The record type which contains the column fields of the known-devices log.
+	## The record type which contains the column fields of the known-devices
+	## log.
 	type DevicesInfo: record {
 		## The timestamp at which the host was detected.
 		ts: time &log;
@@ -24,10 +25,10 @@ export {
 	};

 	## The set of all known MAC addresses. It can accessed from other
-	## to add, and check for, addresses seen in use.
-	##
-	## We maintain each entry for 24 hours by default so that the existence of
-	## individual addressed is logged each day.
+	## scripts to add, and check for, addresses seen in use.
+	##
+	## We maintain each entry for 24 hours by default so that the existence
+	## of individual addresses is logged each day.
 	global known_devices: set[string] &create_expire=1day &synchronized &redef;

 	## An event that can be handled to access the :bro:type:`Known::DevicesInfo`
diff --git a/scripts/policy/misc/load-balancing.bro b/scripts/policy/misc/load-balancing.bro
index 889d18119a..c2adf23f09 100644
--- a/scripts/policy/misc/load-balancing.bro
+++ b/scripts/policy/misc/load-balancing.bro
@@ -29,9 +29,10 @@ export {
 	#global confirm_filter_installation: event(success: bool);

 	redef record Cluster::Node += {
-		## A BPF filter for load balancing traffic sniffed on a single interface
-		## across a number of processes. In normal uses, this will be assigned
-		## dynamically by the manager and installed by the workers.
+		## A BPF filter for load balancing traffic sniffed on a single
+		## interface across a number of processes. In normal uses, this
+		## will be assigned dynamically by the manager and installed by
+		## the workers.
 		lb_filter: string &optional;
 	};
 }
diff --git a/scripts/policy/misc/loaded-scripts.bro b/scripts/policy/misc/loaded-scripts.bro
index 516826aa7e..bd6943e928 100644
--- a/scripts/policy/misc/loaded-scripts.bro
+++ b/scripts/policy/misc/loaded-scripts.bro
@@ -7,9 +7,9 @@ export {
 	redef enum Log::ID += { LOG };

 	type Info: record {
-		## Name of the script loaded potentially with spaces included before
-		## the file name to indicate load depth. The convention is two spaces
-		## per level of depth.
+		## Name of the script loaded potentially with spaces included
+		## before the file name to indicate load depth. The convention
+		## is two spaces per level of depth.
 		name: string &log;
 	};
 }
@@ -36,4 +36,4 @@ event bro_init() &priority=5
 event bro_script_loaded(path: string, level: count)
 	{
 	Log::write(LoadedScripts::LOG, [$name=cat(depth[level], compress_path(path))]);
-	}
\ No newline at end of file
+	}
diff --git a/scripts/policy/misc/profiling.bro b/scripts/policy/misc/profiling.bro
index 31451f1a55..613e78f860 100644
--- a/scripts/policy/misc/profiling.bro
+++ b/scripts/policy/misc/profiling.bro
@@ -8,7 +8,8 @@ redef profiling_file = open_log_file("prof");
 ## Set the cheap profiling interval.
 redef profiling_interval = 15 secs;

-## Set the expensive profiling interval.
+## Set the expensive profiling interval (multiple of
+## :bro:id:`profiling_interval`).
 redef expensive_profiling_multiple = 20;

 event bro_init()
diff --git a/scripts/policy/misc/scan.bro b/scripts/policy/misc/scan.bro
index b1b63b74da..e458f6c450 100644
--- a/scripts/policy/misc/scan.bro
+++ b/scripts/policy/misc/scan.bro
@@ -1,8 +1,8 @@
-##! TCP Scan detection
-##!
-##! ..Authors: Sheharbano Khattak
-##! Seth Hall
-##! All the authors of the old scan.bro
+##! TCP Scan detection.
+
+# ..Authors: Sheharbano Khattak
+# Seth Hall
+# All the authors of the old scan.bro

 @load base/frameworks/notice
 @load base/frameworks/sumstats
@@ -13,37 +13,38 @@ module Scan;

 export {
 	redef enum Notice::Type += {
-		## Address scans detect that a host appears to be scanning some number
-		## of destinations on a single port. This notice is generated when more
-		## than :bro:id:`Scan::addr_scan_threshold` unique hosts are seen over
-		## the previous :bro:id:`Scan::addr_scan_interval` time range.
+		## Address scans detect that a host appears to be scanning some
+		## number of destinations on a single port. This notice is
+		## generated when more than :bro:id:`Scan::addr_scan_threshold`
+		## unique hosts are seen over the previous
+		## :bro:id:`Scan::addr_scan_interval` time range.
 		Address_Scan,

-		## Port scans detect that an attacking host appears to be scanning a
-		## single victim host on several ports. This notice is generated when
-		## an attacking host attempts to connect to
+		## Port scans detect that an attacking host appears to be
+		## scanning a single victim host on several ports. This notice
+		## is generated when an attacking host attempts to connect to
 		## :bro:id:`Scan::port_scan_threshold`
 		## unique ports on a single host over the previous
 		## :bro:id:`Scan::port_scan_interval` time range.
 		Port_Scan,
 	};

-	## Failed connection attempts are tracked over this time interval for the address
-	## scan detection. A higher interval will detect slower scanners, but may also
-	## yield more false positives.
+	## Failed connection attempts are tracked over this time interval for
+	## the address scan detection. A higher interval will detect slower
+	## scanners, but may also yield more false positives.
 	const addr_scan_interval = 5min &redef;

-	## Failed connection attempts are tracked over this time interval for the port scan
-	## detection. A higher interval will detect slower scanners, but may also yield
-	## more false positives.
+	## Failed connection attempts are tracked over this time interval for
+	## the port scan detection. A higher interval will detect slower
+	## scanners, but may also yield more false positives.
 	const port_scan_interval = 5min &redef;

-	## The threshold of a unique number of hosts a scanning host has to have failed
-	## connections with on a single port.
+	## The threshold of the unique number of hosts a scanning host has to
+	## have failed connections with on a single port.
 	const addr_scan_threshold = 25.0 &redef;

-	## The threshold of a number of unique ports a scanning host has to have failed
-	## connections with on a single victim host.
+	## The threshold of the number of unique ports a scanning host has to
+	## have failed connections with on a single victim host.
 	const port_scan_threshold = 15.0 &redef;

 	global Scan::addr_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
@@ -148,7 +149,7 @@ function is_reverse_failed_conn(c: connection): bool
 ## Generated for an unsuccessful connection attempt. This
 ## event is raised when an originator unsuccessfully attempted
-## to establish a connection. “Unsuccessful” is defined as at least
+## to establish a connection. "Unsuccessful" is defined as at least
 ## tcp_attempt_delay seconds having elapsed since the originator first sent a
 ## connection establishment packet to the destination without seeing a reply.
 event connection_attempt(c: connection)
 	{
 	local is_reverse_scan = F;
@@ -160,9 +161,9 @@ event connection_attempt(c: connection)
 	add_sumstats(c$id, is_reverse_scan);
 	}

-## Generated for a rejected TCP connection. This event is raised when an originator
-## attempted to setup a TCP connection but the responder replied with a RST packet
-## denying it.
+## Generated for a rejected TCP connection. This event is raised when an
+## originator attempted to setup a TCP connection but the responder replied with
+## a RST packet denying it.
 event connection_rejected(c: connection)
 	{
 	local is_reverse_scan = F;
@@ -173,7 +174,8 @@ event connection_rejected(c: connection)
 	}

 ## Generated when an endpoint aborted a TCP connection. The event is raised when
-## one endpoint of an *established* TCP connection aborted by sending a RST packet.
+## one endpoint of an *established* TCP connection aborted by sending a RST
+## packet.
 event connection_reset(c: connection)
 	{
 	if ( is_failed_conn(c) )
diff --git a/scripts/policy/misc/stats.bro b/scripts/policy/misc/stats.bro
index d7866fd136..7e1e4b6689 100644
--- a/scripts/policy/misc/stats.bro
+++ b/scripts/policy/misc/stats.bro
@@ -1,4 +1,5 @@
-##! Log memory/packet/lag statistics. Differs from profiling.bro in that this
+##! Log memory/packet/lag statistics. Differs from
+##! :doc:`/scripts/policy/misc/profiling` in that this
 ##! is lighter-weight (much less info, and less load to generate).

 @load base/frameworks/notice
@@ -20,21 +21,23 @@ export {
 		mem: count &log;
 		## Number of packets processed since the last stats interval.
 		pkts_proc: count &log;
-		## Number of events that been processed since the last stats interval.
+		## Number of events processed since the last stats interval.
 		events_proc: count &log;
-		## Number of events that have been queued since the last stats interval.
+		## Number of events that have been queued since the last stats
+		## interval.
 		events_queued: count &log;
-		## Lag between the wall clock and packet timestamps if reading live traffic.
+		## Lag between the wall clock and packet timestamps if reading
+		## live traffic.
 		lag: interval &log &optional;
-		## Number of packets received since the last stats interval if reading
-		## live traffic.
+		## Number of packets received since the last stats interval if
+		## reading live traffic.
 		pkts_recv: count &log &optional;
-		## Number of packets dropped since the last stats interval if reading
-		## live traffic.
+		## Number of packets dropped since the last stats interval if
+		## reading live traffic.
 		pkts_dropped: count &log &optional;
-		## Number of packets seen on the link since the last stats interval
-		## if reading live traffic.
+		## Number of packets seen on the link since the last stats
+		## interval if reading live traffic.
 		pkts_link: count &log &optional;
 	};

diff --git a/scripts/policy/misc/trim-trace-file.bro b/scripts/policy/misc/trim-trace-file.bro
index 8a7781b628..8f534ec005 100644
--- a/scripts/policy/misc/trim-trace-file.bro
+++ b/scripts/policy/misc/trim-trace-file.bro
@@ -1,4 +1,4 @@
-##! Deletes the -w tracefile at regular intervals and starts a new file
+##! Deletes the ``-w`` tracefile at regular intervals and starts a new file
 ##! from scratch.

 module TrimTraceFile;
@@ -8,9 +8,9 @@ export {
 	const trim_interval = 10 mins &redef;

 	## This event can be generated externally to this script if on-demand
-	## tracefile rotation is required with the caveat that the script doesn't
-	## currently attempt to get back on schedule automatically and the next
-	## trim will likely won't happen on the
+	## tracefile rotation is required with the caveat that the script
+	## doesn't currently attempt to get back on schedule automatically and
+	## the next trim likely won't happen on the
 	## :bro:id:`TrimTraceFile::trim_interval`.
 	global go: event(first_trim: bool);
 }
diff --git a/scripts/policy/tuning/logs-to-elasticsearch.bro b/scripts/policy/tuning/logs-to-elasticsearch.bro
index 2a4b70362a..b770b8f84b 100644
--- a/scripts/policy/tuning/logs-to-elasticsearch.bro
+++ b/scripts/policy/tuning/logs-to-elasticsearch.bro
@@ -12,8 +12,8 @@ export {

 	## If you want to explicitly only send certain :bro:type:`Log::ID`
 	## streams, add them to this set. If the set remains empty, all will
-	## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option will remain in
-	## effect as well.
+	## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option
+	## will remain in effect as well.
 	const send_logs: set[Log::ID] &redef;
 }
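
For context: the tunable constants whose documentation is touched above are all
declared with &redef, so a site can adjust them from local.bro. A minimal sketch
follows; it is not part of the patch, the values shown are illustrative rather
than recommendations, and the Conn::LOG/HTTP::LOG stream names are just examples
of existing Log::ID values. It assumes a default install where the policy/
directory is on BROPATH.

    # Load the policy scripts documented by this patch.
    @load misc/capture-loss
    @load misc/scan
    @load misc/trim-trace-file
    @load tuning/logs-to-elasticsearch

    # Raise CaptureLoss::Too_Much_Loss above 5% missed ACKs instead of the 10% default.
    redef CaptureLoss::too_much_loss = 0.05;

    # Require 50 unique victim hosts on one port (default 25.0) before Scan::Address_Scan.
    redef Scan::addr_scan_threshold = 50.0;

    # Rotate the -w trace file every 30 minutes instead of every 10.
    redef TrimTraceFile::trim_interval = 30 mins;

    # Ship only these log streams to ElasticSearch; excluded_log_ids still applies.
    redef LogElasticSearch::send_logs += { Conn::LOG, HTTP::LOG };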