mirror of https://github.com/zeek/zeek.git (synced 2025-10-17 05:58:20 +00:00)
Fix typos and formatting in the other policy docs
This commit is contained in:
parent 9374a7d584
commit 02d7e16997
13 changed files with 90 additions and 80 deletions
@@ -15,8 +15,8 @@ export {
 alert: AlertData &log;
 };
 
-## This can convert a Barnyard :bro:type:`Barnyard2::PacketID` value to a
-## :bro:type:`conn_id` value in the case that you might need to index
+## This can convert a Barnyard :bro:type:`Barnyard2::PacketID` value to
+## a :bro:type:`conn_id` value in the case that you might need to index
 ## into an existing data structure elsewhere within Bro.
 global pid2cid: function(p: PacketID): conn_id;
 }

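Not part of the commit, but a minimal usage sketch for the pid2cid helper documented in the hunk above. The helper name and its PacketID/conn_id types come from the diff; the seen_conns table and note_alert function are made-up illustrations, and the Barnyard2 scripts are assumed to be loaded already.

    # Hypothetical bookkeeping table, indexed by the conn_id that pid2cid returns.
    global seen_conns: table[conn_id] of count &default=0;

    # Count how often a Barnyard2 alert maps onto a given connection.
    function note_alert(pid: Barnyard2::PacketID)
        {
        local cid = Barnyard2::pid2cid(pid);
        seen_conns[cid] += 1;
        }
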
@@ -11,7 +11,7 @@ export {
 generator_id: count; ##< Which generator generated the alert?
 signature_revision: count; ##< Sig revision for this id.
 classification_id: count; ##< Event classification.
-classification: string; ##< Descriptive classification string,
+classification: string; ##< Descriptive classification string.
 priority_id: count; ##< Event priority.
 event_id: count; ##< Event ID.
 } &log;

@@ -3,8 +3,8 @@
 
 module Intel;
 
-## These are some fields to add extended compatibility between Bro and the Collective
-## Intelligence Framework
+## These are some fields to add extended compatibility between Bro and the
+## Collective Intelligence Framework.
 redef record Intel::MetaData += {
 ## Maps to the Impact field in the Collective Intelligence Framework.
 cif_impact: string &optional;

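Not from the commit: a sketch of feeding an indicator that carries the cif_impact metadata shown above. Intel::insert, Intel::ADDR, and the source field are standard Intel framework names for this Bro version; the concrete indicator and values are made up.

    @load base/frameworks/intel

    event bro_init()
        {
        Intel::insert([$indicator="198.51.100.1",
                       $indicator_type=Intel::ADDR,
                       $meta=[$source="cif-feed",
                              $cif_impact="suspicious"]]);
        }
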
@@ -4,7 +4,7 @@
 ##! the packet capture or it could even be beyond the host. If you are
 ##! capturing from a switch with a SPAN port, it's very possible that
 ##! the switch itself could be overloaded and dropping packets.
-##! Reported loss is computed in terms of number of "gap events" (ACKs
+##! Reported loss is computed in terms of the number of "gap events" (ACKs
 ##! for a sequence number that's above a gap).
 
 @load base/frameworks/notice
@@ -26,7 +26,7 @@ export {
 ## The time delay between this measurement and the last.
 ts_delta: interval &log;
 ## In the event that there are multiple Bro instances logging
-## to the same host, this distinguishes each peer with it's
+## to the same host, this distinguishes each peer with its
 ## individual name.
 peer: string &log;
 ## Number of missed ACKs from the previous measurement interval.
@@ -43,7 +43,7 @@ export {
 ## The percentage of missed data that is considered "too much"
 ## when the :bro:enum:`CaptureLoss::Too_Much_Loss` notice should be
 ## generated. The value is expressed as a double between 0 and 1 with 1
-## being 100%
+## being 100%.
 const too_much_loss: double = 0.1 &redef;
 }
 

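Not part of the commit: a small tuning sketch for the too_much_loss option documented above, as it might appear in a site policy such as local.bro (the load path is the one conventionally used there).

    @load misc/capture-loss

    # Raise the CaptureLoss::Too_Much_Loss threshold from 10% to 25% loss.
    redef CaptureLoss::too_much_loss = 0.25;
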
@@ -1,7 +1,8 @@
-##! This script detects a large number of ICMP Time Exceeded messages heading toward
-##! hosts that have sent low TTL packets. It generates a notice when the number of
-##! ICMP Time Exceeded messages for a source-destination pair exceeds a
-##! threshold.
+##! This script detects a large number of ICMP Time Exceeded messages heading
+##! toward hosts that have sent low TTL packets. It generates a notice when the
+##! number of ICMP Time Exceeded messages for a source-destination pair exceeds
+##! a threshold.
+
 @load base/frameworks/sumstats
 @load base/frameworks/signatures
 @load-sigs ./detect-low-ttls.sig
@@ -20,15 +21,16 @@ export {
 Detected
 };
 
-## By default this script requires that any host detected running traceroutes
-## first send low TTL packets (TTL < 10) to the traceroute destination host.
-## Changing this this setting to `F` will relax the detection a bit by
-## solely relying on ICMP time-exceeded messages to detect traceroute.
+## By default this script requires that any host detected running
+## traceroutes first send low TTL packets (TTL < 10) to the traceroute
+## destination host. Changing this setting to F will relax the
+## detection a bit by solely relying on ICMP time-exceeded messages to
+## detect traceroute.
 const require_low_ttl_packets = T &redef;
 
-## Defines the threshold for ICMP Time Exceeded messages for a src-dst pair.
-## This threshold only comes into play after a host is found to be
-## sending low ttl packets.
+## Defines the threshold for ICMP Time Exceeded messages for a src-dst
+## pair. This threshold only comes into play after a host is found to
+## be sending low TTL packets.
 const icmp_time_exceeded_threshold: double = 3 &redef;
 
 ## Interval at which to watch for the
@@ -40,7 +42,7 @@ export {
 type Info: record {
 ## Timestamp
 ts: time &log;
-## Address initiaing the traceroute.
+## Address initiating the traceroute.
 src: addr &log;
 ## Destination address of the traceroute.
 dst: addr &log;

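Not in the commit: a tuning sketch for the two options above. It assumes the detect-traceroute script's module is named Traceroute (only the option names and the Detected notice appear in the hunks).

    @load misc/detect-traceroute

    # Relax detection: rely on ICMP time-exceeded messages alone.
    redef Traceroute::require_low_ttl_packets = F;

    # Require a few more time-exceeded messages per src-dst pair before noticing.
    redef Traceroute::icmp_time_exceeded_threshold = 5.0;
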
@@ -1,7 +1,7 @@
-##! This script provides infrastructure for logging devices for which Bro has been
-##! able to determine the MAC address, and it logs them once per day (by default).
-##! The log that is output provides an easy way to determine a count of the devices
-##! in use on a network per day.
+##! This script provides infrastructure for logging devices for which Bro has
+##! been able to determine the MAC address, and it logs them once per day (by
+##! default). The log that is output provides an easy way to determine a count
+##! of the devices in use on a network per day.
 ##!
 ##! .. note::
 ##!
@@ -15,7 +15,8 @@ export {
 ## The known-hosts logging stream identifier.
 redef enum Log::ID += { DEVICES_LOG };
 
-## The record type which contains the column fields of the known-devices log.
+## The record type which contains the column fields of the known-devices
+## log.
 type DevicesInfo: record {
 ## The timestamp at which the host was detected.
 ts: time &log;
@@ -24,10 +25,10 @@ export {
 };
 
 ## The set of all known MAC addresses. It can accessed from other
-## to add, and check for, addresses seen in use.
-##
-## We maintain each entry for 24 hours by default so that the existence of
-## individual addressed is logged each day.
+## scripts to add, and check for, addresses seen in use.
+##
+## We maintain each entry for 24 hours by default so that the existence
+## of individual addresses is logged each day.
 global known_devices: set[string] &create_expire=1day &synchronized &redef;
 
 ## An event that can be handled to access the :bro:type:`Known::DevicesInfo`

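Not part of the commit: a sketch of how another script might consult the known_devices set documented above. The Known module name comes from Known::DevicesInfo in the hunk; the MAC address is made up, and the known-devices policy script is assumed to be loaded.

    event bro_init()
        {
        local mac = "aa:bb:cc:dd:ee:ff";
        if ( mac !in Known::known_devices )
            add Known::known_devices[mac];
        }
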
@@ -29,9 +29,10 @@ export {
 #global confirm_filter_installation: event(success: bool);
 
 redef record Cluster::Node += {
-## A BPF filter for load balancing traffic sniffed on a single interface
-## across a number of processes. In normal uses, this will be assigned
-## dynamically by the manager and installed by the workers.
+## A BPF filter for load balancing traffic sniffed on a single
+## interface across a number of processes. In normal uses, this
+## will be assigned dynamically by the manager and installed by
+## the workers.
 lb_filter: string &optional;
 };
 }

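Not from the commit: a sketch that prints whichever lb_filter values have been assigned so far, assuming the standard Cluster::nodes table of the cluster framework (the filter is set dynamically, so it may still be absent at startup).

    @load base/frameworks/cluster

    event bro_init()
        {
        for ( name in Cluster::nodes )
            {
            local n = Cluster::nodes[name];
            if ( n?$lb_filter )
                print fmt("node %s uses BPF filter: %s", name, n$lb_filter);
            }
        }
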
@@ -7,9 +7,9 @@ export {
 redef enum Log::ID += { LOG };
 
 type Info: record {
-## Name of the script loaded potentially with spaces included before
-## the file name to indicate load depth. The convention is two spaces
-## per level of depth.
+## Name of the script loaded potentially with spaces included
+## before the file name to indicate load depth. The convention
+## is two spaces per level of depth.
 name: string &log;
 };
 }
@@ -36,4 +36,4 @@ event bro_init() &priority=5
 event bro_script_loaded(path: string, level: count)
 {
 Log::write(LoadedScripts::LOG, [$name=cat(depth[level], compress_path(path))]);
-}
+}

@@ -8,7 +8,8 @@ redef profiling_file = open_log_file("prof");
 ## Set the cheap profiling interval.
 redef profiling_interval = 15 secs;
 
-## Set the expensive profiling interval.
+## Set the expensive profiling interval (multiple of
+## :bro:id:`profiling_interval`).
 redef expensive_profiling_multiple = 20;
 
 event bro_init()

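Not part of the commit: a sketch of overriding the two profiling knobs above from a site policy. With these values the expensive pass runs every 30 secs x 10, i.e. every 5 minutes.

    @load misc/profiling

    # Cheap profiling every 30 seconds; expensive profiling every 10th pass.
    redef profiling_interval = 30 secs;
    redef expensive_profiling_multiple = 10;
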
@@ -1,8 +1,8 @@
-##! TCP Scan detection
-##!
-##! ..Authors: Sheharbano Khattak
-##! Seth Hall
-##! All the authors of the old scan.bro
+##! TCP Scan detection.
+
+# ..Authors: Sheharbano Khattak
+# Seth Hall
+# All the authors of the old scan.bro
 
 @load base/frameworks/notice
 @load base/frameworks/sumstats
@@ -13,37 +13,38 @@ module Scan;
 
 export {
 redef enum Notice::Type += {
-## Address scans detect that a host appears to be scanning some number
-## of destinations on a single port. This notice is generated when more
-## than :bro:id:`Scan::addr_scan_threshold` unique hosts are seen over
-## the previous :bro:id:`Scan::addr_scan_interval` time range.
+## Address scans detect that a host appears to be scanning some
+## number of destinations on a single port. This notice is
+## generated when more than :bro:id:`Scan::addr_scan_threshold`
+## unique hosts are seen over the previous
+## :bro:id:`Scan::addr_scan_interval` time range.
 Address_Scan,
 
-## Port scans detect that an attacking host appears to be scanning a
-## single victim host on several ports. This notice is generated when
-## an attacking host attempts to connect to
+## Port scans detect that an attacking host appears to be
+## scanning a single victim host on several ports. This notice
+## is generated when an attacking host attempts to connect to
 ## :bro:id:`Scan::port_scan_threshold`
 ## unique ports on a single host over the previous
 ## :bro:id:`Scan::port_scan_interval` time range.
 Port_Scan,
 };
 
-## Failed connection attempts are tracked over this time interval for the address
-## scan detection. A higher interval will detect slower scanners, but may also
-## yield more false positives.
+## Failed connection attempts are tracked over this time interval for
+## the address scan detection. A higher interval will detect slower
+## scanners, but may also yield more false positives.
 const addr_scan_interval = 5min &redef;
 
-## Failed connection attempts are tracked over this time interval for the port scan
-## detection. A higher interval will detect slower scanners, but may also yield
-## more false positives.
+## Failed connection attempts are tracked over this time interval for
+## the port scan detection. A higher interval will detect slower
+## scanners, but may also yield more false positives.
 const port_scan_interval = 5min &redef;
 
-## The threshold of a unique number of hosts a scanning host has to have failed
-## connections with on a single port.
+## The threshold of the unique number of hosts a scanning host has to
+## have failed connections with on a single port.
 const addr_scan_threshold = 25.0 &redef;
 
-## The threshold of a number of unique ports a scanning host has to have failed
-## connections with on a single victim host.
+## The threshold of the number of unique ports a scanning host has to
+## have failed connections with on a single victim host.
 const port_scan_threshold = 15.0 &redef;
 
 global Scan::addr_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
@@ -148,7 +149,7 @@ function is_reverse_failed_conn(c: connection): bool
 
 ## Generated for an unsuccessful connection attempt. This
 ## event is raised when an originator unsuccessfully attempted
-## to establish a connection. “Unsuccessful” is defined as at least
+## to establish a connection. "Unsuccessful" is defined as at least
 ## tcp_attempt_delay seconds having elapsed since the originator first sent a
 ## connection establishment packet to the destination without seeing a reply.
 event connection_attempt(c: connection)
@@ -160,9 +161,9 @@ event connection_attempt(c: connection)
 add_sumstats(c$id, is_reverse_scan);
 }
 
-## Generated for a rejected TCP connection. This event is raised when an originator
-## attempted to setup a TCP connection but the responder replied with a RST packet
-## denying it.
+## Generated for a rejected TCP connection. This event is raised when an
+## originator attempted to setup a TCP connection but the responder replied with
+## a RST packet denying it.
 event connection_rejected(c: connection)
 {
 local is_reverse_scan = F;
@@ -173,7 +174,8 @@ event connection_rejected(c: connection)
 }
 
 ## Generated when an endpoint aborted a TCP connection. The event is raised when
-## one endpoint of an *established* TCP connection aborted by sending a RST packet.
+## one endpoint of an *established* TCP connection aborted by sending a RST
+## packet.
 event connection_reset(c: connection)
 {
 if ( is_failed_conn(c) )

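Not part of the commit: a tuning sketch built from the options and the Scan::addr_scan_policy hook shown above. The whitelist set is made up, and it is assumed (not stated in the hunks) that a handler that breaks vetoes tracking of that scanner.

    @load misc/scan

    # Be less sensitive to slow address scans.
    redef Scan::addr_scan_threshold = 50.0;
    redef Scan::addr_scan_interval = 10min;

    # Made-up whitelist of scanners that should never be tracked.
    global trusted_scanners: set[addr] = { 192.0.2.10 } &redef;

    hook Scan::addr_scan_policy(scanner: addr, victim: addr, scanned_port: port)
        {
        if ( scanner in trusted_scanners )
            break;
        }
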
@@ -1,4 +1,5 @@
-##! Log memory/packet/lag statistics. Differs from profiling.bro in that this
+##! Log memory/packet/lag statistics. Differs from
+##! :doc:`/scripts/policy/misc/profiling` in that this
 ##! is lighter-weight (much less info, and less load to generate).
 
 @load base/frameworks/notice
@@ -20,21 +21,23 @@ export {
 mem: count &log;
 ## Number of packets processed since the last stats interval.
 pkts_proc: count &log;
-## Number of events that been processed since the last stats interval.
+## Number of events processed since the last stats interval.
 events_proc: count &log;
-## Number of events that have been queued since the last stats interval.
+## Number of events that have been queued since the last stats
+## interval.
 events_queued: count &log;
 
-## Lag between the wall clock and packet timestamps if reading live traffic.
+## Lag between the wall clock and packet timestamps if reading
+## live traffic.
 lag: interval &log &optional;
-## Number of packets received since the last stats interval if reading
-## live traffic.
+## Number of packets received since the last stats interval if
+## reading live traffic.
 pkts_recv: count &log &optional;
-## Number of packets dropped since the last stats interval if reading
-## live traffic.
+## Number of packets dropped since the last stats interval if
+## reading live traffic.
 pkts_dropped: count &log &optional;
-## Number of packets seen on the link since the last stats interval
-## if reading live traffic.
+## Number of packets seen on the link since the last stats
+## interval if reading live traffic.
 pkts_link: count &log &optional;
 };
 

@@ -1,4 +1,4 @@
-##! Deletes the -w tracefile at regular intervals and starts a new file
+##! Deletes the ``-w`` tracefile at regular intervals and starts a new file
 ##! from scratch.
 
 module TrimTraceFile;
@@ -8,9 +8,9 @@ export {
 const trim_interval = 10 mins &redef;
 
 ## This event can be generated externally to this script if on-demand
-## tracefile rotation is required with the caveat that the script doesn't
-## currently attempt to get back on schedule automatically and the next
-## trim will likely won't happen on the
+## tracefile rotation is required with the caveat that the script
+## doesn't currently attempt to get back on schedule automatically and
+## the next trim likely won't happen on the
 ## :bro:id:`TrimTraceFile::trim_interval`.
 global go: event(first_trim: bool);
 }

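Not in the commit: a sketch that shortens the trim interval and raises the go event on demand, which the doc comment above says is supported; the meaning of the first_trim flag is assumed here, not taken from the hunks.

    # Trim the -w trace file every 5 minutes instead of every 10.
    redef TrimTraceFile::trim_interval = 5 mins;

    event bro_init()
        {
        # Trigger an immediate, on-demand trim; F is assumed to mean
        # "not the script's initial trim".
        event TrimTraceFile::go(F);
        }
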
@@ -12,8 +12,8 @@ export {
 
 ## If you want to explicitly only send certain :bro:type:`Log::ID`
 ## streams, add them to this set. If the set remains empty, all will
-## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option will remain in
-## effect as well.
+## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option
+## will remain in effect as well.
 const send_logs: set[Log::ID] &redef;
 }
 
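Not part of the commit: a sketch that restricts ElasticSearch output to two streams via the send_logs option documented above; Conn::LOG and HTTP::LOG are standard stream IDs, and the load path is the conventional one.

    @load tuning/logs-to-elasticsearch

    # Ship only connection and HTTP logs; excluded_log_ids still applies on top.
    redef LogElasticSearch::send_logs += { Conn::LOG, HTTP::LOG };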