mirror of https://github.com/zeek/zeek.git (synced 2025-10-17 05:58:20 +00:00)

Fix typos and formatting in the other policy docs

This commit is contained in:
parent 9374a7d584
commit 02d7e16997

13 changed files with 90 additions and 80 deletions

@@ -15,8 +15,8 @@ export {
 alert: AlertData &log;
 };

-## This can convert a Barnyard :bro:type:`Barnyard2::PacketID` value to a
-## :bro:type:`conn_id` value in the case that you might need to index
+## This can convert a Barnyard :bro:type:`Barnyard2::PacketID` value to
+## a :bro:type:`conn_id` value in the case that you might need to index
 ## into an existing data structure elsewhere within Bro.
 global pid2cid: function(p: PacketID): conn_id;
 }
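
The pid2cid function exported above exists so that a Barnyard2::PacketID can be turned into a conn_id and used to index tables keyed by connection. A minimal sketch of that pattern, assuming the enclosing module is Barnyard2; the table and helper names are purely illustrative:

    # Illustrative table keyed by conn_id, populated elsewhere.
    global notes_by_conn: table[conn_id] of string;

    function lookup_note(pid: Barnyard2::PacketID): string
        {
        # Translate the Barnyard2 packet identifier into a conn_id and index with it.
        local cid = Barnyard2::pid2cid(pid);

        if ( cid in notes_by_conn )
            return notes_by_conn[cid];

        return "";
        }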

@@ -11,7 +11,7 @@ export {
 generator_id: count; ##< Which generator generated the alert?
 signature_revision: count; ##< Sig revision for this id.
 classification_id: count; ##< Event classification.
-classification: string; ##< Descriptive classification string,
+classification: string; ##< Descriptive classification string.
 priority_id: count; ##< Event priority.
 event_id: count; ##< Event ID.
 } &log;

@@ -3,8 +3,8 @@

 module Intel;

-## These are some fields to add extended compatibility between Bro and the Collective
-## Intelligence Framework
+## These are some fields to add extended compatibility between Bro and the
+## Collective Intelligence Framework.
 redef record Intel::MetaData += {
 ## Maps to the Impact field in the Collective Intelligence Framework.
 cif_impact: string &optional;

@@ -4,7 +4,7 @@
 ##! the packet capture or it could even be beyond the host. If you are
 ##! capturing from a switch with a SPAN port, it's very possible that
 ##! the switch itself could be overloaded and dropping packets.
-##! Reported loss is computed in terms of number of "gap events" (ACKs
+##! Reported loss is computed in terms of the number of "gap events" (ACKs
 ##! for a sequence number that's above a gap).

 @load base/frameworks/notice

@@ -26,7 +26,7 @@ export {
 ## The time delay between this measurement and the last.
 ts_delta: interval &log;
 ## In the event that there are multiple Bro instances logging
-## to the same host, this distinguishes each peer with it's
+## to the same host, this distinguishes each peer with its
 ## individual name.
 peer: string &log;
 ## Number of missed ACKs from the previous measurement interval.

@@ -43,7 +43,7 @@ export {
 ## The percentage of missed data that is considered "too much"
 ## when the :bro:enum:`CaptureLoss::Too_Much_Loss` notice should be
 ## generated. The value is expressed as a double between 0 and 1 with 1
-## being 100%
+## being 100%.
 const too_much_loss: double = 0.1 &redef;
 }

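
Because too_much_loss is declared &redef, the notice threshold can be tuned from a site's local policy; a minimal sketch, assuming the stock capture-loss policy script is loaded under the path shown (the 0.2 value is only illustrative):

    @load policy/misc/capture-loss

    # Only raise CaptureLoss::Too_Much_Loss once 20% of ACKs are missed,
    # instead of the 10% default.
    redef CaptureLoss::too_much_loss = 0.2;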

@@ -1,7 +1,8 @@
-##! This script detects a large number of ICMP Time Exceeded messages heading toward
-##! hosts that have sent low TTL packets. It generates a notice when the number of
-##! ICMP Time Exceeded messages for a source-destination pair exceeds a
-##! threshold.
+##! This script detects a large number of ICMP Time Exceeded messages heading
+##! toward hosts that have sent low TTL packets. It generates a notice when the
+##! number of ICMP Time Exceeded messages for a source-destination pair exceeds
+##! a threshold.
+
 @load base/frameworks/sumstats
 @load base/frameworks/signatures
 @load-sigs ./detect-low-ttls.sig

@@ -20,15 +21,16 @@ export {
 Detected
 };

-## By default this script requires that any host detected running traceroutes
-## first send low TTL packets (TTL < 10) to the traceroute destination host.
-## Changing this this setting to `F` will relax the detection a bit by
-## solely relying on ICMP time-exceeded messages to detect traceroute.
+## By default this script requires that any host detected running
+## traceroutes first send low TTL packets (TTL < 10) to the traceroute
+## destination host. Changing this setting to F will relax the
+## detection a bit by solely relying on ICMP time-exceeded messages to
+## detect traceroute.
 const require_low_ttl_packets = T &redef;

-## Defines the threshold for ICMP Time Exceeded messages for a src-dst pair.
-## This threshold only comes into play after a host is found to be
-## sending low ttl packets.
+## Defines the threshold for ICMP Time Exceeded messages for a src-dst
+## pair. This threshold only comes into play after a host is found to
+## be sending low TTL packets.
 const icmp_time_exceeded_threshold: double = 3 &redef;

 ## Interval at which to watch for the
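
Both constants above are &redef-able, so a site wanting looser or stricter detection can override them; a minimal sketch, assuming the script's enclosing module is named Traceroute (the values shown are illustrative):

    # Rely on ICMP time-exceeded messages alone, without requiring low TTL packets first.
    redef Traceroute::require_low_ttl_packets = F;

    # Demand more time-exceeded messages per src-dst pair before raising a notice.
    redef Traceroute::icmp_time_exceeded_threshold = 6.0;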

@@ -40,7 +42,7 @@ export {
 type Info: record {
 ## Timestamp
 ts: time &log;
-## Address initiaing the traceroute.
+## Address initiating the traceroute.
 src: addr &log;
 ## Destination address of the traceroute.
 dst: addr &log;

@@ -1,7 +1,7 @@
-##! This script provides infrastructure for logging devices for which Bro has been
-##! able to determine the MAC address, and it logs them once per day (by default).
-##! The log that is output provides an easy way to determine a count of the devices
-##! in use on a network per day.
+##! This script provides infrastructure for logging devices for which Bro has
+##! been able to determine the MAC address, and it logs them once per day (by
+##! default). The log that is output provides an easy way to determine a count
+##! of the devices in use on a network per day.
 ##!
 ##! .. note::
 ##!

@@ -15,7 +15,8 @@ export {
 ## The known-hosts logging stream identifier.
 redef enum Log::ID += { DEVICES_LOG };

-## The record type which contains the column fields of the known-devices log.
+## The record type which contains the column fields of the known-devices
+## log.
 type DevicesInfo: record {
 ## The timestamp at which the host was detected.
 ts: time &log;

@@ -24,10 +25,10 @@ export {
 };

 ## The set of all known MAC addresses. It can accessed from other
-## to add, and check for, addresses seen in use.
+## scripts to add, and check for, addresses seen in use.
 ##
-## We maintain each entry for 24 hours by default so that the existence of
-## individual addressed is logged each day.
+## We maintain each entry for 24 hours by default so that the existence
+## of individual addresses is logged each day.
 global known_devices: set[string] &create_expire=1day &synchronized &redef;

 ## An event that can be handled to access the :bro:type:`Known::DevicesInfo`
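
As the comment says, other scripts can add to and test membership in Known::known_devices; a minimal sketch (the MAC address string is purely illustrative):

    event bro_init()
        {
        # Pre-seed a device that is always expected on the network.
        add Known::known_devices["00:16:3e:aa:bb:cc"];

        # Later checks can test whether an address is currently tracked.
        if ( "00:16:3e:aa:bb:cc" in Known::known_devices )
            print "device is currently tracked";
        }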

@@ -29,9 +29,10 @@ export {
 #global confirm_filter_installation: event(success: bool);

 redef record Cluster::Node += {
-## A BPF filter for load balancing traffic sniffed on a single interface
-## across a number of processes. In normal uses, this will be assigned
-## dynamically by the manager and installed by the workers.
+## A BPF filter for load balancing traffic sniffed on a single
+## interface across a number of processes. In normal uses, this
+## will be assigned dynamically by the manager and installed by
+## the workers.
 lb_filter: string &optional;
 };
 }

@@ -7,9 +7,9 @@ export {
 redef enum Log::ID += { LOG };

 type Info: record {
-## Name of the script loaded potentially with spaces included before
-## the file name to indicate load depth. The convention is two spaces
-## per level of depth.
+## Name of the script loaded potentially with spaces included
+## before the file name to indicate load depth. The convention
+## is two spaces per level of depth.
 name: string &log;
 };
 }

@@ -36,4 +36,4 @@ event bro_init() &priority=5
 event bro_script_loaded(path: string, level: count)
 {
-Log::write(LoadedScripts::LOG, [$name=cat(depth[level], compress_path(path))]);
+Log::write(LoadedScripts::LOG, [$name=cat(depth[level], compress_path(path))]);
 }

@@ -8,7 +8,8 @@ redef profiling_file = open_log_file("prof");
 ## Set the cheap profiling interval.
 redef profiling_interval = 15 secs;

-## Set the expensive profiling interval.
+## Set the expensive profiling interval (multiple of
+## :bro:id:`profiling_interval`).
 redef expensive_profiling_multiple = 20;

 event bro_init()
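
Since these are ordinary redefs of the global profiling tunables, any script loaded afterwards can override them again; for example (the values are illustrative):

    # Sample the cheap profile every 30 seconds and the expensive profile every 10th sample.
    redef profiling_interval = 30 secs;
    redef expensive_profiling_multiple = 10;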

@@ -1,8 +1,8 @@
-##! TCP Scan detection
-##!
-##! ..Authors: Sheharbano Khattak
-##! Seth Hall
-##! All the authors of the old scan.bro
+##! TCP Scan detection.
+
+# ..Authors: Sheharbano Khattak
+# Seth Hall
+# All the authors of the old scan.bro

 @load base/frameworks/notice
 @load base/frameworks/sumstats

@@ -13,37 +13,38 @@ module Scan;

 export {
 redef enum Notice::Type += {
-## Address scans detect that a host appears to be scanning some number
-## of destinations on a single port. This notice is generated when more
-## than :bro:id:`Scan::addr_scan_threshold` unique hosts are seen over
-## the previous :bro:id:`Scan::addr_scan_interval` time range.
+## Address scans detect that a host appears to be scanning some
+## number of destinations on a single port. This notice is
+## generated when more than :bro:id:`Scan::addr_scan_threshold`
+## unique hosts are seen over the previous
+## :bro:id:`Scan::addr_scan_interval` time range.
 Address_Scan,

-## Port scans detect that an attacking host appears to be scanning a
-## single victim host on several ports. This notice is generated when
-## an attacking host attempts to connect to
+## Port scans detect that an attacking host appears to be
+## scanning a single victim host on several ports. This notice
+## is generated when an attacking host attempts to connect to
 ## :bro:id:`Scan::port_scan_threshold`
 ## unique ports on a single host over the previous
 ## :bro:id:`Scan::port_scan_interval` time range.
 Port_Scan,
 };

-## Failed connection attempts are tracked over this time interval for the address
-## scan detection. A higher interval will detect slower scanners, but may also
-## yield more false positives.
+## Failed connection attempts are tracked over this time interval for
+## the address scan detection. A higher interval will detect slower
+## scanners, but may also yield more false positives.
 const addr_scan_interval = 5min &redef;

-## Failed connection attempts are tracked over this time interval for the port scan
-## detection. A higher interval will detect slower scanners, but may also yield
-## more false positives.
+## Failed connection attempts are tracked over this time interval for
+## the port scan detection. A higher interval will detect slower
+## scanners, but may also yield more false positives.
 const port_scan_interval = 5min &redef;

-## The threshold of a unique number of hosts a scanning host has to have failed
-## connections with on a single port.
+## The threshold of the unique number of hosts a scanning host has to
+## have failed connections with on a single port.
 const addr_scan_threshold = 25.0 &redef;

-## The threshold of a number of unique ports a scanning host has to have failed
-## connections with on a single victim host.
+## The threshold of the number of unique ports a scanning host has to
+## have failed connections with on a single victim host.
 const port_scan_threshold = 15.0 &redef;

 global Scan::addr_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
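
The intervals and thresholds above are &redef-able, and the Scan::addr_scan_policy hook offers a veto point over what feeds the address-scan measurement; a minimal sketch (the threshold and the whitelisted address are illustrative, and it assumes that breaking out of the hook suppresses tracking for that scanner):

    # Tolerate noisier hosts before flagging an address scan.
    redef Scan::addr_scan_threshold = 100.0;

    hook Scan::addr_scan_policy(scanner: addr, victim: addr, scanned_port: port)
        {
        # Skip tracking for an internal vulnerability scanner.
        if ( scanner == 10.0.0.53 )
            break;
        }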

@@ -148,7 +149,7 @@ function is_reverse_failed_conn(c: connection): bool

 ## Generated for an unsuccessful connection attempt. This
 ## event is raised when an originator unsuccessfully attempted
-## to establish a connection. “Unsuccessful” is defined as at least
+## to establish a connection. "Unsuccessful" is defined as at least
 ## tcp_attempt_delay seconds having elapsed since the originator first sent a
 ## connection establishment packet to the destination without seeing a reply.
 event connection_attempt(c: connection)

@@ -160,9 +161,9 @@ event connection_attempt(c: connection)
 add_sumstats(c$id, is_reverse_scan);
 }

-## Generated for a rejected TCP connection. This event is raised when an originator
-## attempted to setup a TCP connection but the responder replied with a RST packet
-## denying it.
+## Generated for a rejected TCP connection. This event is raised when an
+## originator attempted to setup a TCP connection but the responder replied with
+## a RST packet denying it.
 event connection_rejected(c: connection)
 {
 local is_reverse_scan = F;

@@ -173,7 +174,8 @@ event connection_rejected(c: connection)
 }

 ## Generated when an endpoint aborted a TCP connection. The event is raised when
-## one endpoint of an *established* TCP connection aborted by sending a RST packet.
+## one endpoint of an *established* TCP connection aborted by sending a RST
+## packet.
 event connection_reset(c: connection)
 {
 if ( is_failed_conn(c) )

@@ -1,4 +1,5 @@
-##! Log memory/packet/lag statistics. Differs from profiling.bro in that this
+##! Log memory/packet/lag statistics. Differs from
+##! :doc:`/scripts/policy/misc/profiling` in that this
 ##! is lighter-weight (much less info, and less load to generate).

 @load base/frameworks/notice

@@ -20,21 +21,23 @@ export {
 mem: count &log;
 ## Number of packets processed since the last stats interval.
 pkts_proc: count &log;
-## Number of events that been processed since the last stats interval.
+## Number of events processed since the last stats interval.
 events_proc: count &log;
-## Number of events that have been queued since the last stats interval.
+## Number of events that have been queued since the last stats
+## interval.
 events_queued: count &log;

-## Lag between the wall clock and packet timestamps if reading live traffic.
+## Lag between the wall clock and packet timestamps if reading
+## live traffic.
 lag: interval &log &optional;
-## Number of packets received since the last stats interval if reading
-## live traffic.
+## Number of packets received since the last stats interval if
+## reading live traffic.
 pkts_recv: count &log &optional;
-## Number of packets dropped since the last stats interval if reading
-## live traffic.
+## Number of packets dropped since the last stats interval if
+## reading live traffic.
 pkts_dropped: count &log &optional;
-## Number of packets seen on the link since the last stats interval
-## if reading live traffic.
+## Number of packets seen on the link since the last stats
+## interval if reading live traffic.
 pkts_link: count &log &optional;
 };


@@ -1,4 +1,4 @@
-##! Deletes the -w tracefile at regular intervals and starts a new file
+##! Deletes the ``-w`` tracefile at regular intervals and starts a new file
 ##! from scratch.

 module TrimTraceFile;

@@ -8,9 +8,9 @@ export {
 const trim_interval = 10 mins &redef;

 ## This event can be generated externally to this script if on-demand
-## tracefile rotation is required with the caveat that the script doesn't
-## currently attempt to get back on schedule automatically and the next
-## trim will likely won't happen on the
+## tracefile rotation is required with the caveat that the script
+## doesn't currently attempt to get back on schedule automatically and
+## the next trim likely won't happen on the
 ## :bro:id:`TrimTraceFile::trim_interval`.
 global go: event(first_trim: bool);
 }
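
Because trim_interval is &redef-able and go can be raised from outside the script, a site can both change the schedule and request an extra trim; a minimal sketch (the 30 mins value is illustrative):

    # Trim the -w trace file every 30 minutes instead of every 10.
    redef TrimTraceFile::trim_interval = 30 mins;

    event bro_init()
        {
        # Request an additional on-demand trim shortly after startup (first_trim=F).
        schedule 1 min { TrimTraceFile::go(F) };
        }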

@@ -12,8 +12,8 @@ export {

 ## If you want to explicitly only send certain :bro:type:`Log::ID`
 ## streams, add them to this set. If the set remains empty, all will
-## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option will remain in
-## effect as well.
+## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option
+## will remain in effect as well.
 const send_logs: set[Log::ID] &redef;
 }
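
With send_logs left empty every stream is shipped, so limiting output is just a matter of redef-ing the set; a minimal sketch (the chosen streams are illustrative):

    # Only send the connection and HTTP logs to ElasticSearch; excluded_log_ids still applies.
    redef LogElasticSearch::send_logs += { Conn::LOG, HTTP::LOG };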