diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index f1468cea6b..6e71f2ba88 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -29,7 +29,7 @@ type icmp_conn: record { }; type icmp_hdr: record { - icmp_type: count; # type of message + icmp_type: count; ##< type of message }; type icmp_context: record { @@ -57,16 +57,19 @@ type dns_mapping: record { type ftp_port: record { h: addr; p: port; - valid: bool; # true if format was right + valid: bool; ##< true if format was right }; type endpoint: record { - size: count; # logical size (for TCP: from seq numbers) + size: count; ##< logical size (for TCP: from seq numbers) state: count; - # The following are set if use_conn_size_analyzer is T. - num_pkts: count &optional; # number of packets on the wire - num_bytes_ip: count &optional; # actual number of IP-level bytes on the wire + ## Number of packets on the wire + ## Set if :bro:id:`use_conn_size_analyzer` is true. + num_pkts: count &optional; + ## Number of IP-level bytes on the wire + ## Set if :bro:id:`use_conn_size_analyzer` is true. 
+ num_bytes_ip: count &optional; }; type endpoint_stats: record { @@ -87,9 +90,9 @@ type connection: record { resp: endpoint; start_time: time; duration: interval; - service: string_set; # if empty, service hasn't been determined + service: string_set; ##< if empty, service hasn't been determined addl: string; - hot: count; # how hot; 0 = don't know or not hot + hot: count; ##< how hot; 0 = don't know or not hot history: string; uid: string; }; @@ -114,30 +117,30 @@ type NetStats: record { }; type bro_resources: record { - version: string; # Bro version string - debug: bool; # true if compiled with --enable-debug - start_time: time; # start time of process - real_time: interval; # elapsed real time since Bro started running - user_time: interval; # user CPU seconds - system_time: interval; # system CPU seconds - mem: count; # maximum memory consumed, in KB - minor_faults: count; # page faults not requiring actual I/O - major_faults: count; # page faults requiring actual I/O - num_swap: count; # times swapped out - blocking_input: count; # blocking input operations - blocking_output: count; # blocking output operations - num_context: count; # number of involuntary context switches + version: string; ##< Bro version string + debug: bool; ##< true if compiled with --enable-debug + start_time: time; ##< start time of process + real_time: interval; ##< elapsed real time since Bro started running + user_time: interval; ##< user CPU seconds + system_time: interval; ##< system CPU seconds + mem: count; ##< maximum memory consumed, in KB + minor_faults: count; ##< page faults not requiring actual I/O + major_faults: count; ##< page faults requiring actual I/O + num_swap: count; ##< times swapped out + blocking_input: count; ##< blocking input operations + blocking_output: count; ##< blocking output operations + num_context: count; ##< number of involuntary context switches - num_TCP_conns: count; # current number of TCP connections + num_TCP_conns: count; ##< current number 
of TCP connections num_UDP_conns: count; num_ICMP_conns: count; - num_fragments: count; # current number of fragments pending reassembly - num_packets: count; # total number packets processed to date - num_timers: count; # current number of pending timers - num_events_queued: count; # total number of events queued so far - num_events_dispatched: count; # same for events dispatched + num_fragments: count; ##< current number of fragments pending reassembly + num_packets: count; ##< total number packets processed to date + num_timers: count; ##< current number of pending timers + num_events_queued: count; ##< total number of events queued so far + num_events_dispatched: count; ##< same for events dispatched - max_TCP_conns: count; # maximum number of TCP connections, etc. + max_TCP_conns: count; ##< maximum number of TCP connections, etc. max_UDP_conns: count; max_ICMP_conns: count; max_fragments: count; @@ -145,34 +148,34 @@ type bro_resources: record { }; -# Summary statistics of all DFA_State_Caches. +## Summary statistics of all DFA_State_Caches. type matcher_stats: record { - matchers: count; # number of distinct RE matchers - dfa_states: count; # number of DFA states across all matchers - computed: count; # number of computed DFA state transitions - mem: count; # number of bytes used by DFA states - hits: count; # number of cache hits - misses: count; # number of cache misses - avg_nfa_states: count; # average # NFA states across all matchers + matchers: count; ##< number of distinct RE matchers + dfa_states: count; ##< number of DFA states across all matchers + computed: count; ##< number of computed DFA state transitions + mem: count; ##< number of bytes used by DFA states + hits: count; ##< number of cache hits + misses: count; ##< number of cache misses + avg_nfa_states: count; ##< average # NFA states across all matchers }; -# Info provided to gap_report, and also available by get_gap_summary(). 
+## Info provided to gap_report, and also available by get_gap_summary(). type gap_info: record { - ack_events: count; # how many ack events *could* have had gaps - ack_bytes: count; # how many bytes those covered - gap_events: count; # how many *did* have gaps - gap_bytes: count; # how many bytes were missing in the gaps: + ack_events: count; ##< how many ack events *could* have had gaps + ack_bytes: count; ##< how many bytes those covered + gap_events: count; ##< how many *did* have gaps + gap_bytes: count; ##< how many bytes were missing in the gaps: }; # This record should be read-only. type packet: record { conn: connection; is_orig: bool; - seq: count; # seq=k => it is the kth *packet* of the connection + seq: count; ##< seq=k => it is the kth *packet* of the connection timestamp: time; }; -type var_sizes: table[string] of count; # indexed by var's name, returns size +type var_sizes: table[string] of count; ##< indexed by var's name, returns size type script_id: record { type_name: string; @@ -213,30 +216,30 @@ type IPAddrAnonymization: enum { }; type IPAddrAnonymizationClass: enum { - ORIG_ADDR, # client address - RESP_ADDR, # server address + ORIG_ADDR, ##< client address + RESP_ADDR, ##< server address OTHER_ADDR, }; -# Events are generated by event_peer's (which may be either ourselves, or -# some remote process). +## Events are generated by event_peer's (which may be either ourselves, or +## some remote process). type peer_id: count; type event_peer: record { - id: peer_id; # locally unique ID of peer (returned by connect()) + id: peer_id; ##< locally unique ID of peer (returned by connect()) host: addr; p: port; - is_local: bool; # true if this peer describes the current process. - descr: string; # source's external_source_description + is_local: bool; ##< true if this peer describes the current process. 
+ descr: string; ##< source's external_source_description class: string &optional; # self-assigned class of the peer }; type rotate_info: record { - old_name: string; # original filename - new_name: string; # file name after rotation - open: time; # time when opened - close: time; # time when closed + old_name: string; ##< original filename + new_name: string; ##< file name after rotation + open: time; ##< time when opened + close: time; ##< time when closed }; @@ -249,29 +252,29 @@ type rotate_info: record { # }; type sw_params: record { - # Minimum size of a substring, minimum "granularity". + ## Minimum size of a substring, minimum "granularity". min_strlen: count &default = 3; - # Smith-Waterman flavor to use. + ## Smith-Waterman flavor to use. sw_variant: count &default = 0; }; type sw_align: record { - str: string; # string a substring is part of - index: count; # at which offset + str: string; ##< string a substring is part of + index: count; ##< at which offset }; type sw_align_vec: vector of sw_align; type sw_substring: record { - str: string; # a substring - aligns: sw_align_vec; # all strings of which it's a substring - new: bool; # true if start of new alignment + str: string; ##< a substring + aligns: sw_align_vec; ##< all strings of which it's a substring + new: bool; ##< true if start of new alignment }; type sw_substring_vec: vector of sw_substring; -# Policy-level handling of pcap packets. +## Policy-level handling of pcap packets. type pcap_packet: record { ts_sec: count; ts_usec: count; @@ -280,7 +283,7 @@ type pcap_packet: record { data: string; }; -# GeoIP support. +## GeoIP support. type geo_location: record { country_code: string &optional; region: string &optional; @@ -305,10 +308,10 @@ type entropy_test_result: record { global log_file_name: function(tag: string): string &redef; global open_log_file: function(tag: string): file &redef; -# Where to store the persistent state. +## Where to store the persistent state. 
const state_dir = ".state" &redef; -# Length of the delays added when storing state incrementally. +## Length of the delays added when storing state incrementally. const state_write_delay = 0.01 secs &redef; global done_with_network = F; @@ -366,149 +369,158 @@ const TCP_ESTABLISHED = 4; const TCP_CLOSED = 5; const TCP_RESET = 6; -# If true, don't verify checksums. Useful for running on altered trace -# files, and for saving a few cycles, but of course dangerous, too ... -# Note that the -C command-line option overrides the setting of this -# variable. +## If true, don't verify checksums. Useful for running on altered trace +## files, and for saving a few cycles, but of course dangerous, too ... +## Note that the -C command-line option overrides the setting of this +## variable. const ignore_checksums = F &redef; -# If true, instantiate connection state when a partial connection -# (one missing its initial establishment negotiation) is seen. +## If true, instantiate connection state when a partial connection +## (one missing its initial establishment negotiation) is seen. const partial_connection_ok = T &redef; -# If true, instantiate connection state when a SYN ack is seen -# but not the initial SYN (even if partial_connection_ok is false). +## If true, instantiate connection state when a SYN ack is seen +## but not the initial SYN (even if partial_connection_ok is false). const tcp_SYN_ack_ok = T &redef; -# If a connection state is removed there may still be some undelivered -# data waiting in the reassembler. If true, pass this to the signature -# engine before flushing the state. +## If a connection state is removed there may still be some undelivered +## data waiting in the reassembler. If true, pass this to the signature +## engine before flushing the state. const tcp_match_undelivered = T &redef; -# Check up on the result of an initial SYN after this much time. +## Check up on the result of an initial SYN after this much time. 
const tcp_SYN_timeout = 5 secs &redef; -# After a connection has closed, wait this long for further activity -# before checking whether to time out its state. +## After a connection has closed, wait this long for further activity +## before checking whether to time out its state. const tcp_session_timer = 6 secs &redef; -# When checking a closed connection for further activity, consider it -# inactive if there hasn't been any for this long. Complain if the -# connection is reused before this much time has elapsed. +## When checking a closed connection for further activity, consider it +## inactive if there hasn't been any for this long. Complain if the +## connection is reused before this much time has elapsed. const tcp_connection_linger = 5 secs &redef; -# Wait this long upon seeing an initial SYN before timing out the -# connection attempt. +## Wait this long upon seeing an initial SYN before timing out the +## connection attempt. const tcp_attempt_delay = 5 secs &redef; -# Upon seeing a normal connection close, flush state after this much time. +## Upon seeing a normal connection close, flush state after this much time. const tcp_close_delay = 5 secs &redef; -# Upon seeing a RST, flush state after this much time. +## Upon seeing a RST, flush state after this much time. const tcp_reset_delay = 5 secs &redef; -# Generate a connection_partial_close event this much time after one half -# of a partial connection closes, assuming there has been no subsequent -# activity. +## Generate a :bro:id:`connection_partial_close` event this much time after one half +## of a partial connection closes, assuming there has been no subsequent +## activity. const tcp_partial_close_delay = 3 secs &redef; -# If a connection belongs to an application that we don't analyze, -# time it out after this interval. If 0 secs, then don't time it out. +## If a connection belongs to an application that we don't analyze, +## time it out after this interval. If 0 secs, then don't time it out. 
const non_analyzed_lifetime = 0 secs &redef; -# If a connection is inactive, time it out after this interval. -# If 0 secs, then don't time it out. +## If a connection is inactive, time it out after this interval. +## If 0 secs, then don't time it out. const tcp_inactivity_timeout = 5 min &redef; +## See :bro:id:`tcp_inactivity_timeout` const udp_inactivity_timeout = 1 min &redef; +## See :bro:id:`tcp_inactivity_timeout` const icmp_inactivity_timeout = 1 min &redef; -# This many FINs/RSTs in a row constitutes a "storm". +## This many FINs/RSTs in a row constitutes a "storm". const tcp_storm_thresh = 1000 &redef; -# The FINs/RSTs must come with this much time or less between them. +## The FINs/RSTs must come with this much time or less between them. const tcp_storm_interarrival_thresh = 1 sec &redef; -# Maximum amount of data that might plausibly be sent in an initial -# flight (prior to receiving any acks). Used to determine whether we -# must not be seeing our peer's acks. Set to zero to turn off this -# determination. +## Maximum amount of data that might plausibly be sent in an initial +## flight (prior to receiving any acks). Used to determine whether we +## must not be seeing our peer's acks. Set to zero to turn off this +## determination. const tcp_max_initial_window = 4096; -# If we're not seeing our peer's acks, the maximum volume of data above -# a sequence hole that we'll tolerate before assuming that there's -# been a packet drop and we should give up on tracking a connection. -# If set to zero, then we don't ever give up. +## If we're not seeing our peer's acks, the maximum volume of data above +## a sequence hole that we'll tolerate before assuming that there's +## been a packet drop and we should give up on tracking a connection. +## If set to zero, then we don't ever give up. 
const tcp_max_above_hole_without_any_acks = 4096; -# If we've seen this much data without any of it being acked, we give up -# on that connection to avoid memory exhaustion due to buffering all that -# stuff. If set to zero, then we don't ever give up. Ideally, Bro would -# track the current window on a connection and use it to infer that data -# has in fact gone too far, but for now we just make this quite beefy. +## If we've seen this much data without any of it being acked, we give up +## on that connection to avoid memory exhaustion due to buffering all that +## stuff. If set to zero, then we don't ever give up. Ideally, Bro would +## track the current window on a connection and use it to infer that data +## has in fact gone too far, but for now we just make this quite beefy. const tcp_excessive_data_without_further_acks = 10 * 1024 * 1024; -# For services without a handler, these sets define which -# side of a connection is to be reassembled. +## For services without a handler, these sets define which +## side of a connection is to be reassembled. const tcp_reassembler_ports_orig: set[port] = {} &redef; +## See :bro:id:`tcp_reassembler_ports_orig` const tcp_reassembler_ports_resp: set[port] = {} &redef; -# These sets define destination ports for which the contents -# of the originator (responder, respectively) stream should -# be delivered via tcp_contents. +## These sets define destination ports for which the contents +## of the originator (responder, respectively) stream should +## be delivered via tcp_contents. const tcp_content_delivery_ports_orig: table[port] of bool = {} &redef; +## See :bro:id:`tcp_content_delivery_ports_orig` const tcp_content_delivery_ports_resp: table[port] of bool = {} &redef; # To have all TCP orig->resp/resp->orig traffic reported via tcp_contents, # redef these to T. 
const tcp_content_deliver_all_orig = F &redef; +## See :bro:id:`tcp_content_deliver_all_orig` const tcp_content_deliver_all_resp = F &redef; -# These sets define destination ports for which the contents -# of the originator (responder, respectively) stream should -# be delivered via udp_contents. +## These sets define destination ports for which the contents +## of the originator (responder, respectively) stream should +## be delivered via udp_contents. const udp_content_delivery_ports_orig: table[port] of bool = {} &redef; +## See :bro:id:`udp_content_delivery_ports_orig` const udp_content_delivery_ports_resp: table[port] of bool = {} &redef; -# To have all UDP orig->resp/resp->orig traffic reported via udp_contents, -# redef these to T. +## To have all UDP orig->resp/resp->orig traffic reported via udp_contents, +## redef these to T. const udp_content_deliver_all_orig = F &redef; +## See :bro:id:`udp_content_deliver_all_orig` const udp_content_deliver_all_resp = F &redef; -# Check for expired table entries after this amount of time +## Check for expired table entries after this amount of time const table_expire_interval = 10 secs &redef; -# When expiring/serializing, don't work on more than this many table -# entries at a time. +## When expiring/serializing, don't work on more than this many table +## entries at a time. const table_incremental_step = 5000 &redef; -# When expiring, wait this amount of time before checking the next chunk -# of entries. +## When expiring, wait this amount of time before checking the next chunk +## of entries. const table_expire_delay = 0.01 secs &redef; -# Time to wait before timing out a DNS/NTP/RPC request. +## Time to wait before timing out a DNS request. const dns_session_timeout = 10 sec &redef; +## Time to wait before timing out a NTP request. const ntp_session_timeout = 300 sec &redef; +## Time to wait before timing out a RPC request. 
const rpc_timeout = 24 sec &redef; -# Time window for reordering packets (to deal with timestamp -discrepency between multiple packet sources). +## Time window for reordering packets (to deal with timestamp +## discrepancy between multiple packet sources). const packet_sort_window = 0 usecs &redef; -# How long to hold onto fragments for possible reassembly. A value -# of 0.0 means "forever", which resists evasion, but can lead to -# state accrual. +## How long to hold onto fragments for possible reassembly. A value +## of 0.0 means "forever", which resists evasion, but can lead to +## state accrual. const frag_timeout = 0.0 sec &redef; -# If positive, indicates the encapsulation header size that should -# be skipped over for each captured packet .... +## If positive, indicates the encapsulation header size that should +## be skipped over for each captured packet .... const encap_hdr_size = 0 &redef; -# ... or just for the following UDP port. +## ... or just for the following UDP port. const tunnel_port = 0/udp &redef; -# Whether to use the ConnSize analyzer to count the number of -# packets and IP-level bytes transfered by each endpoint. If -# true, these values are returned in the connection's endpoint -# record val. +## Whether to use the ConnSize analyzer to count the number of +## packets and IP-level bytes transferred by each endpoint. If +## true, these values are returned in the connection's endpoint +## record val. const use_conn_size_analyzer = T &redef; const UDP_INACTIVE = 0; @@ -539,6 +551,7 @@ function append_addl_marker(c: connection, addl: string, marker: string) # Values for set_contents_file's "direction" argument. 
+# TODO: these should go into an enum to make them autodoc'able const CONTENTS_NONE = 0; # turn off recording of contents const CONTENTS_ORIG = 1; # record originator contents const CONTENTS_RESP = 2; # record responder contents @@ -564,44 +577,44 @@ const IPPROTO_UDP = 17; # user datagram protocol const IPPROTO_RAW = 255; # raw IP packet type ip_hdr: record { - hl: count; # header length (in bytes) - tos: count; # type of service - len: count; # total length - id: count; # identification - ttl: count; # time to live - p: count; # protocol - src: addr; # source address - dst: addr; # dest address + hl: count; ##< header length (in bytes) + tos: count; ##< type of service + len: count; ##< total length + id: count; ##< identification + ttl: count; ##< time to live + p: count; ##< protocol + src: addr; ##< source address + dst: addr; ##< dest address }; -# TCP flags. +## TCP flags. const TH_FIN = 1; const TH_SYN = 2; const TH_RST = 4; const TH_PUSH = 8; const TH_ACK = 16; const TH_URG = 32; -const TH_FLAGS = 63; # (TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) +const TH_FLAGS = 63; ##< (TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) type tcp_hdr: record { - sport: port; # source port - dport: port; # destination port - seq: count; # sequence number - ack: count; # acknowledgement number - hl: count; # header length (in bytes) - dl: count; # data length (xxx: not in original tcphdr!) - flags: count; # flags - win: count; # window + sport: port; ##< source port + dport: port; ##< destination port + seq: count; ##< sequence number + ack: count; ##< acknowledgement number + hl: count; ##< header length (in bytes) + dl: count; ##< data length (xxx: not in original tcphdr!) 
+ flags: count; ##< flags + win: count; ##< window }; type udp_hdr: record { - sport: port; # source port - dport: port; # destination port - ulen: count; # udp length + sport: port; ##< source port + dport: port; ##< destination port + ulen: count; ##< udp length }; -# Holds an ip_hdr and one of tcp_hdr, udp_hdr, or icmp_hdr. +## Holds an ip_hdr and one of tcp_hdr, udp_hdr, or icmp_hdr. type pkt_hdr: record { ip: ip_hdr; tcp: tcp_hdr &optional; @@ -610,13 +623,13 @@ type pkt_hdr: record { }; -# If you add elements here, then for a given BPF filter as index, when -# a packet matching that filter is captured, the corresponding event handler -# will be invoked. +## If you add elements here, then for a given BPF filter as index, when +## a packet matching that filter is captured, the corresponding event handler +## will be invoked. global secondary_filters: table[string] of event(filter: string, pkt: pkt_hdr) &redef; -global discarder_maxlen = 128 &redef; # maximum amount of data passed to fnc +global discarder_maxlen = 128 &redef; ##< maximum amount of data passed to fnc global discarder_check_ip: function(i: ip_hdr): bool; global discarder_check_tcp: function(i: ip_hdr, t: tcp_hdr, d: string): bool; @@ -626,17 +639,18 @@ global discarder_check_icmp: function(i: ip_hdr, ih: icmp_hdr): bool; const watchdog_interval = 10 sec &redef; -# The maximum number of timers to expire after processing each new -# packet. The value trades off spreading out the timer expiration load -# with possibly having to hold state longer. A value of 0 means -# "process all expired timers with each new packet". +## The maximum number of timers to expire after processing each new +## packet. The value trades off spreading out the timer expiration load +## with possibly having to hold state longer. A value of 0 means +## "process all expired timers with each new packet". 
const max_timer_expires = 300 &redef; -# With a similar trade-off, this gives the number of remote events -# to process in a batch before interleaving other activity. +## With a similar trade-off, this gives the number of remote events +## to process in a batch before interleaving other activity. const max_remote_events_processed = 10 &redef; # These need to match the definitions in Login.h. +# TODO: use enum to make them autodoc'able const LOGIN_STATE_AUTHENTICATE = 0; # trying to authenticate const LOGIN_STATE_LOGGED_IN = 1; # successful authentication const LOGIN_STATE_SKIP = 2; # skip any further processing @@ -714,37 +728,42 @@ const RPC_status = { module NFS3; export { - # Should the read and write events return the file data that has been - # read/written? + ## Should the read and write events return the file data that has been + ## read/written? const return_data = F &redef; - # If nfs_return_data is true, how much data should be returned at most. + ## If :bro:id:`nfs_return_data` is true, how much data should be returned at most. const return_data_max = 512 &redef; - # If nfs_return_data is true, whether to *only* return data if the read or write - # offset is 0, i.e., only return data for the beginning of the file. + ## If nfs_return_data is true, whether to *only* return data if the read or write + ## offset is 0, i.e., only return data for the beginning of the file. const return_data_first_only = T &redef; - # This record summarizes the general results and status of NFSv3 request/reply - # pairs. It's part of every NFSv3 event. + ## This record summarizes the general results and status of NFSv3 request/reply + ## pairs. It's part of every NFSv3 event. type info_t: record { - rpc_stat: rpc_status; # If this indicates not successful, the reply record in the - # events will be empty and contain uninitialized fields, so - # don't use it. 
+ ## If this indicates not successful, the reply record in the + ## events will be empty and contain uninitialized fields, so + ## don't use it. + rpc_stat: rpc_status; nfs_stat: status_t; - # The start time, duration, and length in bytes of the request (call). Note that - # the start and end time might not be accurate. For TCP, we record the - # time when a chunk of data is delivered to the analyzer. Depending on the - # Reassembler, this might be well after the first packet of the request - # was received. + ## The start time, duration, and length in bytes of the request (call). Note that + ## the start and end time might not be accurate. For TCP, we record the + ## time when a chunk of data is delivered to the analyzer. Depending on the + ## Reassembler, this might be well after the first packet of the request + ## was received. req_start: time; + ## See :bro:id:`req_start` req_dur: interval; + ## See :bro:id:`req_start` req_len: count; - # Same for the reply. + ## Like :bro:id:`req_start` but for reply. rep_start: time; + ## Like :bro:id:`req_dur` but for reply. rep_dur: interval; + ## Like :bro:id:`req_len` but for reply. rep_len: count; }; @@ -767,49 +786,49 @@ export { }; type diropargs_t : record { - dirfh: string; # the file handle of the directory - fname: string; # the name of the file we are interested in + dirfh: string; ##< the file handle of the directory + fname: string; ##< the name of the file we are interested in }; # Note, we don't need a "post_op_attr" type. We use an "fattr_t &optional" # instead. + ## If the lookup failed, dir_attr may be set. + ## If the lookup succeeded, fh is always set and obj_attr and dir_attr may be set. type lookup_reply_t: record { - # If the lookup failed, dir_attr may be set. - # If the lookup succeeded, fh is always set and obj_attr and dir_attr may be set. 
- fh: string &optional; # file handle of object looked up - obj_attr: fattr_t &optional; # optional attributes associated w/ file - dir_attr: fattr_t &optional; # optional attributes associated w/ dir. + fh: string &optional; ##< file handle of object looked up + obj_attr: fattr_t &optional; ##< optional attributes associated w/ file + dir_attr: fattr_t &optional; ##< optional attributes associated w/ dir. }; type readargs_t: record { - fh: string; # file handle to read from - offset: count; # offset in file - size: count; # number of bytes to read + fh: string; ##< file handle to read from + offset: count; ##< offset in file + size: count; ##< number of bytes to read }; + ## If the lookup fails, attr may be set. If the lookup succeeds, attr may be set + ## and all other fields are set. type read_reply_t: record { - # If the lookup fails, attr may be set. If the lookup succeeds, attr may be set - # and all other fields are set. - attr: fattr_t &optional; # attributes - size: count &optional; # number of bytes read - eof: bool &optional; # did the read end at EOF - data: string &optional; # the actual data; not yet implemented. + attr: fattr_t &optional; ##< attributes + size: count &optional; ##< number of bytes read + eof: bool &optional; ##< did the read end at EOF + data: string &optional; ##< the actual data; not yet implemented. }; + ## If the request fails, attr may be set. If the request succeeds, attr may be + ## set and all other fields are set. type readlink_reply_t: record { - # If the request fails, attr may be set. If the request succeeds, attr may be - # set and all other fields are set. 
- attr: fattr_t &optional; # attributes - nfspath: string &optional; # the contents of the symlink; in general a pathname as text + attr: fattr_t &optional; ##< attributes + nfspath: string &optional; ##< the contents of the symlink; in general a pathname as text }; type writeargs_t: record { - fh: string; # file handle to write to - offset: count; # offset in file - size: count; # number of bytes to write - stable: stable_how_t; # how and when data is commited - data: string &optional; # the actual data; not implemented yet + fh: string; ##< file handle to write to + offset: count; ##< offset in file + size: count; ##< number of bytes to write + stable: stable_how_t; ##< how and when data is committed + data: string &optional; ##< the actual data; not implemented yet }; type wcc_attr_t: record { @@ -818,65 +837,65 @@ export { mtime: time; }; + ## If the request fails, pre|post attr may be set. If the request succeeds, + ## pre|post attr may be set and all other fields are set. type write_reply_t: record { - # If the request fails, pre|post attr may be set. If the request succeeds, - # pre|post attr may be set and all other fields are set. - preattr: wcc_attr_t &optional; # pre operation attributes - postattr: fattr_t &optional; # post operation attributes + preattr: wcc_attr_t &optional; ##< pre operation attributes + postattr: fattr_t &optional; ##< post operation attributes size: count &optional; commited: stable_how_t &optional; - verf: count &optional; # write verifier cookue + verf: count &optional; ##< write verifier cookie }; - # reply for create, mkdir, symlink + ## reply for create, mkdir, symlink + ## If the proc failed, dir_*_attr may be set. If the proc succeeded, fh and + ## the attr's may be set. Note: no guarantee that fh is set after + ## success. 
- fh: string &optional; # file handle of object created - obj_attr: fattr_t &optional; # optional attributes associated w/ new object - dir_pre_attr: wcc_attr_t &optional; # optional attributes associated w/ dir - dir_post_attr: fattr_t &optional; # optional attributes associated w/ dir + fh: string &optional; ##< file handle of object created + obj_attr: fattr_t &optional; ##< optional attributes associated w/ new object + dir_pre_attr: wcc_attr_t &optional; ##< optional attributes associated w/ dir + dir_post_attr: fattr_t &optional; ##< optional attributes associated w/ dir }; - # reply for remove, rmdir - # Corresponds to "wcc_data" in the spec. + ## reply for remove, rmdir + ## Corresponds to "wcc_data" in the spec. type delobj_reply_t: record { - dir_pre_attr: wcc_attr_t &optional; # optional attributes associated w/ dir - dir_post_attr: fattr_t &optional; # optional attributes associated w/ dir + dir_pre_attr: wcc_attr_t &optional; ##< optional attributes associated w/ dir + dir_post_attr: fattr_t &optional; ##< optional attributes associated w/ dir }; - # This record is used for both readdir and readdirplus. + ## This record is used for both readdir and readdirplus. type readdirargs_t: record { - isplus: bool; # is this a readdirplus request? - dirfh: string; # the directory filehandle - cookie: count; # cookie / pos in dir; 0 for first call - cookieverf: count; # the cookie verifier - dircount: count; # "count" field for readdir; maxcount otherwise (in bytes) - maxcount: count &optional; # only used for readdirplus. in bytes + isplus: bool; ##< is this a readdirplus request? + dirfh: string; ##< the directory filehandle + cookie: count; ##< cookie / pos in dir; 0 for first call + cookieverf: count; ##< the cookie verifier + dircount: count; ##< "count" field for readdir; maxcount otherwise (in bytes) + maxcount: count &optional; ##< only used for readdirplus. in bytes }; + ## fh and attr are used for readdirplus. 
However, even for readdirplus they may + ## not be filled out. type direntry_t: record { - # fh and attr are used for readdirplus. However, even for readdirplus they may - # not be filled out. - fileid: count; # e.g., inode number - fname: string; # filename + fileid: count; ##< e.g., inode number + fname: string; ##< filename cookie: count; - attr: fattr_t &optional; # readdirplus: the FH attributes for the entry - fh: string &optional; # readdirplus: the FH for the entry + attr: fattr_t &optional; ##< readdirplus: the FH attributes for the entry + fh: string &optional; ##< readdirplus: the FH for the entry }; type direntry_vec_t: vector of direntry_t; - # Used for readdir and readdirplus. + ## Used for readdir and readdirplus. + ## If error: dir_attr might be set. If success: dir_attr may be set, all others + ## must be set. type readdir_reply_t: record { - # If error: dir_attr might be set. If success: dir_attr may be set, all others - # must be set. - isplus: bool; # is the reply for a readdirplus request + isplus: bool; ##< is the reply for a readdirplus request dir_attr: fattr_t &optional; cookieverf: count &optional; entries: direntry_vec_t &optional; - eof: bool; # if true, no more entries in dir. + eof: bool; ##< if true, no more entries in dir. }; type fsstat_t: record { @@ -908,7 +927,7 @@ type ntp_msg: record { }; -# Maps Samba command numbers to descriptive names. +## Maps Samba command numbers to descriptive names. global samba_cmds: table[count] of string &redef &default = function(c: count): string { return fmt("samba-unknown-%d", c); }; @@ -959,16 +978,16 @@ type smb_tree_connect : record { type smb_negotiate : table[count] of string; -# A list of router addresses offered by the server. +## A list of router addresses offered by the server. type dhcp_router_list: table[count] of addr; type dhcp_msg: record { - op: count; # message OP code. 
1 = BOOTREQUEST, 2 = BOOTREPLY - m_type: count; # the type of DHCP message - xid: count; # transaction ID of a DHCP session - h_addr: string; # hardware address of the client - ciaddr: addr; # original IP address of the client - yiaddr: addr; # IP address assigned to the client + op: count; ##< message OP code. 1 = BOOTREQUEST, 2 = BOOTREPLY + m_type: count; ##< the type of DHCP message + xid: count; ##< transaction ID of a DHCP session + h_addr: string; ##< hardware address of the client + ciaddr: addr; ##< original IP address of the client + yiaddr: addr; ##< IP address assigned to the client }; type dns_msg: record { @@ -991,13 +1010,13 @@ type dns_msg: record { }; type dns_soa: record { - mname: string; # primary source of data for zone - rname: string; # mailbox for responsible person - serial: count; # version number of zone - refresh: interval; # seconds before refreshing - retry: interval; # how long before retrying failed refresh - expire: interval; # when zone no longer authoritative - minimum: interval; # minimum TTL to use when exporting + mname: string; ##< primary source of data for zone + rname: string; ##< mailbox for responsible person + serial: count; ##< version number of zone + refresh: interval; ##< seconds before refreshing + retry: interval; ##< how long before retrying failed refresh + expire: interval; ##< when zone no longer authoritative + minimum: interval; ##< minimum TTL to use when exporting }; type dns_edns_additional: record { @@ -1026,6 +1045,7 @@ type dns_tsig_additional: record { # Different values for "answer_type" in the following. DNS_QUERY # shouldn't occur, it's just for completeness. +# TODO: use enums to help autodoc const DNS_QUERY = 0; const DNS_ANS = 1; const DNS_AUTH = 2; @@ -1039,22 +1059,25 @@ type dns_answer: record { TTL: interval; }; -# For servers in these sets, omit processing the AUTH or ADDL records -# they include in their replies. 
+## For servers in these sets, omit processing the AUTH records +## they include in their replies. global dns_skip_auth: set[addr] &redef; +## For servers in these sets, omit processing the ADDL records +## they include in their replies. global dns_skip_addl: set[addr] &redef; -# If the following are true, then all AUTH or ADDL records are skipped. +## If the following are true, then all AUTH records are skipped. global dns_skip_all_auth = T &redef; +## If the following are true, then all ADDL records are skipped. global dns_skip_all_addl = T &redef; -# If a DNS request includes more than this many queries, assume it's -# non-DNS traffic and do not process it. Set to 0 to turn off this -# functionality. +## If a DNS request includes more than this many queries, assume it's +## non-DNS traffic and do not process it. Set to 0 to turn off this +## functionality. global dns_max_queries = 5; -# The maxiumum size in bytes for an SSL cipherspec. If we see a packet that -# has bigger cipherspecs, we won't do a comparisons of cipherspecs. +## The maxiumum size in bytes for an SSL cipherspec. If we see a packet that +## has bigger cipherspecs, we won't do a comparisons of cipherspecs. const ssl_max_cipherspec_size = 68 &redef; type X509_extensions: table[count] of string; @@ -1068,7 +1091,7 @@ type X509: record { not_valid_after: time; }; -# This is indexed with the CA's name and yields a DER (binary) encoded certificate. +## This is indexed with the CA's name and yields a DER (binary) encoded certificate. 
const root_ca_certs: table[string] of string = {} &redef; type http_stats_rec: record { @@ -1079,25 +1102,28 @@ type http_stats_rec: record { }; type http_message_stat: record { - start: time; # when the request/reply line was complete - interrupted: bool; # whether the message is interrupted - finish_msg: string; # reason phrase if interrupted - body_length: count; # length of body processed - # (before finished/interrupted) - content_gap_length: count; # total len of gaps within body_length - header_length: count; # length of headers - # (including the req/reply line, - # but not CR/LF's) + ## when the request/reply line was complete + start: time; + ## whether the message is interrupted + interrupted: bool; + ## reason phrase if interrupted + finish_msg: string; + ## length of body processed (before finished/interrupted) + body_length: count; + ## total len of gaps within body_length + content_gap_length: count; + ## length of headers (including the req/reply line, but not CR/LF's) + header_length: count; }; global http_entity_data_delivery_size = 1500 &redef; -# Truncate URIs longer than this to prevent over-long URIs (usually sent -# by worms) from slowing down event processing. A value of -1 means "do -# not truncate". +## Truncate URIs longer than this to prevent over-long URIs (usually sent +## by worms) from slowing down event processing. A value of -1 means "do +## not truncate". const truncate_http_URI = -1 &redef; -# IRC-related globals to which the event engine is sensitive. +## IRC-related globals to which the event engine is sensitive. type irc_join_info: record { nick: string; channel: string; @@ -1107,11 +1133,11 @@ type irc_join_info: record { type irc_join_list: set[irc_join_info]; global irc_servers : set[addr] &redef; -# Stepping-stone globals. +## Stepping-stone globals. const stp_delta: interval &redef; const stp_idle_min: interval &redef; -# Don't do analysis on these sources. Used to avoid overload from scanners. 
+## Don't do analysis on these sources. Used to avoid overload from scanners. global stp_skip_src: set[addr] &redef; const interconn_min_interarrival: interval &redef; @@ -1149,10 +1175,10 @@ type backdoor_endp_stats: record { }; type signature_state: record { - sig_id: string; # ID of the signature - conn: connection; # Current connection - is_orig: bool; # True if current endpoint is originator - payload_size: count; # Payload size of the first pkt of curr. endpoint + sig_id: string; ##< ID of the signature + conn: connection; ##< Current connection + is_orig: bool; ##< True if current endpoint is originator + payload_size: count; ##< Payload size of the first pkt of curr. endpoint }; @@ -1195,21 +1221,21 @@ type load_sample_info: set[string]; # NetFlow-related data structures. -# The following provides a mean to sort together flow headers and flow -# records at the script level. rcvr_id equals the name of the file -# (e.g., netflow.dat) or the socket address (e.g., 127.0.0.1:5555), -# or an explicit name if specified to -y or -Y; pdu_id is just a serial -# number, ignoring any overflows. +## The following provides a mean to sort together NetFlow headers and flow +## records at the script level. rcvr_id equals the name of the file +## (e.g., netflow.dat) or the socket address (e.g., 127.0.0.1:5555), +## or an explicit name if specified to -y or -Y; pdu_id is just a serial +## number, ignoring any overflows. 
type nfheader_id: record { rcvr_id: string; pdu_id: count; }; type nf_v5_header: record { - h_id: nfheader_id; # ID for sorting, per the above + h_id: nfheader_id; ##< ID for sorting, per the above cnt: count; - sysuptime: interval; # router's uptime - exporttime: time; # when the data was exported + sysuptime: interval; ##< router's uptime + exporttime: time; ##< when the data was exported flow_seq: count; eng_type: count; eng_id: count; @@ -1227,7 +1253,7 @@ type nf_v5_record: record { octets: count; first: time; last: time; - tcpflag_fin: bool; # Taken from tcpflags in NF V5; or directly. + tcpflag_fin: bool; ##< Taken from tcpflags in NF V5; or directly. tcpflag_syn: bool; tcpflag_rst: bool; tcpflag_psh: bool; @@ -1242,17 +1268,17 @@ type nf_v5_record: record { }; -# The peer record and the corresponding set type used by the -# BitTorrent analyzer. +## The peer record and the corresponding set type used by the +## BitTorrent analyzer. type bittorrent_peer: record { h: addr; p: port; }; type bittorrent_peer_set: set[bittorrent_peer]; -# The benc value record and the corresponding table type used by the -# BitTorrenttracker analyzer. Note that "benc" = Bencode ("Bee-Encode"), -# per http://en.wikipedia.org/wiki/Bencode. +## The benc value record and the corresponding table type used by the +## BitTorrenttracker analyzer. Note that "benc" = Bencode ("Bee-Encode"), +## per http://en.wikipedia.org/wiki/Bencode. type bittorrent_benc_value: record { i: int &optional; s: string &optional; @@ -1261,7 +1287,7 @@ type bittorrent_benc_value: record { }; type bittorrent_benc_dir: table[string] of bittorrent_benc_value; -# The header table type used by the bittorrenttracker analyzer. +## The header table type used by the bittorrenttracker analyzer. type bt_tracker_headers: table[string] of string; @load event.bif.bro @@ -1270,38 +1296,38 @@ type bt_tracker_headers: table[string] of string; # empty if none. 
const cmd_line_bpf_filter = "" &redef; -# Rotate logs every x interval. +## Rotate logs every x interval. const log_rotate_interval = 0 sec &redef; -# If set, rotate logs at given time + i * log_rotate_interval. -# (string is time in 24h format, e.g., "18:00"). +## If set, rotate logs at given time + i * log_rotate_interval. +## (string is time in 24h format, e.g., "18:00"). const log_rotate_base_time = "0:00" &redef; -# Rotate logs when they reach this size (in bytes). Note, the -# parameter is a double rather than a count to enable easy expression -# of large values such as 1e7 or exceeding 2^32. +## Rotate logs when they reach this size (in bytes). Note, the +## parameter is a double rather than a count to enable easy expression +## of large values such as 1e7 or exceeding 2^32. const log_max_size = 0.0 &redef; -# Default public key for encrypting log files. +## Default public key for encrypting log files. const log_encryption_key = "" &redef; -# Write profiling info into this file. +## Write profiling info into this file. global profiling_file: file &redef; -# Update interval for profiling (0 disables). +## Update interval for profiling (0 disables). const profiling_interval = 0 secs &redef; -# Multiples of profiling_interval at which (expensive) memory -# profiling is done (0 disables). +## Multiples of profiling_interval at which (expensive) memory +## profiling is done (0 disables). const expensive_profiling_multiple = 0 &redef; -# If true, then write segment profiling information (very high volume!) -# in addition to statistics. +## If true, then write segment profiling information (very high volume!) +## in addition to statistics. const segment_profiling = F &redef; -# Output packet profiling information every secs (mode 1), -# every packets (mode 2), or every bytes (mode 3). -# Mode 0 disables. +## Output packet profiling information every secs (mode 1), +## every packets (mode 2), or every bytes (mode 3). +## Mode 0 disables. 
type pkt_profile_modes: enum { PKT_PROFILE_MODE_NONE, PKT_PROFILE_MODE_SECS, @@ -1310,74 +1336,76 @@ type pkt_profile_modes: enum { }; const pkt_profile_mode = PKT_PROFILE_MODE_NONE &redef; -# Frequency associated with packet profiling. +## Frequency associated with packet profiling. const pkt_profile_freq = 0.0 &redef; -# File where packet profiles are logged. +## File where packet profiles are logged. global pkt_profile_file: file &redef; -# Rate at which to generate load_sample events, *if* you've also -# defined a load_sample handler. Units are inverse number of packets; -# e.g., a value of 20 means "roughly one in every 20 packets". +## Rate at which to generate load_sample events, *if* you've also +## defined a load_sample handler. Units are inverse number of packets; +## e.g., a value of 20 means "roughly one in every 20 packets". global load_sample_freq = 20 &redef; -# Rate at which to generate gap_report events assessing to what -# degree the measurement process appears to exhibit loss. +## Rate at which to generate gap_report events assessing to what +## degree the measurement process appears to exhibit loss. const gap_report_freq = 1.0 sec &redef; -# Whether we want content_gap and drop reports for partial connections -# (a connection is partial if it is missing a full handshake). Note that -# gap reports for partial connections might not be reliable. +## Whether we want content_gap and drop reports for partial connections +## (a connection is partial if it is missing a full handshake). Note that +## gap reports for partial connections might not be reliable. const report_gaps_for_partial = F &redef; -# Globals associated with entire-run statistics on gaps (useful -# for final summaries). +## Globals associated with entire-run statistics on gaps (useful +## for final summaries). -# The CA certificate file to authorize remote Bros. +## The CA certificate file to authorize remote Bros. 
const ssl_ca_certificate = "" &redef; -# File containing our private key and our certificate. +## File containing our private key and our certificate. const ssl_private_key = "" &redef; -# The passphrase for our private key. Keeping this undefined -# causes Bro to prompt for the passphrase. +## The passphrase for our private key. Keeping this undefined +## causes Bro to prompt for the passphrase. const ssl_passphrase = "" &redef; -# Whether the Bro-level packet filter drops packets per default or not. +## Whether the Bro-level packet filter drops packets per default or not. const packet_filter_default = F &redef; -# Maximum size of regular expression groups for signature matching. +## Maximum size of regular expression groups for signature matching. const sig_max_group_size = 50 &redef; -# If true, send logger messages to syslog. +## If true, send logger messages to syslog. const enable_syslog = F &redef; -# This is transmitted to peers receiving our events. +## This is transmitted to peers receiving our events. const peer_description = "bro" &redef; -# If true, broadcast events/state received from one peer to other peers. -# NOTE: These options are only temporary. They will disappear when we get a -# more sophisticated script-level communication framework. +## If true, broadcast events/state received from one peer to other peers. +## NOTE: These options are only temporary. They will disappear when we get a +## more sophisticated script-level communication framework. const forward_remote_events = F &redef; +## See :bro:id:`forward_remote_events` const forward_remote_state_changes = F &redef; const PEER_ID_NONE = 0; -# Whether to use the connection tracker. +## Whether to use the connection tracker. const use_connection_compressor = T &redef; -# Whether compressor should handle refused connections itself. +## Whether compressor should handle refused connections itself. const cc_handle_resets = F &redef; -# Whether compressor should only take care of initial SYNs. 
-# (By default on, this is basically "connection compressor lite".) +## Whether compressor should only take care of initial SYNs. +## (By default on, this is basically "connection compressor lite".) const cc_handle_only_syns = T &redef; -# Whether compressor instantiates full state when originator sends a -# non-control packet. +## Whether compressor instantiates full state when originator sends a +## non-control packet. const cc_instantiate_on_data = F &redef; # Signature payload pattern types +# TODO: use enum to help autodoc const SIG_PATTERN_PAYLOAD = 0; const SIG_PATTERN_HTTP = 1; const SIG_PATTERN_FTP = 2; @@ -1385,24 +1413,26 @@ const SIG_PATTERN_FINGER = 3; # Log-levels for remote_log. # Eventually we should create a general logging framework and merge these in. +# TODO: use enum to help autodoc const REMOTE_LOG_INFO = 1; const REMOTE_LOG_ERROR = 2; # Sources for remote_log. +# TODO: use enum to help autodoc const REMOTE_SRC_CHILD = 1; const REMOTE_SRC_PARENT = 2; const REMOTE_SRC_SCRIPT = 3; -# Synchronize trace processing at a regular basis in pseudo-realtime mode. +## Synchronize trace processing at a regular basis in pseudo-realtime mode. const remote_trace_sync_interval = 0 secs &redef; -# Number of peers across which to synchronize trace processing. +## Number of peers across which to synchronize trace processing. const remote_trace_sync_peers = 0 &redef; -# Whether for &synchronized state to send the old value as a consistency check. +## Whether for &synchronized state to send the old value as a consistency check. const remote_check_sync_consistency = F &redef; -# Prepend the peer description, if set. +## Prepend the peer description, if set. function prefixed_id(id: count): string { if ( peer_description == "" ) @@ -1411,8 +1441,8 @@ function prefixed_id(id: count): string return cat(peer_description, "-", id); } -# Analyzer tags. The core automatically defines constants -# ANALYZER_*, e.g., ANALYZER_HTTP. +## Analyzer tags. 
The core automatically defines constants +## ANALYZER_*, e.g., ANALYZER_HTTP. type AnalyzerTag: count; # DPD configuration. @@ -1423,66 +1453,66 @@ type dpd_protocol_config: record { const dpd_config: table[AnalyzerTag] of dpd_protocol_config = {} &redef; -# Reassemble the beginning of all TCP connections before doing -# signature-matching for protocol detection. +## Reassemble the beginning of all TCP connections before doing +## signature-matching for protocol detection. const dpd_reassemble_first_packets = T &redef; -# Size of per-connection buffer in bytes. If the buffer is full, data is -# deleted and lost to analyzers that are activated afterwards. +## Size of per-connection buffer in bytes. If the buffer is full, data is +## deleted and lost to analyzers that are activated afterwards. const dpd_buffer_size = 1024 &redef; -# If true, stops signature matching if dpd_buffer_size has been reached. +## If true, stops signature matching if dpd_buffer_size has been reached. const dpd_match_only_beginning = T &redef; -# If true, don't consider any ports for deciding which analyzer to use. +## If true, don't consider any ports for deciding which analyzer to use. const dpd_ignore_ports = F &redef; -# Ports which the core considers being likely used by servers. +## Ports which the core considers being likely used by servers. const likely_server_ports: set[port] &redef; -# Set of all ports for which we know an analyzer. +## Set of all ports for which we know an analyzer. global dpd_analyzer_ports: table[port] of set[AnalyzerTag]; -# Per-incident timer managers are drained after this amount of inactivity. +## Per-incident timer managers are drained after this amount of inactivity. const timer_mgr_inactivity_timeout = 1 min &redef; -# If true, output profiling for time-machine queries. +## If true, output profiling for time-machine queries. const time_machine_profiling = F &redef; -# If true, warns about unused event handlers at startup. 
+## If true, warns about unused event handlers at startup. const check_for_unused_event_handlers = F &redef; -# If true, dumps all invoked event handlers at startup. +## If true, dumps all invoked event handlers at startup. const dump_used_event_handlers = F &redef; -# If true, we suppress prints to local files if we have a receiver for -# print_hook events. Ignored for files with a &disable_print_hook attribute. +## If true, we suppress prints to local files if we have a receiver for +## print_hook events. Ignored for files with a &disable_print_hook attribute. const suppress_local_output = F &redef; -# Holds the filename of the trace file given with -w (empty if none). +## Holds the filename of the trace file given with -w (empty if none). const trace_output_file = ""; -# If a trace file is given, dump *all* packets seen by Bro into it. -# By default, Bro applies (very few) heuristics to reduce the volume. -# A side effect of setting this to true is that we can write the -# packets out before we actually process them, which can be helpful -# for debugging in case the analysis triggers a crash. +## If a trace file is given, dump *all* packets seen by Bro into it. +## By default, Bro applies (very few) heuristics to reduce the volume. +## A side effect of setting this to true is that we can write the +## packets out before we actually process them, which can be helpful +## for debugging in case the analysis triggers a crash. const record_all_packets = F &redef; -# Some connections (e.g., SSH) retransmit the acknowledged last -# byte to keep the connection alive. If ignore_keep_alive_rexmit -# is set to T, such retransmissions will be excluded in the rexmit -# counter in conn_stats. +## Some connections (e.g., SSH) retransmit the acknowledged last +## byte to keep the connection alive. If ignore_keep_alive_rexmit +## is set to T, such retransmissions will be excluded in the rexmit +## counter in conn_stats. 
const ignore_keep_alive_rexmit = F &redef; -# Skip HTTP data portions for performance considerations (the skipped -# portion will not go through TCP reassembly). +## Skip HTTP data portions for performance considerations (the skipped +## portion will not go through TCP reassembly). const skip_http_data = F &redef; -# Whether the analysis engine parses IP packets encapsulated in -# UDP tunnels. See also: udp_tunnel_port, policy/udp-tunnel.bro. +## Whether the analysis engine parses IP packets encapsulated in +## UDP tunnels. See also: udp_tunnel_port, policy/udp-tunnel.bro. const parse_udp_tunnels = F &redef; -# Load the logging framework here because it uses fairly deep integration with -# BiFs and script-land defined types. +## Load the logging framework here because it uses fairly deep integration with +## BiFs and script-land defined types. @load base/frameworks/logging